diff --git a/cmd/404-server-with-metrics/server-with-metrics.go b/cmd/404-server-with-metrics/server-with-metrics.go index 7b6e5b5e89..a8ca4834f1 100644 --- a/cmd/404-server-with-metrics/server-with-metrics.go +++ b/cmd/404-server-with-metrics/server-with-metrics.go @@ -28,7 +28,7 @@ import ( "runtime" "time" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" diff --git a/cmd/e2e-test/cdn_test.go b/cmd/e2e-test/cdn_test.go index 35b1f4edcb..bd56eb3066 100644 --- a/cmd/e2e-test/cdn_test.go +++ b/cmd/e2e-test/cdn_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/ingress-gce/pkg/fuzz" "k8s.io/ingress-gce/pkg/fuzz/features" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // TestCDN is for ingress versions before the CDN config was expanded diff --git a/cmd/e2e-test/main_test.go b/cmd/e2e-test/main_test.go index 0df6ccf8a4..a0512877de 100644 --- a/cmd/e2e-test/main_test.go +++ b/cmd/e2e-test/main_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/ingress-gce/pkg/e2e" _ "k8s.io/ingress-gce/pkg/klog" "k8s.io/ingress-gce/pkg/version" - "k8s.io/klog" + "k8s.io/klog/v2" // Pull in the auth library for GCP. _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" diff --git a/cmd/echo/app/handlers.go b/cmd/echo/app/handlers.go index dcc5b7e362..d0488968f9 100644 --- a/cmd/echo/app/handlers.go +++ b/cmd/echo/app/handlers.go @@ -27,7 +27,7 @@ import ( "time" "k8s.io/ingress-gce/pkg/version" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/cmd/echo/app/tls.go b/cmd/echo/app/tls.go index 63d814fff9..cb622d346b 100644 --- a/cmd/echo/app/tls.go +++ b/cmd/echo/app/tls.go @@ -27,7 +27,7 @@ import ( "math/big" "time" - "k8s.io/klog" + "k8s.io/klog/v2" ) // createCert creates a certificate and key in temporary files and returns their paths. 
diff --git a/cmd/echo/main.go b/cmd/echo/main.go index 386862fcc2..8a2c8a75a9 100644 --- a/cmd/echo/main.go +++ b/cmd/echo/main.go @@ -23,7 +23,7 @@ import ( "k8s.io/ingress-gce/cmd/echo/app" _ "k8s.io/ingress-gce/pkg/klog" "k8s.io/ingress-gce/pkg/version" - "k8s.io/klog" + "k8s.io/klog/v2" ) func main() { diff --git a/cmd/glbc/app/clients.go b/cmd/glbc/app/clients.go index 9f6ca7bdf7..9368ca4d53 100644 --- a/cmd/glbc/app/clients.go +++ b/cmd/glbc/app/clients.go @@ -27,7 +27,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" // Register the GCP authorization provider. diff --git a/cmd/glbc/app/handlers.go b/cmd/glbc/app/handlers.go index 3581526a53..6fb916b32a 100644 --- a/cmd/glbc/app/handlers.go +++ b/cmd/glbc/app/handlers.go @@ -27,7 +27,7 @@ import ( "syscall" "github.com/prometheus/client_golang/prometheus/promhttp" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/controller" diff --git a/cmd/glbc/app/init.go b/cmd/glbc/app/init.go index 7a8e7719ca..11d4e395af 100644 --- a/cmd/glbc/app/init.go +++ b/cmd/glbc/app/init.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // DefaultBackendServicePort returns the ServicePort which will be diff --git a/cmd/glbc/app/namer.go b/cmd/glbc/app/namer.go index b263f9900d..3f671a6ddc 100644 --- a/cmd/glbc/app/namer.go +++ b/cmd/glbc/app/namer.go @@ -23,7 +23,7 @@ import ( "time" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" diff --git a/cmd/glbc/main.go b/cmd/glbc/main.go index a23f1cff27..61957f91b9 100644 --- a/cmd/glbc/main.go +++ b/cmd/glbc/main.go @@ -32,7 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/psc" 
"k8s.io/ingress-gce/pkg/serviceattachment" "k8s.io/ingress-gce/pkg/svcneg" - "k8s.io/klog" + "k8s.io/klog/v2" crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" @@ -340,6 +340,7 @@ func runControllers(ctx *ingctx.ControllerContext) { enableAsm, asmServiceNEGSkipNamespaces, flags.F.EnableEndpointSlices, + klog.TODO(), // TODO(#1761): Replace this with a top level logger configuration once one is available. ) ctx.AddHealthCheck("neg-controller", negController.IsHealthy) diff --git a/cmd/workload-controller/app/client.go b/cmd/workload-controller/app/client.go index c0f001f763..5ae0095b57 100644 --- a/cmd/workload-controller/app/client.go +++ b/cmd/workload-controller/app/client.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/ingress-gce/pkg/flags" - "k8s.io/klog" + "k8s.io/klog/v2" // Register the GCP authorization provider. _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" diff --git a/cmd/workload-controller/main.go b/cmd/workload-controller/main.go index 8b7ef7ce3b..c51a008a38 100644 --- a/cmd/workload-controller/main.go +++ b/cmd/workload-controller/main.go @@ -32,7 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" _ "k8s.io/ingress-gce/pkg/klog" "k8s.io/ingress-gce/pkg/version" - "k8s.io/klog" + "k8s.io/klog/v2" ) func main() { diff --git a/cmd/workload-daemon/main.go b/cmd/workload-daemon/main.go index dc45af80a8..098cd0d82f 100644 --- a/cmd/workload-daemon/main.go +++ b/cmd/workload-daemon/main.go @@ -24,7 +24,7 @@ import ( daemon "k8s.io/ingress-gce/pkg/experimental/workload/daemon" gce "k8s.io/ingress-gce/pkg/experimental/workload/daemon/provider/gce" - "k8s.io/klog" + "k8s.io/klog/v2" // GCP Authentication _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" diff --git a/go.mod b/go.mod index f86c287a95..fb53853385 100644 --- a/go.mod +++ b/go.mod @@ -24,8 +24,8 @@ require ( k8s.io/client-go v0.22.2 k8s.io/cloud-provider v0.20.0 k8s.io/component-base v0.20.0 - 
k8s.io/klog v1.0.0 + k8s.io/klog/v2 v2.50.2 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e k8s.io/legacy-cloud-providers v0.20.0 - k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 ) diff --git a/go.sum b/go.sum index c42f309f17..1a0f178783 100644 --- a/go.sum +++ b/go.sum @@ -168,8 +168,9 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= @@ -990,21 +991,21 @@ k8s.io/controller-manager v0.20.0/go.mod h1:nD4qym/pmCz2v1tpqvlEBVlHW9CAZwedloM8 k8s.io/csi-translation-lib v0.20.0/go.mod h1:M4CdD66GxEI6ev8aTtsA2NkK9kIF9K5VZQMcw/SsoLs= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod 
h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.50.2 h1:OsuacU5nfUhtQY1ictYdOEzkUoP5zkZ61i8F67Ch6W0= +k8s.io/klog/v2 v2.50.2/go.mod h1:N3kgBtsFxMb4nQ0eBDgbHEt/dtxBuTkSFQ+7K5OUoz4= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/legacy-cloud-providers v0.20.0 h1:bF2WKO3ZWEjBvCv9RJxsxOAGrmQHWSgTiDBjK8uW2iw= k8s.io/legacy-cloud-providers v0.20.0/go.mod h1:1jEkaU7h9+b1EYdfWDBvhFAr+QpRfUjQfK+dGhxPGfA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go index 622d21b513..0da87d30fa 100644 --- a/pkg/backends/backends.go +++ b/pkg/backends/backends.go @@ -25,7 +25,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" 
"k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/backends/features/affinity.go b/pkg/backends/features/affinity.go index b8550c1d64..447771e008 100644 --- a/pkg/backends/features/affinity.go +++ b/pkg/backends/features/affinity.go @@ -21,7 +21,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureAffinity reads the sessionAffinity and AffinityCookieTtlSec configuration diff --git a/pkg/backends/features/cdn.go b/pkg/backends/features/cdn.go index d7fcbf39fd..a698f436ad 100644 --- a/pkg/backends/features/cdn.go +++ b/pkg/backends/features/cdn.go @@ -24,7 +24,7 @@ import ( "github.com/kr/pretty" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // This values are copied from GCP Load balancer diff --git a/pkg/backends/features/cdn_test.go b/pkg/backends/features/cdn_test.go index 8f8fe38b33..a15d21af73 100644 --- a/pkg/backends/features/cdn_test.go +++ b/pkg/backends/features/cdn_test.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) var ( diff --git a/pkg/backends/features/customrequestheaders.go b/pkg/backends/features/customrequestheaders.go index f4c64c6751..0c0763c18f 100644 --- a/pkg/backends/features/customrequestheaders.go +++ b/pkg/backends/features/customrequestheaders.go @@ -20,7 +20,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureCustomRequestHeaders reads the CustomRequestHeaders configuration specified in the ServicePort.BackendConfig diff --git a/pkg/backends/features/draining.go b/pkg/backends/features/draining.go index 1129ee8059..18636a530f 100644 --- a/pkg/backends/features/draining.go +++ b/pkg/backends/features/draining.go @@ -21,7 +21,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" 
"k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureDraining reads the ConnectionDraining configuration specified in diff --git a/pkg/backends/features/iap.go b/pkg/backends/features/iap.go index 6c8d399dd0..502ff7ac33 100644 --- a/pkg/backends/features/iap.go +++ b/pkg/backends/features/iap.go @@ -22,7 +22,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureIAP reads the IAP configuration specified in the BackendConfig diff --git a/pkg/backends/features/logging.go b/pkg/backends/features/logging.go index 2e9b2b0c21..528570b2b0 100644 --- a/pkg/backends/features/logging.go +++ b/pkg/backends/features/logging.go @@ -21,7 +21,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureLogging reads the log configurations specified in the ServicePort.BackendConfig diff --git a/pkg/backends/features/securitypolicy.go b/pkg/backends/features/securitypolicy.go index 532c53c7b1..154afe969a 100644 --- a/pkg/backends/features/securitypolicy.go +++ b/pkg/backends/features/securitypolicy.go @@ -19,7 +19,7 @@ package features import ( "fmt" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "k8s.io/legacy-cloud-providers/gce" diff --git a/pkg/backends/features/timeout.go b/pkg/backends/features/timeout.go index fab3b5b1b7..6927626a8b 100644 --- a/pkg/backends/features/timeout.go +++ b/pkg/backends/features/timeout.go @@ -21,7 +21,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // EnsureTimeout reads the TimeoutSec configuration specified in the ServicePort.BackendConfig diff --git a/pkg/backends/ig_linker.go b/pkg/backends/ig_linker.go index cd7b527c29..325561cd61 100644 --- a/pkg/backends/ig_linker.go +++ b/pkg/backends/ig_linker.go @@ -23,7 +23,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" 
"k8s.io/ingress-gce/pkg/instances" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // BalancingMode represents the loadbalancing configuration of an individual diff --git a/pkg/backends/neg_linker.go b/pkg/backends/neg_linker.go index 63b260440c..1229b28446 100644 --- a/pkg/backends/neg_linker.go +++ b/pkg/backends/neg_linker.go @@ -15,6 +15,7 @@ package backends import ( "fmt" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "k8s.io/apimachinery/pkg/util/sets" @@ -25,7 +26,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/backends/regional_ig_linker.go b/pkg/backends/regional_ig_linker.go index 8fb23e0741..b7ce6330bf 100644 --- a/pkg/backends/regional_ig_linker.go +++ b/pkg/backends/regional_ig_linker.go @@ -21,7 +21,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/instances" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // RegionalInstanceGroupLinker handles linking backends to InstanceGroups. diff --git a/pkg/backends/syncer.go b/pkg/backends/syncer.go index 45478b258e..3d286f91ab 100644 --- a/pkg/backends/syncer.go +++ b/pkg/backends/syncer.go @@ -25,7 +25,7 @@ import ( "k8s.io/ingress-gce/pkg/healthchecks" lbfeatures "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/cmconfig/controller.go b/pkg/cmconfig/controller.go index 7ea08329ac..89a23ab867 100644 --- a/pkg/cmconfig/controller.go +++ b/pkg/cmconfig/controller.go @@ -12,7 +12,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) // ConfigMapConfigController is the ConfigMap based config controller. 
diff --git a/pkg/cmconfig/controller_test.go b/pkg/cmconfig/controller_test.go index 3d99f92323..7ed0c7365f 100644 --- a/pkg/cmconfig/controller_test.go +++ b/pkg/cmconfig/controller_test.go @@ -15,7 +15,7 @@ import ( informerv1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/common/operator/backendconfig.go b/pkg/common/operator/backendconfig.go index 8f1a017085..2c54168a02 100644 --- a/pkg/common/operator/backendconfig.go +++ b/pkg/common/operator/backendconfig.go @@ -1,7 +1,7 @@ package operator import ( - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/ingress-gce/pkg/annotations" backendconfigv1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1" diff --git a/pkg/composite/composite.go b/pkg/composite/composite.go index b7cccf8bd8..fb5ba07e4b 100644 --- a/pkg/composite/composite.go +++ b/pkg/composite/composite.go @@ -26,7 +26,7 @@ import ( computebeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "k8s.io/ingress-gce/pkg/composite/metrics" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/composite/gen.go b/pkg/composite/gen.go index 602ad4f2ac..f564e6f174 100644 --- a/pkg/composite/gen.go +++ b/pkg/composite/gen.go @@ -29,7 +29,7 @@ import ( compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" compositemetrics "k8s.io/ingress-gce/pkg/composite/metrics" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/composite/gen/main.go b/pkg/composite/gen/main.go index 3ad6f168ba..baeb876b63 100644 --- a/pkg/composite/gen/main.go +++ b/pkg/composite/gen/main.go @@ -78,7 +78,7 @@ package composite import ( "fmt" - "k8s.io/klog" + "k8s.io/klog/v2" computealpha "google.golang.org/api/compute/v0.alpha" computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" diff --git a/pkg/context/context.go 
b/pkg/context/context.go index 9a4d13c975..93f0b7d6f9 100644 --- a/pkg/context/context.go +++ b/pkg/context/context.go @@ -58,7 +58,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/endpointslices" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 0356fcf387..f50ac869e6 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -53,7 +53,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) // LoadBalancerController watches the kubernetes api and adds/removes services diff --git a/pkg/controller/node.go b/pkg/controller/node.go index 5c05e0e92d..0cb5b212ef 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -25,7 +25,7 @@ import ( "k8s.io/ingress-gce/pkg/context" "k8s.io/ingress-gce/pkg/instances" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // NodeController synchronizes the state of the nodes to the unmanaged instance diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index 1507846090..3ef5d1974f 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/ingress-gce/pkg/utils/endpointslices" - "k8s.io/klog" + "k8s.io/klog/v2" api_v1 "k8s.io/api/core/v1" discoveryapi "k8s.io/api/discovery/v1" diff --git a/pkg/crd/crd.go b/pkg/crd/crd.go index 582b320bf3..1c5947c2b7 100644 --- a/pkg/crd/crd.go +++ b/pkg/crd/crd.go @@ -21,7 +21,7 @@ import ( "fmt" "time" - "k8s.io/klog" + "k8s.io/klog/v2" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" diff --git a/pkg/e2e/adapter/beconfig.go 
b/pkg/e2e/adapter/beconfig.go index 9576462e11..aad3b71bc3 100644 --- a/pkg/e2e/adapter/beconfig.go +++ b/pkg/e2e/adapter/beconfig.go @@ -27,7 +27,7 @@ import ( v1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1" "k8s.io/ingress-gce/pkg/apis/backendconfig/v1beta1" client "k8s.io/ingress-gce/pkg/backendconfig/client/clientset/versioned" - "k8s.io/klog" + "k8s.io/klog/v2" ) // BackendConfigCRUD wraps basic CRUD to allow use of v1beta and v1 APIs. diff --git a/pkg/e2e/adapter/ingress.go b/pkg/e2e/adapter/ingress.go index c86db5f86b..756a7d530a 100644 --- a/pkg/e2e/adapter/ingress.go +++ b/pkg/e2e/adapter/ingress.go @@ -14,7 +14,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/e2e/adapter/psc.go b/pkg/e2e/adapter/psc.go index 94d67a3e5f..87b24791c5 100644 --- a/pkg/e2e/adapter/psc.go +++ b/pkg/e2e/adapter/psc.go @@ -14,7 +14,7 @@ import ( sav1beta1 "k8s.io/ingress-gce/pkg/apis/serviceattachment/v1beta1" serviceattachmentclient "k8s.io/ingress-gce/pkg/serviceattachment/client/clientset/versioned" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/e2e/cert.go b/pkg/e2e/cert.go index 50fc6a59d5..bf3e867f05 100644 --- a/pkg/e2e/cert.go +++ b/pkg/e2e/cert.go @@ -31,8 +31,8 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" compute "google.golang.org/api/compute/v1" - "k8s.io/api/core/v1" - "k8s.io/klog" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" ) // CertType indicates the intended environment in which this cert is created. 
diff --git a/pkg/e2e/fixtures.go b/pkg/e2e/fixtures.go index f2081c4004..9c7f9b17b7 100644 --- a/pkg/e2e/fixtures.go +++ b/pkg/e2e/fixtures.go @@ -47,7 +47,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-gce/cmd/echo/app" - "k8s.io/klog" + "k8s.io/klog/v2" utilpointer "k8s.io/utils/pointer" ) diff --git a/pkg/e2e/framework.go b/pkg/e2e/framework.go index 24c2277ff4..b2d8ec9a7f 100644 --- a/pkg/e2e/framework.go +++ b/pkg/e2e/framework.go @@ -44,7 +44,7 @@ import ( frontendconfigclient "k8s.io/ingress-gce/pkg/frontendconfig/client/clientset/versioned" serviceattachment "k8s.io/ingress-gce/pkg/serviceattachment/client/clientset/versioned" svcnegclient "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned" - "k8s.io/klog" + "k8s.io/klog/v2" ) // Options for the test framework. diff --git a/pkg/e2e/helpers.go b/pkg/e2e/helpers.go index b58125cc63..8b916c501a 100644 --- a/pkg/e2e/helpers.go +++ b/pkg/e2e/helpers.go @@ -55,7 +55,7 @@ import ( "k8s.io/ingress-gce/pkg/psc" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" - "k8s.io/klog" + "k8s.io/klog/v2" utilpointer "k8s.io/utils/pointer" ) diff --git a/pkg/e2e/sandbox.go b/pkg/e2e/sandbox.go index d0b1496325..45315e1adf 100644 --- a/pkg/e2e/sandbox.go +++ b/pkg/e2e/sandbox.go @@ -20,7 +20,7 @@ import ( "context" "sync" - "k8s.io/klog" + "k8s.io/klog/v2" "sort" "testing" diff --git a/pkg/e2e/status.go b/pkg/e2e/status.go index 0e438db5bc..bc34886f33 100644 --- a/pkg/e2e/status.go +++ b/pkg/e2e/status.go @@ -19,14 +19,15 @@ package e2e import ( "context" "fmt" - "k8s.io/apimachinery/pkg/api/errors" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" informerv1 "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/tools/cache" - "k8s.io/klog" + "k8s.io/klog/v2" ) // IngressStability denotes the stabilization status of all Ingresses in a sandbox. 
diff --git a/pkg/experimental/workload/controller.go b/pkg/experimental/workload/controller.go index 1ae56ffdc2..10410458e9 100644 --- a/pkg/experimental/workload/controller.go +++ b/pkg/experimental/workload/controller.go @@ -38,7 +38,7 @@ import ( workloadclient "k8s.io/ingress-gce/pkg/experimental/workload/client/clientset/versioned" informerworkload "k8s.io/ingress-gce/pkg/experimental/workload/client/informers/externalversions/workload/v1alpha1" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const controllerName = "workload-controller.k8s.io" diff --git a/pkg/experimental/workload/daemon/daemon.go b/pkg/experimental/workload/daemon/daemon.go index 1e375d9e0b..2f0afa0242 100644 --- a/pkg/experimental/workload/daemon/daemon.go +++ b/pkg/experimental/workload/daemon/daemon.go @@ -32,7 +32,7 @@ import ( workloadclient "k8s.io/ingress-gce/pkg/experimental/workload/client/clientset/versioned" daemonutils "k8s.io/ingress-gce/pkg/experimental/workload/daemon/utils" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) // RunDaemon executes the workload daemon diff --git a/pkg/experimental/workload/daemon/provider/gce/vm.go b/pkg/experimental/workload/daemon/provider/gce/vm.go index 02854ccf1d..3917100835 100644 --- a/pkg/experimental/workload/daemon/provider/gce/vm.go +++ b/pkg/experimental/workload/daemon/provider/gce/vm.go @@ -32,7 +32,7 @@ import ( "k8s.io/client-go/util/homedir" "k8s.io/ingress-gce/pkg/experimental/metadata" daemonutils "k8s.io/ingress-gce/pkg/experimental/workload/daemon/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // VM represents a VM instance running on Google Cloud. 
diff --git a/pkg/experimental/workload/daemon/utils/kube-config.go b/pkg/experimental/workload/daemon/utils/kube-config.go index 7da8d6d17d..93af356572 100644 --- a/pkg/experimental/workload/daemon/utils/kube-config.go +++ b/pkg/experimental/workload/daemon/utils/kube-config.go @@ -22,7 +22,7 @@ import ( "path/filepath" "text/template" - "k8s.io/klog" + "k8s.io/klog/v2" ) // GenKubeConfigForKSA generates a KubeConfig to access the cluster using a Kubernetes service account diff --git a/pkg/firewalls/controller.go b/pkg/firewalls/controller.go index e8ce8ae31c..8b55a39334 100644 --- a/pkg/firewalls/controller.go +++ b/pkg/firewalls/controller.go @@ -35,7 +35,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/firewalls/firewalls.go b/pkg/firewalls/firewalls.go index 4a6757089f..ffc8122d97 100644 --- a/pkg/firewalls/firewalls.go +++ b/pkg/firewalls/firewalls.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/utils" namer_util "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" netset "k8s.io/utils/net" ) diff --git a/pkg/firewalls/firewalls_l4.go b/pkg/firewalls/firewalls_l4.go index 8844f4d0c7..8cede6bf40 100644 --- a/pkg/firewalls/firewalls_l4.go +++ b/pkg/firewalls/firewalls_l4.go @@ -25,7 +25,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/fuzz/features/backendconfig_example.go b/pkg/fuzz/features/backendconfig_example.go index 64b0b76009..9c19deda1b 100644 --- a/pkg/fuzz/features/backendconfig_example.go +++ b/pkg/fuzz/features/backendconfig_example.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/networking/v1" "k8s.io/ingress-gce/pkg/annotations" 
"k8s.io/ingress-gce/pkg/fuzz" - "k8s.io/klog" + "k8s.io/klog/v2" ) // BackendConfigExample is an example of how a Feature will integrate with the diff --git a/pkg/fuzz/features/iap.go b/pkg/fuzz/features/iap.go index 1938f30d00..fd89ffcfee 100644 --- a/pkg/fuzz/features/iap.go +++ b/pkg/fuzz/features/iap.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/networking/v1" "k8s.io/ingress-gce/pkg/annotations" "k8s.io/ingress-gce/pkg/fuzz" - "k8s.io/klog" + "k8s.io/klog/v2" ) // IAP is a feature in BackendConfig that supports using GCP Identity-Aware Proxy (IAP). diff --git a/pkg/fuzz/features/neg.go b/pkg/fuzz/features/neg.go index 58a4660478..2cb590b252 100644 --- a/pkg/fuzz/features/neg.go +++ b/pkg/fuzz/features/neg.go @@ -27,7 +27,7 @@ import ( "strconv" "strings" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" v1 "k8s.io/api/core/v1" diff --git a/pkg/fuzz/gcp.go b/pkg/fuzz/gcp.go index 0846821427..540aafa12d 100644 --- a/pkg/fuzz/gcp.go +++ b/pkg/fuzz/gcp.go @@ -27,7 +27,7 @@ import ( computebeta "google.golang.org/api/compute/v0.beta" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "k8s.io/klog" + "k8s.io/klog/v2" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" diff --git a/pkg/fuzz/validator.go b/pkg/fuzz/validator.go index cb40e86fe9..a4c99789c3 100644 --- a/pkg/fuzz/validator.go +++ b/pkg/fuzz/validator.go @@ -35,7 +35,7 @@ import ( frontendconfig "k8s.io/ingress-gce/pkg/apis/frontendconfig/v1beta1" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) // pathForDefaultBackend is a unique string that will not match any path. 
diff --git a/pkg/fuzz/validator_test.go b/pkg/fuzz/validator_test.go index f258e71f20..806ae0fd9e 100644 --- a/pkg/fuzz/validator_test.go +++ b/pkg/fuzz/validator_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/ingress-gce/cmd/glbc/app" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) var baseIngress = &networkingv1.Ingress{ diff --git a/pkg/healthchecks/healthchecks.go b/pkg/healthchecks/healthchecks.go index b7fd58b418..7442878176 100644 --- a/pkg/healthchecks/healthchecks.go +++ b/pkg/healthchecks/healthchecks.go @@ -31,7 +31,7 @@ import ( "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/healthchecks/healthchecks_l4.go b/pkg/healthchecks/healthchecks_l4.go index b1ba09e3c9..6d7fc1effc 100644 --- a/pkg/healthchecks/healthchecks_l4.go +++ b/pkg/healthchecks/healthchecks_l4.go @@ -32,7 +32,7 @@ import ( "k8s.io/ingress-gce/pkg/firewalls" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/healthchecks/healthchecks_test.go b/pkg/healthchecks/healthchecks_test.go index 30f45d00bb..a687e20c0c 100644 --- a/pkg/healthchecks/healthchecks_test.go +++ b/pkg/healthchecks/healthchecks_test.go @@ -42,7 +42,7 @@ import ( "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" namer_util "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/instances/instances.go b/pkg/instances/instances.go index 6453dac7c8..1aeada67d4 100644 --- a/pkg/instances/instances.go +++ b/pkg/instances/instances.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" core "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/util/sets" diff --git a/pkg/klog/klog.go b/pkg/klog/klog.go index 51a914b380..4508368092 100644 --- a/pkg/klog/klog.go +++ b/pkg/klog/klog.go @@ -16,7 +16,7 @@ limitations under the License. package klog -import "k8s.io/klog" +import "k8s.io/klog/v2" func init() { klog.InitFlags(nil) diff --git a/pkg/l4lb/l4controller.go b/pkg/l4lb/l4controller.go index 4d0007900d..3fc7ae63e4 100644 --- a/pkg/l4lb/l4controller.go +++ b/pkg/l4lb/l4controller.go @@ -39,7 +39,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/l4lb/l4lbcommon.go b/pkg/l4lb/l4lbcommon.go index e04daef7f3..43c635854c 100644 --- a/pkg/l4lb/l4lbcommon.go +++ b/pkg/l4lb/l4lbcommon.go @@ -28,7 +28,7 @@ import ( "k8s.io/ingress-gce/pkg/loadbalancers" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/l4lb/l4netlbcontroller.go b/pkg/l4lb/l4netlbcontroller.go index f9cbb1a51c..f4fa9a8270 100644 --- a/pkg/l4lb/l4netlbcontroller.go +++ b/pkg/l4lb/l4netlbcontroller.go @@ -36,7 +36,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) const l4NetLBControllerName = "l4netlb-controller" diff --git a/pkg/l4lb/l4netlbcontroller_test.go b/pkg/l4lb/l4netlbcontroller_test.go index 1d7f9d547f..1f6edf7021 100644 --- a/pkg/l4lb/l4netlbcontroller_test.go +++ b/pkg/l4lb/l4netlbcontroller_test.go @@ -48,7 +48,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/l4lb/metrics/metrics.go b/pkg/l4lb/metrics/metrics.go index 5db8aa75ff..ef86e4a74d 100644 --- a/pkg/l4lb/metrics/metrics.go +++ 
b/pkg/l4lb/metrics/metrics.go @@ -20,7 +20,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/loadbalancers/address_manager.go b/pkg/loadbalancers/address_manager.go index 496187d2be..991abce43a 100644 --- a/pkg/loadbalancers/address_manager.go +++ b/pkg/loadbalancers/address_manager.go @@ -26,7 +26,7 @@ import ( compute "google.golang.org/api/compute/v1" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" - "k8s.io/klog" + "k8s.io/klog/v2" ) // IPAddressType defines if IP address is Managed by controller diff --git a/pkg/loadbalancers/addresses.go b/pkg/loadbalancers/addresses.go index 006e41f0e5..b38a3dd914 100644 --- a/pkg/loadbalancers/addresses.go +++ b/pkg/loadbalancers/addresses.go @@ -26,7 +26,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) // checkStaticIP reserves a regional or global static IP allocated to the Forwarding Rule. 
diff --git a/pkg/loadbalancers/certificates.go b/pkg/loadbalancers/certificates.go index b85b591af7..44203d393e 100644 --- a/pkg/loadbalancers/certificates.go +++ b/pkg/loadbalancers/certificates.go @@ -23,7 +23,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const SslCertificateMissing = "SslCertificateMissing" diff --git a/pkg/loadbalancers/features/l7ilb.go b/pkg/loadbalancers/features/l7ilb.go index 38b789229c..33fe57e423 100644 --- a/pkg/loadbalancers/features/l7ilb.go +++ b/pkg/loadbalancers/features/l7ilb.go @@ -20,11 +20,12 @@ import ( "context" "errors" "fmt" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/forwarding_rules.go b/pkg/loadbalancers/forwarding_rules.go index 93e342e1f9..bb763afccd 100644 --- a/pkg/loadbalancers/forwarding_rules.go +++ b/pkg/loadbalancers/forwarding_rules.go @@ -31,7 +31,7 @@ import ( "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/l4.go b/pkg/loadbalancers/l4.go index bcce040449..93d5f40f21 100644 --- a/pkg/loadbalancers/l4.go +++ b/pkg/loadbalancers/l4.go @@ -35,7 +35,7 @@ import ( "k8s.io/ingress-gce/pkg/metrics" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/l4netlb.go b/pkg/loadbalancers/l4netlb.go index 05822645da..0e03a8af06 100644 --- a/pkg/loadbalancers/l4netlb.go +++ b/pkg/loadbalancers/l4netlb.go @@ -34,7 +34,7 @@ import ( "k8s.io/ingress-gce/pkg/metrics" 
"k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/l7.go b/pkg/loadbalancers/l7.go index e7f5b442ca..17e2b139ca 100644 --- a/pkg/loadbalancers/l7.go +++ b/pkg/loadbalancers/l7.go @@ -37,7 +37,7 @@ import ( "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/l7s.go b/pkg/loadbalancers/l7s.go index 425c4d5e6d..d37984f2a1 100644 --- a/pkg/loadbalancers/l7s.go +++ b/pkg/loadbalancers/l7s.go @@ -29,7 +29,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" namer_util "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/loadbalancers/target_proxies.go b/pkg/loadbalancers/target_proxies.go index 29b70b994f..d4f657cabb 100644 --- a/pkg/loadbalancers/target_proxies.go +++ b/pkg/loadbalancers/target_proxies.go @@ -24,7 +24,7 @@ import ( "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/loadbalancers/url_maps.go b/pkg/loadbalancers/url_maps.go index 9752ff2929..f7cf809884 100644 --- a/pkg/loadbalancers/url_maps.go +++ b/pkg/loadbalancers/url_maps.go @@ -26,7 +26,7 @@ import ( "k8s.io/ingress-gce/pkg/events" "k8s.io/ingress-gce/pkg/translator" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // ensureComputeURLMap retrieves the current URLMap and overwrites it if incorrect. 
If the resource diff --git a/pkg/metrics/features.go b/pkg/metrics/features.go index 23aeb627ee..71f8c6ecdc 100644 --- a/pkg/metrics/features.go +++ b/pkg/metrics/features.go @@ -24,7 +24,7 @@ import ( "k8s.io/ingress-gce/pkg/annotations" frontendconfigv1beta1 "k8s.io/ingress-gce/pkg/apis/frontendconfig/v1beta1" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) type feature string diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 1d71c319d5..2a4645c665 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -29,7 +29,7 @@ import ( pscmetrics "k8s.io/ingress-gce/pkg/psc/metrics" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/version" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/neg/controller.go b/pkg/neg/controller.go index 6de2c3cd8b..a4a42a0de1 100644 --- a/pkg/neg/controller.go +++ b/pkg/neg/controller.go @@ -49,11 +49,10 @@ import ( negtypes "k8s.io/ingress-gce/pkg/neg/types" svcnegclient "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/endpointslices" namer2 "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) func init() { @@ -105,6 +104,8 @@ type Controller struct { // runL4 indicates whether to run NEG controller that processes L4 ILB services runL4 bool + + logger klog.Logger } // NewController returns a network endpoint group controller. @@ -137,22 +138,25 @@ func NewController( enableAsm bool, asmServiceNEGSkipNamespaces []string, enableEndpointSlices bool, + logger klog.Logger, ) *Controller { + logger = logger.WithName("NEGController") + // init event recorder // TODO: move event recorder initializer to main. Reuse it among controllers. 
eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartStructuredLogging(0) eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ Interface: kubeClient.CoreV1().Events(""), }) negScheme := runtime.NewScheme() err := scheme.AddToScheme(negScheme) if err != nil { - klog.Errorf("Errored adding default scheme to event recorder: %q", err) + logger.Error(err, "Errored adding default scheme to event recorder") } err = svcnegv1beta1.AddToScheme(negScheme) if err != nil { - klog.Errorf("Errored adding NEG CRD scheme to event recorder: %q", err) + logger.Error(err, "Errored adding NEG CRD scheme to event recorder") } recorder := eventBroadcaster.NewRecorder(negScheme, apiv1.EventSource{Component: "neg-controller"}) @@ -178,14 +182,18 @@ func NewController( nodeInformer.GetIndexer(), svcNegInformer.GetIndexer(), enableNonGcpMode, - enableEndpointSlices) + enableEndpointSlices, + logger) var reflector readiness.Reflector if enableReadinessReflector { reflector = readiness.NewReadinessReflector( kubeClient, podInformer.GetIndexer(), - cloud, manager) + cloud, + manager, + logger, + ) } else { reflector = &readiness.NoopReflector{} } @@ -211,13 +219,14 @@ func NewController( reflector: reflector, collector: controllerMetrics, runL4: runL4Controller, + logger: logger, } if runIngress { ingressInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { addIng := obj.(*v1.Ingress) if !utils.IsGLBCIngress(addIng) { - klog.V(4).Infof("Ignoring add for ingress %v based on annotation %v", common.NamespacedName(addIng), annotations.IngressClassKey) + logger.V(4).Info("Ignoring add for ingress based on annotation", "ingress", klog.KObj(addIng), "annotation", annotations.IngressClassKey) return } negController.enqueueIngressServices(addIng) @@ -225,7 +234,7 @@ func NewController( DeleteFunc: func(obj interface{}) { delIng := obj.(*v1.Ingress) if !utils.IsGLBCIngress(delIng) { - 
klog.V(4).Infof("Ignoring delete for ingress %v based on annotation %v", common.NamespacedName(delIng), annotations.IngressClassKey) + logger.V(4).Info("Ignoring delete for ingress based on annotation", "ingress", klog.KObj(delIng), "annotation", annotations.IngressClassKey) return } negController.enqueueIngressServices(delIng) @@ -236,7 +245,7 @@ func NewController( // Check if ingress class changed and previous class was a GCE ingress // Ingress class change may require cleanup so enqueue related services if !utils.IsGLBCIngress(curIng) && !utils.IsGLBCIngress(oldIng) { - klog.V(4).Infof("Ignoring update for ingress %v based on annotation %v", common.NamespacedName(curIng), annotations.IngressClassKey) + logger.V(4).Info("Ignoring update for ingress based on annotation", "ingress", klog.KObj(curIng), "annotation", annotations.IngressClassKey) return } keys := gatherIngressServiceKeys(oldIng) @@ -300,7 +309,7 @@ func NewController( currentNode := cur.(*apiv1.Node) candidateNodeCheck := utils.CandidateNodesPredicateIncludeUnreadyExcludeUpgradingNodes if candidateNodeCheck(oldNode) != candidateNodeCheck(currentNode) { - klog.Infof("Node %q has changed, enqueueing", currentNode.Name) + logger.Info("Node has changed, enqueueing", "node", klog.KObj(currentNode)) negController.enqueueNode(currentNode) } } @@ -325,13 +334,13 @@ func NewController( func (c *Controller) Run(stopCh <-chan struct{}) { wait.PollUntil(5*time.Second, func() (bool, error) { - klog.V(2).Infof("Waiting for initial sync") + c.logger.V(2).Info("Waiting for initial sync") return c.hasSynced(), nil }, stopCh) - klog.V(2).Infof("Starting network endpoint group controller") + c.logger.V(2).Info("Starting network endpoint group controller") defer func() { - klog.V(2).Infof("Shutting down network endpoint group controller") + c.logger.V(2).Info("Shutting down network endpoint group controller") c.stop() }() @@ -350,20 +359,20 @@ func (c *Controller) Run(stopCh <-chan struct{}) { func (c *Controller) 
IsHealthy() error { // log the last node sync - klog.V(5).Infof("Last node sync was at %v", c.nodeSyncTracker.Get()) + c.logger.V(5).Info("Last node sync time", "time", c.nodeSyncTracker.Get()) // check if last seen service and endpoint processing is more than an hour ago if c.syncTracker.Get().Before(time.Now().Add(-time.Hour)) { msg := fmt.Sprintf("NEG controller has not processed any service "+ "and endpoint updates for more than an hour. Something went wrong. "+ "Last sync was on %v", c.syncTracker.Get()) - klog.Error(msg) + c.logger.Error(nil, msg) return fmt.Errorf(msg) } return nil } func (c *Controller) stop() { - klog.V(2).Infof("Shutting down network endpoint group controller") + c.logger.V(2).Info("Shutting down network endpoint group controller") c.serviceQueue.ShutDown() c.endpointQueue.ShutDown() c.nodeQueue.ShutDown() @@ -415,7 +424,7 @@ func (c *Controller) processEndpoint(key string) { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.Errorf("Failed to split endpoint namespaced key %q: %v", key, err) + c.logger.Error(err, "Failed to split endpoint namespaced key", "key", key) return } c.manager.Sync(namespace, name) @@ -490,7 +499,7 @@ func (c *Controller) processService(key string) error { } } if len(svcPortInfoMap) != 0 || len(destinationRulesPortInfoMap) != 0 { - klog.V(2).Infof("Syncing service %q", key) + c.logger.V(2).Info("Syncing service", "service", key) if err = c.syncNegStatusAnnotation(namespace, name, svcPortInfoMap); err != nil { return err } @@ -503,7 +512,7 @@ func (c *Controller) processService(key string) error { return err } // do not need Neg - klog.V(4).Infof("Service %q does not need any NEG. Skipping", key) + c.logger.V(3).Info("Service does not need any NEG. 
Skipping", "service", key) c.collector.DeleteNegService(key) // neg annotation is not found or NEG is not enabled c.manager.StopSyncer(namespace, name) @@ -526,7 +535,7 @@ func (c *Controller) mergeIngressPortInfo(service *apiv1.Service, name types.Nam if negAnnotation != nil && negAnnotation.NEGEnabledForIngress() { // Only service ports referenced by ingress are synced for NEG ings := getIngressServicesFromStore(c.ingressLister, service) - ingressSvcPortTuples := gatherPortMappingUsedByIngress(ings, service) + ingressSvcPortTuples := gatherPortMappingUsedByIngress(ings, service, c.logger) ingressPortInfoMap := negtypes.NewPortInfoMap(name.Namespace, name.Name, ingressSvcPortTuples, c.namer, true, nil) if err := portInfoMap.Merge(ingressPortInfoMap); err != nil { return fmt.Errorf("failed to merge service ports referenced by ingress (%v): %w", ingressPortInfoMap, err) @@ -583,7 +592,7 @@ func (c *Controller) mergeVmIpNEGsPortInfo(service *apiv1.Service, name types.Na // Only process ILB services after L4 controller has marked it with v2 finalizer. if !utils.IsSubsettingL4ILBService(service) { msg := fmt.Sprintf("Ignoring ILB Service %s, namespace %s as it does not have the v2 finalizer", service.Name, service.Namespace) - klog.Warning(msg) + c.logger.Info(msg) c.recorder.Eventf(service, apiv1.EventTypeWarning, "ProcessServiceSkipped", msg) return nil } @@ -648,13 +657,13 @@ func (c *Controller) getCSMPortInfoMap(namespace, name string, service *apiv1.Se servicePortInfoMap := make(negtypes.PortInfoMap) if c.enableASM { // Find all destination rules that using this service. 
- destinationRules := getDestinationRulesFromStore(c.destinationRuleLister, service) + destinationRules := getDestinationRulesFromStore(c.destinationRuleLister, service, c.logger) // Fill all service ports into portinfomap servicePorts := gatherPortMappingFromService(service) for namespacedName, destinationRule := range destinationRules { destinationRulePortInfoMap, err := negtypes.NewPortInfoMapWithDestinationRule(namespace, name, servicePorts, c.namer, false, destinationRule) if err != nil { - klog.Warningf("DestinationRule(%s) contains duplicated subset, creating NEGs for the newer ones. %s", namespacedName.Name, err) + c.logger.Error(err, "DestinationRule contains duplicated subset, creating NEGs for the newer ones.", "destinationRule", namespacedName.Name) } if err := destinationRulesPortInfoMap.Merge(destinationRulePortInfoMap); err != nil { return servicePortInfoMap, destinationRulesPortInfoMap, fmt.Errorf("failed to merge service ports referenced by Istio:DestinationRule (%v): %w", destinationRulePortInfoMap, err) @@ -665,9 +674,9 @@ func (c *Controller) getCSMPortInfoMap(namespace, name string, service *apiv1.Se } // Create NEGs for every ports of the services. 
if service.Spec.Selector == nil || len(service.Spec.Selector) == 0 { - klog.Infof("Skip NEG creation for services that with no selector: %s:%s", namespace, name) + c.logger.Info("Skip NEG creation for services that with no selector", "service", klog.KRef(namespace, name)) } else if contains(c.asmServiceNEGSkipNamespaces, namespace) { - klog.Infof("Skip NEG creation for services in namespace: %s", namespace) + c.logger.Info("Skip NEG creation for services in namespace", "namespace", namespace) } else { servicePortInfoMap = negtypes.NewPortInfoMap(namespace, name, servicePorts, c.namer, false, nil) } @@ -694,7 +703,7 @@ func (c *Controller) syncNegStatusAnnotation(namespace, name string, portMap neg if _, ok := service.Annotations[annotations.NEGStatusKey]; ok { newSvcObjectMeta := service.ObjectMeta.DeepCopy() delete(newSvcObjectMeta.Annotations, annotations.NEGStatusKey) - klog.V(2).Infof("Removing NEG status annotation from service: %s/%s", namespace, name) + c.logger.V(2).Info("Removing NEG status annotation from service", "service", klog.KRef(namespace, name)) return patch.PatchServiceObjectMetadata(coreClient, service, *newSvcObjectMeta) } // service doesn't have the expose NEG annotation and doesn't need update @@ -716,7 +725,7 @@ func (c *Controller) syncNegStatusAnnotation(namespace, name string, portMap neg newSvcObjectMeta.Annotations = make(map[string]string) } newSvcObjectMeta.Annotations[annotations.NEGStatusKey] = annotation - klog.V(2).Infof("Updating NEG visibility annotation %q on service %s/%s.", annotation, namespace, name) + c.logger.V(2).Info("Updating NEG visibility annotation on service", "annotation", annotation, "service", klog.KRef(namespace, name)) return patch.PatchServiceObjectMetadata(coreClient, service, *newSvcObjectMeta) } @@ -734,7 +743,7 @@ func (c *Controller) syncDestinationRuleNegStatusAnnotation(namespace, destinati } if len(portmap) == 0 { delete(drAnnotations, annotations.NEGStatusKey) - klog.V(2).Infof("Removing NEG status 
annotation from DestinationRule: %s/%s", namespace, destinationRule) + c.logger.V(2).Info("Removing NEG status annotation from DestinationRule", "namespace", namespace, "destinationRule", destinationRule) } else { negStatus := annotations.NewDestinationRuleNegStatus(zones, portmap.ToPortSubsetNegMap()) negStatuAnnotation, err := negStatus.Marshal() @@ -757,7 +766,7 @@ func (c *Controller) syncDestinationRuleNegStatusAnnotation(namespace, destinati if err != nil { return err } - klog.V(2).Infof("Updating NEG visibility annotation %q on Istio:DestinationRule %s/%s.", string(patchBytes), namespace, destinationRuleName) + c.logger.V(2).Info("Updating NEG visibility annotation on Istio:DestinationRule", "annotation", string(patchBytes), "namespace", namespace, "destinationRuleName", destinationRuleName) _, err = dsClient.Patch(context.TODO(), destinationRuleName, apimachinerytypes.MergePatchType, patchBytes, metav1.PatchOptions{}) return err } @@ -769,9 +778,9 @@ func (c *Controller) handleErr(err error, key interface{}) { } msg := fmt.Sprintf("error processing service %q: %v", key, err) - klog.Errorf(msg) + c.logger.Error(nil, msg) if service, exists, err := c.serviceLister.GetByKey(key.(string)); err != nil { - klog.Warningf("Failed to retrieve service %q from store: %v", key.(string), err) + c.logger.Error(err, "Failed to retrieve service from store", "service", key.(string)) } else if exists { c.recorder.Eventf(service.(*apiv1.Service), apiv1.EventTypeWarning, "ProcessServiceFailed", msg) } @@ -781,7 +790,7 @@ func (c *Controller) handleErr(err error, key interface{}) { func (c *Controller) enqueueEndpoint(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - klog.Errorf("Failed to generate endpoint key: %v", err) + c.logger.Error(err, "Failed to generate endpoint key") return } c.endpointQueue.Add(key) @@ -792,17 +801,17 @@ func (c *Controller) enqueueEndpointSlice(obj interface{}) { if !ok { tombstone, ok := 
obj.(cache.DeletedFinalStateUnknown) if !ok { - klog.Errorf("Unexpected object type: %T, expected cache.DeletedFinalStateUnknown", obj) + c.logger.Error(nil, "Unexpected object type, expected cache.DeletedFinalStateUnknown", "objectTypeFound", fmt.Sprintf("%T", obj)) return } if endpointSlice, ok = tombstone.Obj.(*discovery.EndpointSlice); !ok { - klog.Errorf("Unexpected tombstone object type: %T, expected *discovery.EndpointSlice", obj) + c.logger.Error(nil, "Unexpected tombstone object, expected *discovery.EndpointSlice", "objectTypeFound", fmt.Sprintf("%T", obj)) return } } key, err := endpointslices.EndpointSlicesServiceKey(endpointSlice) if err != nil { - klog.Errorf("Failed to find a service label inside endpoint slice %v: %v", endpointSlice, err) + c.logger.Error(err, "Failed to find a service label inside endpoint slice", "endpointSlice", klog.KObj(endpointSlice)) return } c.endpointQueue.Add(key) @@ -811,7 +820,7 @@ func (c *Controller) enqueueEndpointSlice(obj interface{}) { func (c *Controller) enqueueNode(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - klog.Errorf("Failed to generate node key: %v", err) + c.logger.Error(err, "Failed to generate node key") return } c.nodeQueue.Add(key) @@ -820,7 +829,7 @@ func (c *Controller) enqueueNode(obj interface{}) { func (c *Controller) enqueueService(obj interface{}) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { - klog.Errorf("Failed to generate service key: %v", err) + c.logger.Error(err, "Failed to generate service key") return } c.serviceQueue.Add(key) @@ -843,12 +852,12 @@ func (c *Controller) enqueueIngressServices(ing *v1.Ingress) { func (c *Controller) enqueueDestinationRule(obj interface{}) { drus, ok := obj.(*unstructured.Unstructured) if !ok { - klog.Errorf("Failed to convert informer object to Unstructured object") + c.logger.Error(nil, "Failed to convert informer object to Unstructured object") return } 
targetServiceNamespace, drHost, _, err := castToDestinationRule(drus) if err != nil { - klog.Errorf("Failed to convert informer object to DestinationRule") + c.logger.Error(nil, "Failed to convert informer object to DestinationRule") return } svcKey := utils.ServiceKeyFunc(targetServiceNamespace, drHost) @@ -857,13 +866,13 @@ func (c *Controller) enqueueDestinationRule(obj interface{}) { func (c *Controller) gc() { if err := c.manager.GC(); err != nil { - klog.Errorf("NEG controller garbage collection failed: %v", err) + c.logger.Error(err, "NEG controller garbage collection failed") } } // gatherPortMappingUsedByIngress returns a map containing port:targetport // of all service ports of the service that are referenced by ingresses -func gatherPortMappingUsedByIngress(ings []v1.Ingress, svc *apiv1.Service) negtypes.SvcPortTupleSet { +func gatherPortMappingUsedByIngress(ings []v1.Ingress, svc *apiv1.Service, logger klog.Logger) negtypes.SvcPortTupleSet { ingressSvcPortTuples := make(negtypes.SvcPortTupleSet) for _, ing := range ings { if utils.IsGLBCIngress(&ing) { @@ -871,7 +880,7 @@ func gatherPortMappingUsedByIngress(ings []v1.Ingress, svc *apiv1.Service) negty if id.Service.Name == svc.Name && id.Service.Namespace == svc.Namespace { servicePort := translator.ServicePort(*svc, id.Port) if servicePort == nil { - klog.Warningf("Port %+v in Service %q not found", id.Port, id.Service.String()) + logger.Error(nil, "Port not found in service", "port", fmt.Sprintf("%+v", id.Port), "service", id.Service.String()) return false } ingressSvcPortTuples.Insert(negtypes.SvcPortTuple{ @@ -937,13 +946,13 @@ func gatherPortMappingFromService(svc *apiv1.Service) negtypes.SvcPortTupleSet { // getDestinationRulesFromStore returns all DestinationRules that referring service svc. // Please notice that a DestionationRule can point to a service in a different namespace. 
-func getDestinationRulesFromStore(store cache.Store, svc *apiv1.Service) (drs map[apimachinerytypes.NamespacedName]*istioV1alpha3.DestinationRule) { +func getDestinationRulesFromStore(store cache.Store, svc *apiv1.Service, logger klog.Logger) (drs map[apimachinerytypes.NamespacedName]*istioV1alpha3.DestinationRule) { drs = make(map[apimachinerytypes.NamespacedName]*istioV1alpha3.DestinationRule) for _, obj := range store.List() { drUnstructed := obj.(*unstructured.Unstructured) targetServiceNamespace, drHost, dr, err := castToDestinationRule(drUnstructed) if err != nil { - klog.Errorf("Failed to cast Unstructured DestinationRule to DestinationRule.") + logger.Error(err, "Failed to cast Unstructured DestinationRule to DestinationRule") continue } diff --git a/pkg/neg/controller_test.go b/pkg/neg/controller_test.go index c4dfd4e03e..7871c714ab 100644 --- a/pkg/neg/controller_test.go +++ b/pkg/neg/controller_test.go @@ -49,6 +49,7 @@ import ( negtypes "k8s.io/ingress-gce/pkg/neg/types" svcnegclient "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) @@ -152,6 +153,7 @@ func newTestControllerWithParamsAndContext(kubeClient kubernetes.Interface, test true, //enableAsm []string{}, enableEndpointSlices, + klog.TODO(), ) } func newTestController(kubeClient kubernetes.Interface) *Controller { @@ -654,7 +656,7 @@ func TestGatherPortMappingUsedByIngress(t *testing.T) { for _, tc := range testCases { controller := newTestController(fake.NewSimpleClientset()) defer controller.stop() - portTupleSet := gatherPortMappingUsedByIngress(tc.ings, newTestService(controller, true, []int32{})) + portTupleSet := gatherPortMappingUsedByIngress(tc.ings, newTestService(controller, true, []int32{}), klog.TODO()) if len(portTupleSet) != len(tc.expect) { t.Errorf("For test case %q, expect %d ports, but got %d.", tc.desc, len(tc.expect), len(portTupleSet)) } diff --git a/pkg/neg/manager.go 
b/pkg/neg/manager.go index 7e3a30aa00..93aa49157e 100644 --- a/pkg/neg/manager.go +++ b/pkg/neg/manager.go @@ -45,7 +45,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" utilpointer "k8s.io/utils/pointer" ) @@ -97,6 +97,8 @@ type syncerManager struct { // zoneMap keeps track of the last set of zones the neg controller // has seen. zoneMap is protected by the mu mutex. zoneMap map[string]struct{} + + logger klog.Logger } func newSyncerManager(namer negtypes.NetworkEndpointGroupNamer, @@ -112,11 +114,12 @@ func newSyncerManager(namer negtypes.NetworkEndpointGroupNamer, nodeLister cache.Indexer, svcNegLister cache.Indexer, enableNonGcpMode bool, - enableEndpointSlices bool) *syncerManager { + enableEndpointSlices bool, + logger klog.Logger) *syncerManager { zones, err := zoneGetter.ListZones(utils.AllNodesPredicate) if err != nil { - klog.V(3).Infof("Unable to initialize zone map in neg manager: %s", err) + logger.V(3).Info("Unable to initialize zone map in neg manager", "err", err) } zoneMap := make(map[string]struct{}) for _, zone := range zones { @@ -140,6 +143,7 @@ func newSyncerManager(namer negtypes.NetworkEndpointGroupNamer, enableNonGcpMode: enableNonGcpMode, enableEndpointSlices: enableEndpointSlices, zoneMap: zoneMap, + logger: logger, } } @@ -164,7 +168,7 @@ func (manager *syncerManager) EnsureSyncers(namespace, name string, newPorts neg // Hence, Existing NEG syncer for the service port will always work manager.removeCommonPorts(adds, removes) manager.svcPortMap[key] = newPorts - klog.V(3).Infof("EnsureSyncer %v/%v: syncing %v ports, removing %v ports, adding %v ports", namespace, name, newPorts, removes, adds) + manager.logger.V(3).Info("EnsureSyncer is syncing ports", "service", klog.KRef(namespace, name), "ports", newPorts, "portsToRemove", removes, "portsToAdd", adds) errList := []error{} successfulSyncers := 0 @@ -209,7 +213,7 @@ func (manager 
*syncerManager) EnsureSyncers(namespace, name string, newPorts neg // determine the implementation that calculates NEG endpoints on each sync. epc := negsyncer.GetEndpointsCalculator(manager.nodeLister, manager.podLister, manager.zoneGetter, - syncerKey, portInfo.EpCalculatorMode) + syncerKey, portInfo.EpCalculatorMode, manager.logger.WithValues("service", klog.KRef(syncerKey.Namespace, syncerKey.Name), "negName", syncerKey.NegName)) syncer = negsyncer.NewTransactionSyncer( syncerKey, manager.recorder, @@ -227,6 +231,7 @@ func (manager *syncerManager) EnsureSyncers(namespace, name string, newPorts neg manager.svcNegClient, !manager.namer.IsNEG(portInfo.NegName), manager.enableEndpointSlices, + manager.logger, ) manager.syncerMap[syncerKey] = syncer } @@ -298,7 +303,7 @@ func (manager *syncerManager) SyncNodes() { func (manager *syncerManager) updateZoneMap() bool { zones, err := manager.zoneGetter.ListZones(utils.AllNodesPredicate) if err != nil { - klog.Warningf("Unable to list zones: %s", err) + manager.logger.Error(err, "Unable to list zones") return false } @@ -323,8 +328,8 @@ func (manager *syncerManager) ShutDown() { // GC garbage collects syncers and NEGs. 
func (manager *syncerManager) GC() error { - klog.V(2).Infof("Start NEG garbage collection.") - defer klog.V(2).Infof("NEG garbage collection finished.") + manager.logger.V(2).Info("Start NEG garbage collection.") + defer manager.logger.V(2).Info("NEG garbage collection finished.") start := time.Now() // Garbage collect Syncers manager.garbageCollectSyncer() @@ -355,7 +360,7 @@ func (manager *syncerManager) ReadinessGateEnabledNegs(namespace string, podLabe obj, exists, err := manager.serviceLister.GetByKey(svcKey.Key()) if err != nil { - klog.Errorf("Failed to retrieve service %s from store: %v", svcKey.Key(), err) + manager.logger.Error(err, "Failed to retrieve service from store", "service", svcKey.Key()) continue } @@ -410,7 +415,7 @@ func (manager *syncerManager) ensureDeleteSvcNegCR(namespace, negName string) er if err = manager.svcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(namespace).Delete(context.Background(), negName, metav1.DeleteOptions{}); err != nil { return fmt.Errorf("errored while deleting neg cr %s/%s: %w", negName, namespace, err) } - klog.V(2).Infof("Deleted neg cr %s/%s", negName, namespace) + manager.logger.V(2).Info("Deleted neg cr", "svcneg", klog.KRef(namespace, negName)) } return nil } @@ -438,7 +443,7 @@ func (manager *syncerManager) garbageCollectNEG() error { for key, neg := range negList { if key.Type() != meta.Zonal { // covers the case when key.Zone is not populated - klog.V(4).Infof("Ignoring key %v as it is not zonal", key) + manager.logger.V(4).Info("Ignoring key as it is not zonal", "key", key) continue } if manager.namer.IsNEG(neg.Name) { @@ -539,7 +544,7 @@ func (manager *syncerManager) garbageCollectNEGWithCRD() error { for _, cr := range deletionCandidates { shouldDeleteNegCR := true deleteByZone := len(cr.Status.NetworkEndpointGroups) == 0 - klog.V(2).Infof("Deletion candidate %s/%s has %d NEG references", cr.Namespace, cr.Name, len(cr.Status.NetworkEndpointGroups)) + manager.logger.V(2).Info("Count of NEG 
references for deletion candidate", "count", len(cr.Status.NetworkEndpointGroups), "svcneg", klog.KObj(cr)) for _, negRef := range cr.Status.NetworkEndpointGroups { resourceID, err := cloud.ParseResourceURL(negRef.SelfLink) if err != nil { @@ -552,7 +557,7 @@ func (manager *syncerManager) garbageCollectNEGWithCRD() error { } if deleteByZone { - klog.V(2).Infof("Deletion candidate %s/%s has 0 NEG reference: %+v", cr.Namespace, cr.Name, cr) + manager.logger.V(2).Info("Deletion candidate has 0 NEG reference", "svcneg", klog.KObj(cr), "cr", cr) for _, zone := range zones { shouldDeleteNegCR = shouldDeleteNegCR && deleteNegOrReportErr(cr.Name, zone, cr) } @@ -572,13 +577,13 @@ func (manager *syncerManager) garbageCollectNEGWithCRD() error { portInfoMap := manager.svcPortMap[svcKey] for _, portInfo := range portInfoMap { if portInfo.NegName == cr.Name { - klog.V(2).Infof("NEG CR %s/%s is still desired, skipping deletion", cr.Namespace, cr.Name) + manager.logger.V(2).Info("NEG CR is still desired, skipping deletion", "svcneg", klog.KObj(cr)) return } } - klog.V(2).Infof("Deleting NEG CR %s/%s", cr.Namespace, cr.Name) - if err := deleteSvcNegCR(manager.svcNegClient, cr); err != nil { + manager.logger.V(2).Info("Deleting NEG CR", "svcneg", klog.KObj(cr)) + if err := deleteSvcNegCR(manager.svcNegClient, cr, manager.logger); err != nil { errList = append(errList, err) } }() @@ -592,7 +597,7 @@ func (manager *syncerManager) ensureDeleteNetworkEndpointGroup(name, zone string neg, err := manager.cloud.GetNetworkEndpointGroup(name, zone, meta.VersionGA) if err != nil { if utils.IsNotFoundError(err) || utils.IsHTTPErrorCode(err, http.StatusBadRequest) { - klog.V(2).Infof("Ignoring error when querying for neg %s/%s during GC: %q", name, zone, err) + manager.logger.V(2).Info("Ignoring error when querying for neg during GC", "negName", name, "zone", zone, "err", err) return nil } return err @@ -602,16 +607,16 @@ func (manager *syncerManager) ensureDeleteNetworkEndpointGroup(name, 
zone string // Controller managed custom named negs will always have a populated description, so do not delete custom named // negs with empty descriptions. if !manager.namer.IsNEG(name) && neg.Description == "" { - klog.V(2).Infof("Skipping deletion of Neg %s in %s because name was not generated and empty description", name, zone) + manager.logger.V(2).Info("Skipping deletion of Neg because name was not generated and empty description", "negName", name, "zone", zone) return nil } if matches, err := utils.VerifyDescription(*expectedDesc, neg.Description, name, zone); !matches { - klog.V(2).Infof("Skipping deletion of Neg %s in %s because of conflicting description: %s", name, zone, err) + manager.logger.V(2).Info("Skipping deletion of Neg because of conflicting description", "negName", name, "zone", zone, "err", err) return nil } } - klog.V(2).Infof("Deleting NEG %q in %q.", name, zone) + manager.logger.V(2).Info("Deleting NEG", "negName", name, "zone", zone) return manager.cloud.DeleteNetworkEndpointGroup(name, zone, meta.VersionGA) } @@ -625,7 +630,7 @@ func (manager *syncerManager) ensureSvcNegCR(svcKey serviceKey, portInfo negtype obj, exists, err := manager.serviceLister.GetByKey(svcKey.Key()) if err != nil { - klog.Errorf("Failed to retrieve service %s from store: %v", svcKey.Key(), err) + manager.logger.Error(err, "Failed to retrieve service from store", "service", svcKey.Key()) } if !exists { @@ -661,13 +666,13 @@ func (manager *syncerManager) ensureSvcNegCR(svcKey serviceKey, portInfo negtype // Neg does not exist so create it _, err = manager.svcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(svcKey.namespace).Create(context.Background(), &newCR, metav1.CreateOptions{}) - klog.V(2).Infof("Created ServiceNetworkEndpointGroup CR for neg %s/%s", svcKey.namespace, portInfo.NegName) + manager.logger.V(2).Info("Created ServiceNetworkEndpointGroup CR for neg", "svcneg", klog.KRef(svcKey.namespace, portInfo.NegName)) return err } - needUpdate, err := 
ensureNegCRLabels(negCR, labels) + needUpdate, err := ensureNegCRLabels(negCR, labels, manager.logger) if err != nil { - klog.Errorf("failed to ensure labels for neg %s/%s for service %s: %s", negCR.Namespace, negCR.Name, service.Name, err) + manager.logger.Error(err, "failed to ensure labels for neg", "svcneg", klog.KRef(negCR.Namespace, negCR.Name), "service", service.Name) return err } needUpdate = ensureNegCROwnerRef(negCR, newCR.OwnerReferences) || needUpdate @@ -679,10 +684,10 @@ func (manager *syncerManager) ensureSvcNegCR(svcKey serviceKey, portInfo negtype return nil } -func ensureNegCRLabels(negCR *negv1beta1.ServiceNetworkEndpointGroup, labels map[string]string) (bool, error) { +func ensureNegCRLabels(negCR *negv1beta1.ServiceNetworkEndpointGroup, labels map[string]string, logger klog.Logger) (bool, error) { needsUpdate := false existingLabels := negCR.GetLabels() - klog.V(4).Infof("existing neg %s/%s labels: %+v", negCR.Namespace, negCR.Name, existingLabels) + logger.V(4).Info("Ensuring NEG CR labels", "svcneg", klog.KRef(negCR.Namespace, negCR.Name), "existingLabels", existingLabels) //Check that required labels exist and are matching for key, value := range labels { @@ -711,18 +716,18 @@ func ensureNegCROwnerRef(negCR *negv1beta1.ServiceNetworkEndpointGroup, expected } // deleteSvcNegCR will remove finalizers on the given negCR and if deletion timestamp is not set, will delete it as well -func deleteSvcNegCR(svcNegClient svcnegclient.Interface, negCR *negv1beta1.ServiceNetworkEndpointGroup) error { +func deleteSvcNegCR(svcNegClient svcnegclient.Interface, negCR *negv1beta1.ServiceNetworkEndpointGroup, logger klog.Logger) error { updatedCR := negCR.DeepCopy() updatedCR.Finalizers = []string{} if _, err := patchNegStatus(svcNegClient, *negCR, *updatedCR); err != nil { return err } - klog.V(2).Infof("Removed finalizer on ServiceNetworkEndpointGroup CR %s/%s", negCR.Namespace, negCR.Name) + logger.V(2).Info("Removed finalizer on 
ServiceNetworkEndpointGroup CR", "svcneg", klog.KRef(negCR.Namespace, negCR.Name)) // If CR does not have a deletion timestamp, delete if negCR.GetDeletionTimestamp().IsZero() { - klog.V(2).Infof("Deleting ServiceNetworkEndpointGroup CR %s/%s", negCR.Namespace, negCR.Name) + logger.V(2).Info("Deleting ServiceNetworkEndpointGroup CR", "svcneg", klog.KRef(negCR.Namespace, negCR.Name)) return svcNegClient.NetworkingV1beta1().ServiceNetworkEndpointGroups(negCR.Namespace).Delete(context.Background(), negCR.Name, metav1.DeleteOptions{}) } return nil diff --git a/pkg/neg/manager_test.go b/pkg/neg/manager_test.go index 2d66323323..54054f365e 100644 --- a/pkg/neg/manager_test.go +++ b/pkg/neg/manager_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/api/googleapi" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" apiv1 "k8s.io/api/core/v1" @@ -92,6 +93,7 @@ func NewTestSyncerManager(kubeClient kubernetes.Interface) (*syncerManager, *gce testContext.SvcNegInformer.GetIndexer(), false, //enableNonGcpMode false, //enableEndpointSlices + klog.TODO(), ) return manager, testContext.Cloud } diff --git a/pkg/neg/readiness/poller.go b/pkg/neg/readiness/poller.go index 4dc0107dce..fae529dca8 100644 --- a/pkg/neg/readiness/poller.go +++ b/pkg/neg/readiness/poller.go @@ -18,11 +18,12 @@ package readiness import ( "fmt" - "k8s.io/apimachinery/pkg/util/clock" "strconv" "sync" "time" + "k8s.io/apimachinery/pkg/util/clock" + "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "k8s.io/apimachinery/pkg/types" @@ -30,7 +31,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/ingress-gce/pkg/composite" negtypes "k8s.io/ingress-gce/pkg/neg/types" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( @@ -86,9 +87,11 @@ type poller struct { negCloud negtypes.NetworkEndpointGroupCloud clock clock.Clock + + logger klog.Logger } -func NewPoller(podLister 
cache.Indexer, lookup NegLookup, patcher podStatusPatcher, negCloud negtypes.NetworkEndpointGroupCloud) *poller { +func NewPoller(podLister cache.Indexer, lookup NegLookup, patcher podStatusPatcher, negCloud negtypes.NetworkEndpointGroupCloud, logger klog.Logger) *poller { return &poller{ pollMap: make(map[negMeta]*pollTarget), podLister: podLister, @@ -96,6 +99,7 @@ func NewPoller(podLister cache.Indexer, lookup NegLookup, patcher podStatusPatch patcher: patcher, negCloud: negCloud, clock: clock.RealClock{}, + logger: logger.WithName("Poller"), } } @@ -144,12 +148,12 @@ func (p *poller) ScanForWork() []negMeta { // This function is threadsafe. func (p *poller) Poll(key negMeta) (retry bool, err error) { if !p.markPolling(key) { - klog.V(4).Infof("NEG %q in zone %q as is already being polled or no longer needed to be polled.", key.Name, key.Zone) + p.logger.V(4).Info("NEG is already being polled or no longer needed to be polled.", "neg", key.Name, "negZone", key.Zone) return true, nil } defer p.unMarkPolling(key) - klog.V(2).Infof("polling NEG %q in zone %q", key.Name, key.Zone) + p.logger.V(2).Info("polling NEG", "neg", key.Name, "negZone", key.Zone) // TODO(freehan): filter the NEs that are in interest once the API supports it res, err := p.negCloud.ListNetworkEndpoints(key.Name, key.Zone /*showHealthStatus*/, true, key.SyncerKey.GetAPIVersion()) if err != nil { @@ -159,7 +163,7 @@ func (p *poller) Poll(key negMeta) (retry bool, err error) { // until the next status poll is executed. However, the pods are not marked as Ready and still passes the LB health check will // serve LB traffic. The side effect during the delay period is the workload (depending on rollout strategy) might slow down rollout. // TODO(freehan): enable exponential backoff. - klog.Errorf("Failed to ListNetworkEndpoint in NEG %q, retry in %v", key.String(), retryDelay.String()) + p.logger.Error(err, "Failed to ListNetworkEndpoint in NEG. 
Retrying after some time.", "neg", key.String(), "retryDelay", retryDelay.String()) <-p.clock.After(retryDelay) return true, err } @@ -176,7 +180,7 @@ func (p *poller) Poll(key negMeta) (retry bool, err error) { func (p *poller) processHealthStatus(key negMeta, healthStatuses []*composite.NetworkEndpointWithHealthStatus) (bool, error) { p.lock.Lock() defer p.lock.Unlock() - klog.V(4).Infof("processHealthStatus(%q, %+v)", key.String(), healthStatuses) + p.logger.V(4).Info("Executing processHealthStatus", "neg", key.String(), "healthStatuses", healthStatuses) var ( errList []error @@ -193,12 +197,12 @@ func (p *poller) processHealthStatus(key negMeta, healthStatuses []*composite.Ne for _, healthStatus := range healthStatuses { if healthStatus == nil { - klog.Warningf("healthStatus is nil from response %+v", healthStatuses) + p.logger.Error(nil, "healthStatus is nil from response", "healthStatuses", healthStatuses) continue } if healthStatus.NetworkEndpoint == nil { - klog.Warningf("Health status has nil associated network endpoint: %v", healthStatus) + p.logger.Error(nil, "Health status has nil associated network endpoint", "healthStatus", healthStatus) continue } @@ -216,7 +220,7 @@ func (p *poller) processHealthStatus(key negMeta, healthStatuses []*composite.Ne continue } - bsKey := getHealthyBackendService(healthStatus) + bsKey := getHealthyBackendService(healthStatus, p.logger) if bsKey == nil { unhealthyPods = append(unhealthyPods, podName) continue @@ -256,21 +260,21 @@ func (p *poller) processHealthStatus(key negMeta, healthStatuses []*composite.Ne } // getHealthyBackendService returns one of the first backend service key where the endpoint is considered healthy. 
-func getHealthyBackendService(healthStatus *composite.NetworkEndpointWithHealthStatus) *meta.Key { +func getHealthyBackendService(healthStatus *composite.NetworkEndpointWithHealthStatus, logger klog.Logger) *meta.Key { for _, hs := range healthStatus.Healths { if hs == nil { - klog.Errorf("Health status is nil in health status of network endpoint %v ", healthStatus) + logger.Error(nil, "Health status is nil in health status of network endpoint", "healthStatus", healthStatus) continue } if hs.BackendService == nil { - klog.Errorf("Backend service is nil in health status of network endpoint %v", healthStatus) + logger.Error(nil, "Backend service is nil in health status of network endpoint", "healthStatus", healthStatus) continue } if hs.HealthState == healthyState { id, err := cloud.ParseResourceURL(hs.BackendService.BackendService) if err != nil { - klog.Errorf("Failed to parse backend service reference from a Network Endpoint health status %v: %v", healthStatus, err) + logger.Error(err, "Failed to parse backend service reference from a Network Endpoint health status", "healthStatus", healthStatus) continue } if id != nil { diff --git a/pkg/neg/readiness/reflector.go b/pkg/neg/readiness/reflector.go index 3f3fde2158..4b0c6abd90 100644 --- a/pkg/neg/readiness/reflector.go +++ b/pkg/neg/readiness/reflector.go @@ -23,7 +23,7 @@ import ( "time" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/util/workqueue" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( @@ -72,15 +72,18 @@ type readinessReflector struct { eventRecorder record.EventRecorder queue workqueue.RateLimitingInterface + + logger klog.Logger } -func NewReadinessReflector(kubeClient kubernetes.Interface, podLister 
cache.Indexer, negCloud negtypes.NetworkEndpointGroupCloud, lookup NegLookup) Reflector { +func NewReadinessReflector(kubeClient kubernetes.Interface, podLister cache.Indexer, negCloud negtypes.NetworkEndpointGroupCloud, lookup NegLookup, logger klog.Logger) Reflector { broadcaster := record.NewBroadcaster() broadcaster.StartLogging(klog.Infof) broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{ Interface: kubeClient.CoreV1().Events(""), }) recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "neg-readiness-reflector"}) + logger = logger.WithName("ReadinessReflector") reflector := &readinessReflector{ client: kubeClient, podLister: podLister, @@ -89,16 +92,17 @@ func NewReadinessReflector(kubeClient kubernetes.Interface, podLister cache.Inde eventBroadcaster: broadcaster, eventRecorder: recorder, queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), + logger: logger, } - poller := NewPoller(podLister, lookup, reflector, negCloud) + poller := NewPoller(podLister, lookup, reflector, negCloud, logger) reflector.poller = poller return reflector } func (r *readinessReflector) Run(stopCh <-chan struct{}) { defer r.queue.ShutDown() - klog.V(2).Infof("Starting NEG readiness reflector") - defer klog.V(2).Infof("Shutting down NEG readiness reflector") + r.logger.V(2).Info("Starting NEG readiness reflector") + defer r.logger.V(2).Info("Shutting down NEG readiness reflector") go wait.Until(r.worker, time.Second, stopCh) <-stopCh @@ -129,12 +133,12 @@ func (r *readinessReflector) handleErr(err error, key interface{}) { } if r.queue.NumRequeues(key) < maxRetries { - klog.V(2).Infof("Error syncing pod %q, retrying. Error: %v", key, err) + r.logger.V(2).Info("Error syncing pod. 
Retrying.", "pod", key, "err", err) r.queue.AddRateLimited(key) return } - klog.Warningf("Dropping pod %q out of the queue: %v", key, err) + r.logger.Info("Dropping pod out of the queue", "pod", key, "err", err) r.queue.Forget(key) } @@ -155,7 +159,7 @@ func (r *readinessReflector) syncPod(podKey string, neg, backendService *meta.Ke return err } if !exists { - klog.V(5).Infof("Pod %q is no longer exists. Skipping", podKey) + r.logger.V(3).Info("Pod no longer exists. Skipping", "pod", podKey) return nil } @@ -164,7 +168,7 @@ func (r *readinessReflector) syncPod(podKey string, neg, backendService *meta.Ke return nil } - klog.V(4).Infof("syncPod(%q, %v, %v)", podKey, neg, backendService) + r.logger.V(3).Info("Syncing pod", "pod", podKey, "neg", neg, "backendService", backendService) expectedCondition := r.getExpectedNegCondition(pod, neg, backendService) return r.ensurePodNegCondition(pod, expectedCondition) } @@ -220,12 +224,12 @@ func (r *readinessReflector) getExpectedNegCondition(pod *v1.Pod, neg, backendSe func (r *readinessReflector) SyncPod(pod *v1.Pod) { key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pod) if err != nil { - klog.Errorf("Failed to generate pod key: %v", err) + r.logger.Error(err, "Failed to generate pod key") return } if !needToProcess(pod) { - klog.V(6).Infof("Skip processing pod %q", key) + r.logger.V(3).Info("Skip processing pod", "pod", key) } r.queue.Add(key) } @@ -252,10 +256,10 @@ func (r *readinessReflector) poll() { // pollNeg polls a NEG func (r *readinessReflector) pollNeg(key negMeta) { - klog.V(4).Infof("Polling NEG %q", key.String()) + r.logger.V(3).Info("Polling NEG", "neg", key.String()) retry, err := r.poller.Poll(key) if err != nil { - klog.Errorf("Failed to poll %q: %v", key, err) + r.logger.Error(err, "Failed to poll neg", "neg", key) } if retry { r.poll() @@ -271,7 +275,7 @@ func (r *readinessReflector) ensurePodNegCondition(pod *v1.Pod, expectedConditio // check if it is necessary to patch condition, ok := 
NegReadinessConditionStatus(pod) if ok && reflect.DeepEqual(expectedCondition, condition) { - klog.V(4).Infof("NEG condition for pod %s/%s is expected, skip patching", pod.Namespace, pod.Name) + r.logger.V(3).Info("NEG condition for pod is expected, skip patching", "pod", klog.KRef(pod.Namespace, pod.Name)) return nil } diff --git a/pkg/neg/readiness/reflector_test.go b/pkg/neg/readiness/reflector_test.go index 36a9ef541e..c095308833 100644 --- a/pkg/neg/readiness/reflector_test.go +++ b/pkg/neg/readiness/reflector_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" + "k8s.io/klog/v2" ) // fakeLookUp implements LookUp interface @@ -47,7 +48,7 @@ func (f *fakeLookUp) ReadinessGateEnabled(syncerKey negtypes.NegSyncerKey) bool } func newTestReadinessReflector(testContext *negtypes.TestContext) *readinessReflector { - reflector := NewReadinessReflector(testContext.KubeClient, testContext.PodInformer.GetIndexer(), negtypes.NewAdapter(testContext.Cloud), &fakeLookUp{}) + reflector := NewReadinessReflector(testContext.KubeClient, testContext.PodInformer.GetIndexer(), negtypes.NewAdapter(testContext.Cloud), &fakeLookUp{}, klog.TODO()) ret := reflector.(*readinessReflector) return ret } diff --git a/pkg/neg/readiness/utils.go b/pkg/neg/readiness/utils.go index a3407239a8..5555a76422 100644 --- a/pkg/neg/readiness/utils.go +++ b/pkg/neg/readiness/utils.go @@ -20,7 +20,7 @@ import ( "context" "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" @@ -28,7 +28,7 @@ import ( negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/neg/types/shared" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) // NegReadinessConditionStatus return (cond, true) if neg condition exists, otherwise (_, false) diff --git 
a/pkg/neg/syncers/endpoints_calculator.go b/pkg/neg/syncers/endpoints_calculator.go index 28434b6b56..37e51f2f21 100644 --- a/pkg/neg/syncers/endpoints_calculator.go +++ b/pkg/neg/syncers/endpoints_calculator.go @@ -18,14 +18,15 @@ package syncers import ( "fmt" + "strings" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" - "strings" + "k8s.io/klog/v2" ) // LocalL4ILBEndpointGetter implements the NetworkEndpointsCalculator interface. @@ -41,10 +42,17 @@ type LocalL4ILBEndpointsCalculator struct { zoneGetter types.ZoneGetter subsetSizeLimit int svcId string + logger klog.Logger } -func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string) *LocalL4ILBEndpointsCalculator { - return &LocalL4ILBEndpointsCalculator{nodeLister: nodeLister, zoneGetter: zoneGetter, subsetSizeLimit: maxSubsetSizeLocal, svcId: svcId} +func NewLocalL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger) *LocalL4ILBEndpointsCalculator { + return &LocalL4ILBEndpointsCalculator{ + nodeLister: nodeLister, + zoneGetter: zoneGetter, + subsetSizeLimit: maxSubsetSizeLocal, + svcId: svcId, + logger: logger.WithName("LocalL4ILBEndpointsCalculator"), + } } // Mode indicates the mode that the EndpointsCalculator is operating in. @@ -62,11 +70,11 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.Endpoints for _, ed := range eds { for _, addr := range ed.Addresses { if addr.NodeName == nil { - klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have an associated node. Skipping", addr.Addresses, ed.Meta.Namespace, ed.Meta.Name) + l.logger.V(2).Info("Address inside Endpoints does not have an associated node. 
Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name)) continue } if addr.TargetRef == nil { - klog.V(2).Infof("Endpoint %q in Endpoints %s/%s does not have an associated pod. Skipping", addr.Addresses, ed.Meta.Namespace, ed.Meta.Name) + l.logger.V(2).Info("Address inside Endpoints does not have an associated pod. Skipping", "address", addr.Addresses, "endpoints", klog.KRef(ed.Meta.Namespace, ed.Meta.Name)) continue } numEndpoints++ @@ -76,16 +84,16 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.Endpoints processedNodes.Insert(*addr.NodeName) node, err := l.nodeLister.Get(*addr.NodeName) if err != nil { - klog.Errorf("failed to retrieve node object for %q: %v", *addr.NodeName, err) + l.logger.Error(err, "failed to retrieve node object", "nodeName", *addr.NodeName) continue } if ok := candidateNodeCheck(node); !ok { - klog.Infof("Dropping Node %q from subset since it is not a valid LB candidate", node.Name) + l.logger.Info("Dropping Node from subset since it is not a valid LB candidate", "nodeName", node.Name) continue } zone, err := l.zoneGetter.GetZoneForNode(node.Name) if err != nil { - klog.Errorf("Unable to find zone for node %s, err %v, skipping", node.Name, err) + l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name) continue } zoneNodeMap[zone] = append(zoneNodeMap[zone], node) @@ -96,8 +104,8 @@ func (l *LocalL4ILBEndpointsCalculator) CalculateEndpoints(eds []types.Endpoints return nil, nil, nil } // Compute the networkEndpoints, with total endpoints count <= l.subsetSizeLimit - klog.V(2).Infof("LocalL4ILBEndpointsCalculator - Got zoneNodeMap %q as input for service ID %v", nodeMapToString(zoneNodeMap), l.svcId) - subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap) + klog.V(2).Infof("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId) + subsetMap, err := 
getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger) return subsetMap, nil, err } @@ -115,11 +123,17 @@ type ClusterL4ILBEndpointsCalculator struct { subsetSizeLimit int // svcId is the unique identifier for the service, that is used as a salt when hashing nodenames. svcId string + + logger klog.Logger } -func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string) *ClusterL4ILBEndpointsCalculator { - return &ClusterL4ILBEndpointsCalculator{nodeLister: nodeLister, zoneGetter: zoneGetter, - subsetSizeLimit: maxSubsetSizeDefault, svcId: svcId} +func NewClusterL4ILBEndpointsCalculator(nodeLister listers.NodeLister, zoneGetter types.ZoneGetter, svcId string, logger klog.Logger) *ClusterL4ILBEndpointsCalculator { + return &ClusterL4ILBEndpointsCalculator{ + nodeLister: nodeLister, + zoneGetter: zoneGetter, + subsetSizeLimit: maxSubsetSizeDefault, + svcId: svcId, + logger: logger.WithName("ClusterL4ILBEndpointsCalculator")} } // Mode indicates the mode that the EndpointsCalculator is operating in. @@ -136,14 +150,14 @@ func (l *ClusterL4ILBEndpointsCalculator) CalculateEndpoints(_ []types.Endpoints for _, node := range nodes { zone, err := l.zoneGetter.GetZoneForNode(node.Name) if err != nil { - klog.Errorf("Unable to find zone for node %s, err %v, skipping", node.Name, err) + l.logger.Error(err, "Unable to find zone for node, skipping", "nodeName", node.Name) continue } zoneNodeMap[zone] = append(zoneNodeMap[zone], node) } - klog.V(2).Infof("ClusterL4ILBEndpointsCalculator - Got zoneNodeMap %q as input for service ID %v", nodeMapToString(zoneNodeMap), l.svcId) + l.logger.V(2).Info("Got zoneNodeMap as input for service", "zoneNodeMap", nodeMapToString(zoneNodeMap), "serviceID", l.svcId) // Compute the networkEndpoints, with total endpoints <= l.subsetSizeLimit.
- subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap) + subsetMap, err := getSubsetPerZone(zoneNodeMap, l.subsetSizeLimit, l.svcId, currentMap, l.logger) return subsetMap, nil, err } @@ -154,15 +168,17 @@ type L7EndpointsCalculator struct { podLister cache.Indexer subsetLabels string networkEndpointType types.NetworkEndpointType + logger klog.Logger } -func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister cache.Indexer, svcPortName, subsetLabels string, endpointType types.NetworkEndpointType) *L7EndpointsCalculator { +func NewL7EndpointsCalculator(zoneGetter types.ZoneGetter, podLister cache.Indexer, svcPortName, subsetLabels string, endpointType types.NetworkEndpointType, logger klog.Logger) *L7EndpointsCalculator { return &L7EndpointsCalculator{ zoneGetter: zoneGetter, servicePortName: svcPortName, podLister: podLister, subsetLabels: subsetLabels, networkEndpointType: endpointType, + logger: logger.WithName("L7EndpointsCalculator"), } } diff --git a/pkg/neg/syncers/endpoints_calculator_test.go b/pkg/neg/syncers/endpoints_calculator_test.go index 3341afa362..acba48b202 100644 --- a/pkg/neg/syncers/endpoints_calculator_test.go +++ b/pkg/neg/syncers/endpoints_calculator_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/tools/cache" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) @@ -101,7 +102,7 @@ func TestLocalGetEndpointSet(t *testing.T) { }, } svcKey := fmt.Sprintf("%s/%s", testServiceName, testServiceNamespace) - ec := NewLocalL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey) + ec := NewLocalL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey, klog.TODO()) for _, tc := range testCases { createNodes(t, tc.nodeNames, tc.nodeLabelsMap, tc.nodeReadyStatusMap, transactionSyncer.nodeLister) retSet, _, err := ec.CalculateEndpoints(tc.endpointsData, nil) @@ -194,7 +195,7 @@ func TestClusterGetEndpointSet(t *testing.T) { }, 
} svcKey := fmt.Sprintf("%s/%s", testServiceName, testServiceNamespace) - ec := NewClusterL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey) + ec := NewClusterL4ILBEndpointsCalculator(nodeLister, zoneGetter, svcKey, klog.TODO()) for _, tc := range testCases { createNodes(t, tc.nodeNames, tc.nodeLabelsMap, tc.nodeReadyStatusMap, transactionSyncer.nodeLister) retSet, _, err := ec.CalculateEndpoints(tc.endpointsData, nil) diff --git a/pkg/neg/syncers/subsets.go b/pkg/neg/syncers/subsets.go index f4141a2a81..a19893f8ad 100644 --- a/pkg/neg/syncers/subsets.go +++ b/pkg/neg/syncers/subsets.go @@ -22,10 +22,10 @@ import ( "fmt" "sort" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( @@ -157,7 +157,7 @@ func sortZones(nodesPerZone map[string][]*v1.Node) []ZoneInfo { // Since the number of nodes will keep increasing in successive zones due to the sorting, even if fewer nodes were // present in some zones, more nodes will be picked from other nodes, taking the total subset size to the given limit // whenever possible. -func getSubsetPerZone(nodesPerZone map[string][]*v1.Node, totalLimit int, svcID string, currentMap map[string]negtypes.NetworkEndpointSet) (map[string]negtypes.NetworkEndpointSet, error) { +func getSubsetPerZone(nodesPerZone map[string][]*v1.Node, totalLimit int, svcID string, currentMap map[string]negtypes.NetworkEndpointSet, logger klog.Logger) (map[string]negtypes.NetworkEndpointSet, error) { result := make(map[string]negtypes.NetworkEndpointSet) var currentList []negtypes.NetworkEndpoint @@ -170,7 +170,7 @@ func getSubsetPerZone(nodesPerZone map[string][]*v1.Node, totalLimit int, svcID for _, zone := range zoneList { // split the limit across the leftover zones. 
subsetSize = totalLimit / zonesRemaining - klog.Infof("Picking subset of size %d for zone %v, service %s", subsetSize, zone, svcID) + logger.Info("Picking subset for a zone", "subsetSize", subsetSize, "zone", zone, "svcID", svcID) result[zone.Name] = negtypes.NewNetworkEndpointSet() if currentMap != nil { if zset, ok := currentMap[zone.Name]; ok && zset != nil { diff --git a/pkg/neg/syncers/subsets_test.go b/pkg/neg/syncers/subsets_test.go index b839ac81dc..c739905c87 100644 --- a/pkg/neg/syncers/subsets_test.go +++ b/pkg/neg/syncers/subsets_test.go @@ -22,6 +22,7 @@ import ( "testing" "k8s.io/ingress-gce/pkg/neg/types" + "k8s.io/klog/v2" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -188,7 +189,7 @@ func TestUnevenNodesInZones(t *testing.T) { }, } for _, tc := range testCases { - subsetMap, err := getSubsetPerZone(tc.nodesMap, tc.subsetLimit, tc.svcKey, nil) + subsetMap, err := getSubsetPerZone(tc.nodesMap, tc.subsetLimit, tc.svcKey, nil, klog.TODO()) if err != nil { t.Errorf("Failed to get subset for test '%s', err %v", tc.description, err) } diff --git a/pkg/neg/syncers/syncer.go b/pkg/neg/syncers/syncer.go index 16fd73606f..b74291cd31 100644 --- a/pkg/neg/syncers/syncer.go +++ b/pkg/neg/syncers/syncer.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" negtypes "k8s.io/ingress-gce/pkg/neg/types" - "k8s.io/klog" + "k8s.io/klog/v2" ) type syncerCore interface { @@ -55,9 +55,11 @@ type syncer struct { syncCh chan interface{} clock clock.Clock backoff backoffHandler + + logger klog.Logger } -func newSyncer(negSyncerKey negtypes.NegSyncerKey, serviceLister cache.Indexer, recorder record.EventRecorder, core syncerCore) *syncer { +func newSyncer(negSyncerKey negtypes.NegSyncerKey, serviceLister cache.Indexer, recorder record.EventRecorder, core syncerCore, logger klog.Logger) *syncer { return &syncer{ NegSyncerKey: negSyncerKey, core: core, @@ -67,6 +69,7 @@ func newSyncer(negSyncerKey 
negtypes.NegSyncerKey, serviceLister cache.Indexer, shuttingDown: false, clock: clock.RealClock{}, backoff: NewExponentialBackendOffHandler(maxRetries, minRetryDelay, maxRetryDelay), + logger: logger, } } @@ -78,7 +81,7 @@ func (s *syncer) Start() error { return fmt.Errorf("NEG syncer for %s is shutting down. ", s.NegSyncerKey.String()) } - klog.V(2).Infof("Starting NEG syncer for service port %s", s.NegSyncerKey.String()) + s.logger.V(2).Info("Starting NEG syncer for service port", "negSynckerKey", s.NegSyncerKey.String()) s.init() go func() { for { @@ -108,7 +111,7 @@ func (s *syncer) Start() error { s.stateLock.Lock() s.shuttingDown = false s.stateLock.Unlock() - klog.V(2).Infof("Stopping NEG syncer for %s", s.NegSyncerKey.String()) + s.logger.V(2).Info("Stopping NEG syncer", "negSynckerKey", s.NegSyncerKey.String()) return } case <-retryCh: @@ -130,7 +133,7 @@ func (s *syncer) Stop() { s.stateLock.Lock() defer s.stateLock.Unlock() if !s.stopped { - klog.V(2).Infof("Stopping NEG syncer for service port %s", s.NegSyncerKey.String()) + s.logger.V(2).Info("Stopping NEG syncer for service port", "negSynckerKey", s.NegSyncerKey.String()) s.stopped = true s.shuttingDown = true close(s.syncCh) @@ -139,7 +142,7 @@ func (s *syncer) Stop() { func (s *syncer) Sync() bool { if s.IsStopped() { - klog.Warningf("NEG syncer for %s is already stopped.", s.NegSyncerKey.String()) + s.logger.Info("NEG syncer is already stopped.", "negSynckerKey", s.NegSyncerKey.String()) return false } select { diff --git a/pkg/neg/syncers/syncer_test.go b/pkg/neg/syncers/syncer_test.go index b84b7d7d6d..dd43e52819 100644 --- a/pkg/neg/syncers/syncer_test.go +++ b/pkg/neg/syncers/syncer_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/tools/record" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" + "k8s.io/klog/v2" ) const ( @@ -101,6 +102,7 @@ func newSyncerTester() *syncerTester { testContext.ServiceInformer.GetIndexer(), record.NewFakeRecorder(100), st, + 
klog.TODO(), ) st.syncer = s return st diff --git a/pkg/neg/syncers/transaction.go b/pkg/neg/syncers/transaction.go index f96a45e4ab..6267b22a4e 100644 --- a/pkg/neg/syncers/transaction.go +++ b/pkg/neg/syncers/transaction.go @@ -43,7 +43,7 @@ import ( negtypes "k8s.io/ingress-gce/pkg/neg/types" svcnegclient "k8s.io/ingress-gce/pkg/svcneg/client/clientset/versioned" "k8s.io/ingress-gce/pkg/utils/patch" - "k8s.io/klog" + "k8s.io/klog/v2" ) type transactionSyncer struct { @@ -91,6 +91,8 @@ type transactionSyncer struct { customName bool enableEndpointSlices bool + + logger klog.Logger } func NewTransactionSyncer( @@ -109,7 +111,11 @@ func NewTransactionSyncer( kubeSystemUID string, svcNegClient svcnegclient.Interface, customName bool, - enableEndpointSlices bool) negtypes.NegSyncer { + enableEndpointSlices bool, + l klog.Logger) negtypes.NegSyncer { + + logger := l.WithName("Syncer").WithValues("service", klog.KRef(negSyncerKey.Namespace, negSyncerKey.Name), "negName", negSyncerKey.NegName) + // TransactionSyncer implements the syncer core ts := &transactionSyncer{ NegSyncerKey: negSyncerKey, @@ -130,28 +136,29 @@ func NewTransactionSyncer( svcNegClient: svcNegClient, customName: customName, enableEndpointSlices: enableEndpointSlices, + logger: logger, } // Syncer implements life cycle logic - syncer := newSyncer(negSyncerKey, serviceLister, recorder, ts) + syncer := newSyncer(negSyncerKey, serviceLister, recorder, ts, logger) // transactionSyncer needs syncer interface for internals ts.syncer = syncer ts.retry = NewDelayRetryHandler(func() { syncer.Sync() }, NewExponentialBackendOffHandler(maxRetries, minRetryDelay, maxRetryDelay)) return syncer } -func GetEndpointsCalculator(nodeLister, podLister cache.Indexer, zoneGetter negtypes.ZoneGetter, syncerKey negtypes.NegSyncerKey, mode negtypes.EndpointsCalculatorMode) negtypes.NetworkEndpointsCalculator { +func GetEndpointsCalculator(nodeLister, podLister cache.Indexer, zoneGetter negtypes.ZoneGetter, syncerKey 
negtypes.NegSyncerKey, mode negtypes.EndpointsCalculatorMode, logger klog.Logger) negtypes.NetworkEndpointsCalculator { serviceKey := strings.Join([]string{syncerKey.Name, syncerKey.Namespace}, "/") if syncerKey.NegType == negtypes.VmIpEndpointType { nodeLister := listers.NewNodeLister(nodeLister) switch mode { case negtypes.L4LocalMode: - return NewLocalL4ILBEndpointsCalculator(nodeLister, zoneGetter, serviceKey) + return NewLocalL4ILBEndpointsCalculator(nodeLister, zoneGetter, serviceKey, logger) default: - return NewClusterL4ILBEndpointsCalculator(nodeLister, zoneGetter, serviceKey) + return NewClusterL4ILBEndpointsCalculator(nodeLister, zoneGetter, serviceKey, logger) } } return NewL7EndpointsCalculator(zoneGetter, podLister, syncerKey.PortTuple.Name, - syncerKey.SubsetLabels, syncerKey.NegType) + syncerKey.SubsetLabels, syncerKey.NegType, logger) } func (s *transactionSyncer) sync() error { @@ -185,11 +192,10 @@ func (s *transactionSyncer) syncInternalImpl() error { } if s.syncer.IsStopped() || s.syncer.IsShuttingDown() { - klog.V(4).Infof("Skip syncing NEG %q for %s.", s.NegSyncerKey.NegName, s.NegSyncerKey.String()) + s.logger.V(3).Info("Skip syncing NEG", "negSyncerKey", s.NegSyncerKey.String()) return nil } - klog.V(2).Infof("Sync NEG %q for %s, Endpoints Calculator mode %s", s.NegSyncerKey.NegName, - s.NegSyncerKey.String(), s.endpointsCalculator.Mode()) + s.logger.V(2).Info("Sync NEG", "negSyncerKey", s.NegSyncerKey.String(), "endpointsCalculatorMode", s.endpointsCalculator.Mode()) currentMap, err := retrieveExistingZoneNetworkEndpointMap(s.NegSyncerKey.NegName, s.zoneGetter, s.cloud, s.NegSyncerKey.GetAPIVersion(), s.endpointsCalculator.Mode()) if err != nil { @@ -199,7 +205,7 @@ func (s *transactionSyncer) syncInternalImpl() error { // Merge the current state from cloud with the transaction table together // The combined state represents the eventual result when all transactions completed - mergeTransactionIntoZoneEndpointMap(currentMap, 
s.transactions) + mergeTransactionIntoZoneEndpointMap(currentMap, s.transactions, s.logger) s.logStats(currentMap, "after in-progress operations have completed, NEG endpoints") var targetMap map[string]negtypes.NetworkEndpointSet @@ -211,7 +217,7 @@ func (s *transactionSyncer) syncInternalImpl() error { return err } if len(slices) < 1 { - klog.Warningf("Endpoint slices for service %s/%s don't exist. Skipping NEG sync", s.Namespace, s.Name) + s.logger.Error(nil, "Endpoint slices for the service doesn't exist. Skipping NEG sync") return nil } endpointSlices := make([]*discovery.EndpointSlice, len(slices)) @@ -236,7 +242,7 @@ func (s *transactionSyncer) syncInternalImpl() error { return err } if !exists { - klog.Warningf("Endpoint %s/%s does not exist. Skipping NEG sync", s.Namespace, s.Name) + s.logger.Info("Endpoint does not exist. Skipping NEG sync", "endpoint", klog.KRef(s.Namespace, s.Name)) return nil } endpointsData := negtypes.EndpointsDataFromEndpoints(ep.(*apiv1.Endpoints)) @@ -256,17 +262,17 @@ func (s *transactionSyncer) syncInternalImpl() error { // This mostly happens when transaction entry require reconciliation but the transaction is still progress // e.g. endpoint A is in the process of adding to NEG N, and the new desire state is not to have A in N. // This ensures the endpoint that requires reconciliation to wait till the existing transaction to complete. 
- filterEndpointByTransaction(addEndpoints, s.transactions) - filterEndpointByTransaction(removeEndpoints, s.transactions) + filterEndpointByTransaction(addEndpoints, s.transactions, s.logger) + filterEndpointByTransaction(removeEndpoints, s.transactions, s.logger) // filter out the endpoints that are in transaction - filterEndpointByTransaction(committedEndpoints, s.transactions) + filterEndpointByTransaction(committedEndpoints, s.transactions, s.logger) if s.needCommit() { s.commitPods(committedEndpoints, endpointPodMap) } if len(addEndpoints) == 0 && len(removeEndpoints) == 0 { - klog.V(4).Infof("No endpoint change for %s/%s, skip syncing NEG. ", s.Namespace, s.Name) + s.logger.V(3).Info("No endpoint change. Skip syncing NEG. ", s.Namespace, s.Name) return nil } s.logEndpoints(addEndpoints, "adding endpoint") @@ -321,7 +327,7 @@ func (s *transactionSyncer) syncNetworkEndpoints(addEndpoints, removeEndpoints m syncFunc := func(endpointMap map[string]negtypes.NetworkEndpointSet, operation transactionOp) error { for zone, endpointSet := range endpointMap { if endpointSet.Len() == 0 { - klog.V(2).Infof("0 endpoint for %v operation for %s in NEG %s at %s. Skipping", attachOp, s.NegSyncerKey.String(), s.NegSyncerKey.NegName, zone) + s.logger.V(2).Info("0 endpoints in the endpoint list. 
Skipping operation", "operation", attachOp, "negSyncerKey", s.NegSyncerKey.String(), "zone", zone) continue } @@ -362,13 +368,13 @@ func (s *transactionSyncer) syncNetworkEndpoints(addEndpoints, removeEndpoints m // attachNetworkEndpoints creates go routine to run operations for attaching network endpoints func (s *transactionSyncer) attachNetworkEndpoints(zone string, networkEndpointMap map[negtypes.NetworkEndpoint]*composite.NetworkEndpoint) { - klog.V(2).Infof("Attaching %d endpoint(s) for %s in NEG %s at %s.", len(networkEndpointMap), s.NegSyncerKey.String(), s.NegSyncerKey.NegName, zone) + s.logger.V(2).Info("Attaching endpoints to NEG.", "countOfEndpointsBeingAttached", len(networkEndpointMap), "negSyncerKey", s.NegSyncerKey.String(), "zone", zone) go s.operationInternal(attachOp, zone, networkEndpointMap) } // detachNetworkEndpoints creates go routine to run operations for detaching network endpoints func (s *transactionSyncer) detachNetworkEndpoints(zone string, networkEndpointMap map[negtypes.NetworkEndpoint]*composite.NetworkEndpoint) { - klog.V(2).Infof("Detaching %d endpoint(s) for %s in NEG %s at %s.", len(networkEndpointMap), s.NegSyncerKey.String(), s.NegSyncerKey.NegName, zone) + s.logger.V(2).Info("Detaching endpoints from NEG.", "countOfEndpointsBeingDetached", len(networkEndpointMap), "negSyncerKey", s.NegSyncerKey.String(), "zone", zone) go s.operationInternal(detachOp, zone, networkEndpointMap) } @@ -431,7 +437,7 @@ func (s *transactionSyncer) commitTransaction(err error, networkEndpointMap map[ _, ok := s.transactions.Get(networkEndpoint) // clear transaction if !ok { - klog.Errorf("Endpoint %q was not found in the transaction table.", networkEndpoint) + s.logger.Error(nil, "Endpoint was not found in the transaction table.", "endpoint", networkEndpoint) continue } s.transactions.Delete(networkEndpoint) @@ -461,7 +467,7 @@ func (s *transactionSyncer) commitPods(endpointMap map[string]negtypes.NetworkEn for _, endpoint := range 
endpointSet.List() { podName, ok := endpointPodMap[endpoint] if !ok { - klog.Warningf("Endpoint %v is not included in the endpointPodMap %v", endpoint, endpointPodMap) + s.logger.Error(nil, "Endpoint is not included in the endpointPodMap", "endpoint", endpoint, "endpointPodMap", endpointPodMap) continue } zoneEndpointMap[endpoint] = podName @@ -475,7 +481,7 @@ func (s *transactionSyncer) commitPods(endpointMap map[string]negtypes.NetworkEn func (s *transactionSyncer) isZoneChange() bool { negCR, err := getNegFromStore(s.svcNegLister, s.Namespace, s.NegSyncerKey.NegName) if err != nil { - klog.Warningf("unable to retrieve neg %s/%s from the store: %s", s.Namespace, s.NegName, err) + s.logger.Error(err, "unable to retrieve neg from the store", "neg", klog.KRef(s.Namespace, s.NegName)) return false } @@ -483,7 +489,7 @@ func (s *transactionSyncer) isZoneChange() bool { for _, ref := range negCR.Status.NetworkEndpointGroups { id, err := cloud.ParseResourceURL(ref.SelfLink) if err != nil { - klog.Warningf("unable to parse selflink %s", ref.SelfLink) + s.logger.Error(err, "unable to parse selflink", "selfLink", ref.SelfLink) continue } existingZones.Insert(id.Key.Zone) @@ -491,7 +497,7 @@ func (s *transactionSyncer) isZoneChange() bool { zones, err := s.zoneGetter.ListZones(negtypes.NodePredicateForEndpointCalculatorMode(s.EpCalculatorMode)) if err != nil { - klog.Errorf("unable to list zones: %s", err) + s.logger.Error(err, "unable to list zones") return false } currZones := sets.NewString(zones...) 
@@ -500,11 +506,11 @@ func (s *transactionSyncer) isZoneChange() bool { } // filterEndpointByTransaction removes the all endpoints from endpoint map if they exists in the transaction table -func filterEndpointByTransaction(endpointMap map[string]negtypes.NetworkEndpointSet, table networkEndpointTransactionTable) { +func filterEndpointByTransaction(endpointMap map[string]negtypes.NetworkEndpointSet, table networkEndpointTransactionTable, logger klog.Logger) { for _, endpointSet := range endpointMap { for _, endpoint := range endpointSet.List() { if entry, ok := table.Get(endpoint); ok { - klog.V(2).Infof("Endpoint %q is removed from the endpoint set as transaction %v still exists.", endpoint, entry) + logger.V(2).Info("Endpoint is removed from the endpoint set as transaction still exists.", "endpoint", endpoint, "transactionEntry", entry) endpointSet.Delete(endpoint) } } @@ -513,12 +519,12 @@ func filterEndpointByTransaction(endpointMap map[string]negtypes.NetworkEndpoint // mergeTransactionIntoZoneEndpointMap merges the ongoing transaction into the endpointMap. 
// This converts the existing endpointMap to the state when all transactions completed -func mergeTransactionIntoZoneEndpointMap(endpointMap map[string]negtypes.NetworkEndpointSet, transactions networkEndpointTransactionTable) { +func mergeTransactionIntoZoneEndpointMap(endpointMap map[string]negtypes.NetworkEndpointSet, transactions networkEndpointTransactionTable, logger klog.Logger) { for _, endpointKey := range transactions.Keys() { entry, ok := transactions.Get(endpointKey) // If called in syncInternal, as the transaction table if !ok { - klog.V(2).Infof("Transaction entry of key %q was not found.", endpointKey) + logger.V(2).Info("Transaction entry of key was not found.", "endpointKey", endpointKey) continue } // Add endpoints in attach transaction @@ -541,16 +547,17 @@ func mergeTransactionIntoZoneEndpointMap(endpointMap map[string]negtypes.Network // logStats logs aggregated stats of the input endpointMap func (s *transactionSyncer) logStats(endpointMap map[string]negtypes.NetworkEndpointSet, desc string) { - stats := []string{} + var stats []interface{} + stats = append(stats, "description", desc) for zone, endpointSet := range endpointMap { - stats = append(stats, fmt.Sprintf("%d endpoints in zone %q", endpointSet.Len(), zone)) + stats = append(stats, zone, fmt.Sprintf("%d endpoints", endpointSet.Len())) } - klog.V(3).Infof("For NEG %q, %s: %s.", s.NegSyncerKey.NegName, desc, strings.Join(stats, ",")) + s.logger.V(3).Info("Stats for NEG", stats...) } // logEndpoints logs individual endpoint in the input endpointMap func (s *transactionSyncer) logEndpoints(endpointMap map[string]negtypes.NetworkEndpointSet, desc string) { - klog.V(3).Infof("For NEG %q, %s: %+v", s.NegSyncerKey.NegName, desc, endpointMap) + s.logger.V(3).Info("Endpoints for NEG", "description", desc, "endpointMap", endpointMap) } // updateInitStatus queries the k8s api server for the current NEG CR and updates the Initialized condition and neg objects as appropriate. 
@@ -562,7 +569,7 @@ func (s *transactionSyncer) updateInitStatus(negObjRefs []negv1beta1.NegObjectRe origNeg, err := getNegFromStore(s.svcNegLister, s.Namespace, s.NegSyncerKey.NegName) if err != nil { - klog.Errorf("Error updating init status for neg %s, failed getting neg from store: %s", s.NegSyncerKey.NegName, err) + s.logger.Error(err, "Error updating init status for neg, failed to get neg from store.") return } @@ -578,7 +585,7 @@ func (s *transactionSyncer) updateInitStatus(negObjRefs []negv1beta1.NegObjectRe _, err = patchNegStatus(s.svcNegClient, origNeg.Status, neg.Status, s.Namespace, s.NegSyncerKey.NegName) if err != nil { - klog.Errorf("Error updating Neg CR %s : %s", s.NegSyncerKey.NegName, err) + s.logger.Error(err, "Error updating Neg CR") } } @@ -589,7 +596,7 @@ func (s *transactionSyncer) updateStatus(syncErr error) { } origNeg, err := getNegFromStore(s.svcNegLister, s.Namespace, s.NegSyncerKey.NegName) if err != nil { - klog.Errorf("Error updating status for neg %s, failed getting neg from store: %s", s.NegSyncerKey.NegName, err) + s.logger.Error(err, "Error updating status for neg, failed to get neg from store") return } neg := origNeg.DeepCopy() @@ -608,7 +615,7 @@ func (s *transactionSyncer) updateStatus(syncErr error) { _, err = patchNegStatus(s.svcNegClient, origNeg.Status, neg.Status, s.Namespace, s.NegSyncerKey.NegName) if err != nil { - klog.Errorf("Error updating Neg CR %s : %s", s.NegSyncerKey.NegName, err) + s.logger.Error(err, "Error updating Neg CR") } } diff --git a/pkg/neg/syncers/transaction_test.go b/pkg/neg/syncers/transaction_test.go index 903d4163a6..3fbef9f631 100644 --- a/pkg/neg/syncers/transaction_test.go +++ b/pkg/neg/syncers/transaction_test.go @@ -41,6 +41,7 @@ import ( negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/endpointslices" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" utilpointer "k8s.io/utils/pointer" ) @@ -578,7 +579,7 @@ func 
TestMergeTransactionIntoZoneEndpointMap(t *testing.T) { } for _, tc := range testCases { - mergeTransactionIntoZoneEndpointMap(tc.endpointMap, tc.table()) + mergeTransactionIntoZoneEndpointMap(tc.endpointMap, tc.table(), klog.TODO()) if !reflect.DeepEqual(tc.endpointMap, tc.expectEndpointMap) { t.Errorf("For test case %q, endpointSets endpoint map to be %+v, but got %+v", tc.desc, tc.expectEndpointMap, tc.endpointMap) } @@ -645,7 +646,7 @@ func TestFilterEndpointByTransaction(t *testing.T) { for _, tc := range testCases { input := tc.endpointMap - filterEndpointByTransaction(input, tc.table()) + filterEndpointByTransaction(input, tc.table(), klog.TODO()) if !reflect.DeepEqual(tc.endpointMap, tc.expectEndpointMap) { t.Errorf("For test case %q, endpointSets endpoint map to be %+v, but got %+v", tc.desc, tc.expectEndpointMap, tc.endpointMap) } @@ -1410,7 +1411,7 @@ func TestUnknownNodes(t *testing.T) { func newL4ILBTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, mode negtypes.EndpointsCalculatorMode, enableEndpointSlices bool) (negtypes.NegSyncer, *transactionSyncer) { negsyncer, ts := newTestTransactionSyncer(fakeGCE, negtypes.VmIpEndpointType, false, enableEndpointSlices) - ts.endpointsCalculator = GetEndpointsCalculator(ts.nodeLister, ts.podLister, ts.zoneGetter, ts.NegSyncerKey, mode) + ts.endpointsCalculator = GetEndpointsCalculator(ts.nodeLister, ts.podLister, ts.zoneGetter, ts.NegSyncerKey, mode, klog.TODO()) return negsyncer, ts } @@ -1452,11 +1453,12 @@ func newTestTransactionSyncer(fakeGCE negtypes.NetworkEndpointGroupCloud, negTyp testContext.SvcNegInformer.GetIndexer(), reflector, GetEndpointsCalculator(testContext.NodeInformer.GetIndexer(), testContext.PodInformer.GetIndexer(), negtypes.NewFakeZoneGetter(), - svcPort, mode), + svcPort, mode, klog.TODO()), string(kubeSystemUID), testContext.SvcNegClient, customName, enableEndpointSlices, + klog.TODO(), ) transactionSyncer := negsyncer.(*syncer).core.(*transactionSyncer) indexers := 
map[string]cache.IndexFunc{ diff --git a/pkg/neg/syncers/utils.go b/pkg/neg/syncers/utils.go index b586f6ecd1..f9a60e5db4 100644 --- a/pkg/neg/syncers/utils.go +++ b/pkg/neg/syncers/utils.go @@ -34,7 +34,7 @@ import ( "k8s.io/ingress-gce/pkg/composite" negtypes "k8s.io/ingress-gce/pkg/neg/types" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/psc/controller.go b/pkg/psc/controller.go index 136b79eabc..9d6eb5a2da 100644 --- a/pkg/psc/controller.go +++ b/pkg/psc/controller.go @@ -49,7 +49,7 @@ import ( "k8s.io/ingress-gce/pkg/utils/patch" sautils "k8s.io/ingress-gce/pkg/utils/serviceattachment" "k8s.io/ingress-gce/pkg/utils/slice" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/pkg/ratelimit/ratelimit.go b/pkg/ratelimit/ratelimit.go index 67454534c1..5c3fc87158 100644 --- a/pkg/ratelimit/ratelimit.go +++ b/pkg/ratelimit/ratelimit.go @@ -27,7 +27,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "k8s.io/client-go/util/flowcontrol" "k8s.io/ingress-gce/pkg/flags" - "k8s.io/klog" + "k8s.io/klog/v2" ) // GCERateLimiter implements cloud.RateLimiter diff --git a/pkg/storage/configmaps.go b/pkg/storage/configmaps.go index f436209a9d..a963e468c5 100644 --- a/pkg/storage/configmaps.go +++ b/pkg/storage/configmaps.go @@ -22,7 +22,7 @@ import ( "strings" "sync" - "k8s.io/klog" + "k8s.io/klog/v2" api_v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" diff --git a/pkg/sync/sync.go b/pkg/sync/sync.go index 513ce23370..c98c4e9d5e 100644 --- a/pkg/sync/sync.go +++ b/pkg/sync/sync.go @@ -26,7 +26,7 @@ import ( "k8s.io/ingress-gce/pkg/utils" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/namer" - "k8s.io/klog" + "k8s.io/klog/v2" ) // ErrSkipBackendsSync is an error that can be returned by a Controller to diff --git a/pkg/translator/healthchecks.go b/pkg/translator/healthchecks.go index 901c77710b..1d8a522540 100644 --- 
a/pkg/translator/healthchecks.go +++ b/pkg/translator/healthchecks.go @@ -31,7 +31,7 @@ import ( backendconfigv1 "k8s.io/ingress-gce/pkg/apis/backendconfig/v1" "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go index a195c0a4ab..f7f91b304b 100644 --- a/pkg/utils/common/common.go +++ b/pkg/utils/common/common.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" client "k8s.io/client-go/kubernetes/typed/networking/v1" "k8s.io/client-go/tools/cache" - "k8s.io/klog" + "k8s.io/klog/v2" ) var ( diff --git a/pkg/utils/common/finalizer.go b/pkg/utils/common/finalizer.go index e3d24a8b00..2e625cdb02 100644 --- a/pkg/utils/common/finalizer.go +++ b/pkg/utils/common/finalizer.go @@ -23,7 +23,7 @@ import ( client "k8s.io/client-go/kubernetes/typed/networking/v1" "k8s.io/ingress-gce/pkg/utils/patch" "k8s.io/ingress-gce/pkg/utils/slice" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/utils/description.go b/pkg/utils/description.go index ff071c4a22..227c89f185 100644 --- a/pkg/utils/description.go +++ b/pkg/utils/description.go @@ -19,7 +19,7 @@ package utils import ( "encoding/json" - "k8s.io/klog" + "k8s.io/klog/v2" ) // Description stores the description for a BackendService. 
diff --git a/pkg/utils/gceurlmap.go b/pkg/utils/gceurlmap.go index 75c56e253f..a53e9937cc 100644 --- a/pkg/utils/gceurlmap.go +++ b/pkg/utils/gceurlmap.go @@ -17,7 +17,7 @@ import ( "fmt" "strings" - "k8s.io/klog" + "k8s.io/klog/v2" ) // GCEURLMap is a simplified representation of a UrlMap somewhere diff --git a/pkg/utils/namer/frontendnamer.go b/pkg/utils/namer/frontendnamer.go index 6d8fcf8a6b..0dc3bc9dfd 100644 --- a/pkg/utils/namer/frontendnamer.go +++ b/pkg/utils/namer/frontendnamer.go @@ -20,7 +20,7 @@ import ( v1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/ingress-gce/pkg/utils/common" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/utils/namer/namer.go b/pkg/utils/namer/namer.go index 25073c2561..4126937669 100644 --- a/pkg/utils/namer/namer.go +++ b/pkg/utils/namer/namer.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - "k8s.io/klog" + "k8s.io/klog/v2" ) const ( diff --git a/pkg/utils/namer/utils.go b/pkg/utils/namer/utils.go index cc51fffd2f..48c7bad42f 100644 --- a/pkg/utils/namer/utils.go +++ b/pkg/utils/namer/utils.go @@ -19,7 +19,7 @@ import ( v1 "k8s.io/api/networking/v1" "k8s.io/ingress-gce/pkg/utils/common" - "k8s.io/klog" + "k8s.io/klog/v2" ) const gceResourceNamePattern = "(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)" diff --git a/pkg/utils/negdescription.go b/pkg/utils/negdescription.go index 49cbdd797c..e3fba72039 100644 --- a/pkg/utils/negdescription.go +++ b/pkg/utils/negdescription.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" - "k8s.io/klog" + "k8s.io/klog/v2" ) // Description stores the description for a BackendService. 
diff --git a/pkg/utils/serviceattachment/svcattachmentdesc.go b/pkg/utils/serviceattachment/svcattachmentdesc.go index f3c74a4ce1..35d2f8125f 100644 --- a/pkg/utils/serviceattachment/svcattachmentdesc.go +++ b/pkg/utils/serviceattachment/svcattachmentdesc.go @@ -20,7 +20,7 @@ import ( "encoding/json" "k8s.io/ingress-gce/pkg/utils/descutils" - "k8s.io/klog" + "k8s.io/klog/v2" ) // ServiceAttachmentDesc stores the description for a Service Attachment. diff --git a/pkg/utils/taskqueue.go b/pkg/utils/taskqueue.go index 7def719fb6..fa351f4c45 100644 --- a/pkg/utils/taskqueue.go +++ b/pkg/utils/taskqueue.go @@ -19,7 +19,7 @@ package utils import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "k8s.io/klog" + "k8s.io/klog/v2" ) var ( diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index a01c6a4660..5aed1291e8 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -44,7 +44,7 @@ import ( "k8s.io/ingress-gce/pkg/flags" "k8s.io/ingress-gce/pkg/utils/common" "k8s.io/ingress-gce/pkg/utils/slice" - "k8s.io/klog" + "k8s.io/klog/v2" "k8s.io/legacy-cloud-providers/gce" ) diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 0000000000..94ff801df1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 0000000000..c356960046 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## 
v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000000..5d37e294c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index e9b5520a1c..ad825f5f0a 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,112 +1,182 @@ -# A more minimal logging API for Go +# A minimal logging API for Go -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. 
It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: -This package offers a purely abstract interface, based on these ideas but with -a few twists. 
Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } -# Differences from Dave's ideas + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas The main differences are: -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. Info logs are things you want to tell the user which are not errors. Error logs are, well, errors. If your code receives an `error` from a subordinate function call and is logging that `error` *and not returning it*, use error logs. -2) Verbosity-levels on info logs. This gives developers a chance to indicate +2. Verbosity-levels on info logs. 
This gives developers a chance to indicate arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this +semantic meaning such as "warning", "trace", and "debug." Superficially this may feel very similar, but the primary difference is the lack of semantics. Because verbosity is a numerical value, it's safe to assume that an app running with higher verbosity means more (and less important) logs will be generated. -This is a BETA grade API. +## Implementations (non-exhaustive) There are implementations for the following logging libraries: +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) -# FAQ +## FAQ -## Conceptual +### Conceptual -## Why structured logging? +#### Why structured logging? 
-- **Structured logs are more easily queriable**: Since you've got +- **Structured logs are more easily queryable**: Since you've got key-value pairs, it's much easier to query your structured logs for particular values by filtering on the contents of a particular key -- think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc + the name and namespace of the reconciled object, etc. -- **Structured logging makes it easier to have cross-referencable logs**: +- **Structured logging makes it easier to have cross-referenceable logs**: Similarly to searchability, if you maintain conventions around your keys, it becomes easy to gather all log lines related to a particular concept. - + - **Structured logs allow better dimensions of filtering**: if you have structure to your logs, you've got more precise control over how much information is logged -- you might choose in a particular configuration to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names + matches a certain value, etc., instead of just having v-levels and names to key off of. - **Structured logs better represent structured data**: sometimes, the data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when + objects.) Structured logs allow you to preserve that structure when outputting. -## Why V-levels? +#### Why V-levels? **V-levels give operators an easy way to control the chattiness of log operations**. V-levels provide a way for a given package to distinguish the relative importance or verbosity of a given log message. Then, if a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. +of the package can simply change the v-levels for that library. -## Why not more named levels, like Warning? 
+#### Why not named levels, like Info/Warning/Error? Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences from Dave's ideas](#differences-from-daves-ideas). -## Why not allow format strings, too? +#### Why not allow format strings, too? **Format strings negate many of the benefits of structured logs**: - They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc + regular expressions, etc. - They don't store structured data well, since contents are flattened into - a string + a string. -- They're not cross-referencable +- They're not cross-referenceable. -- They don't compress easily, since the message is not constant +- They don't compress easily, since the message is not constant. -(unless you turn positional parameters into key-value pairs with numerical +(Unless you turn positional parameters into key-value pairs with numerical keys, at which point you've gotten key-value logging with meaningless -keys) +keys.) -## Practical +### Practical -## Why key-value pairs, and not a map? +#### Why key-value pairs, and not a map? Key-value pairs are *much* easier to optimize, especially around allocations. Zap (a structured logger that inspired logr's interface) has @@ -117,26 +187,26 @@ While the interface ends up being a little less obvious, you get potentially better performance, plus avoid making users type `map[string]string{}` every time they want to log. -## What if my V-levels differ between libraries? +#### What if my V-levels differ between libraries? That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. +`WithName` method to pass different loggers to different libraries. Generally, you should take care to ensure that you have relatively consistent V-levels within a given logger, however, as this makes deciding on what verbosity of logs to request easier. -## But I *really* want to use a format string! 
+#### But I really want to use a format string! That's not actually a question. Assuming your question is "how do I convert my mental model of logging with format strings to logging with constant messages": -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. 2. For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair + it, and add that as a key value pair. For instance, consider the following examples (all taken from spots in the Kubernetes codebase): @@ -150,34 +220,59 @@ Kubernetes codebase): response when requesting url", "attempt", retries, "after seconds", seconds, "url", url)` -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to reflect over type %T")` becomes `logger.Info("unable to reflect over type", "type", fmt.Sprintf("%T"))`. In general though, the cases where this is necessary should be few and far between. -## How do I choose my V-levels? +#### How do I choose my V-levels? This is basically the only hard constraint: increase V-levels to denote more verbose or more debug-y logs. Otherwise, you can start out with `0` as "you always want to see this", `1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". +`10` as "I would like to performance-test your log collection stack." Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). +info-type logs.) + +#### How do I choose my keys? 
-## How do I choose my keys +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). While key names are mostly unrestricted (and spaces are acceptable), it's generally a good idea to stick to printable ascii characters, or at least match the general character set of your log lines. +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. 
+ [warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 2bafb13d15..9d92a38f1d 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -16,36 +16,39 @@ limitations under the License. package logr -// Discard returns a valid Logger that discards all messages logged to it. -// It can be used whenever the caller is not interested in the logs. +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. func Discard() Logger { - return DiscardLogger{} + return Logger{ + level: 0, + sink: discardLogSink{}, + } } -// DiscardLogger is a Logger that discards all messages. -type DiscardLogger struct{} +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} -func (l DiscardLogger) Enabled() bool { - return false +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { } -func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Enabled(int) bool { + return false } -func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l discardLogSink) Info(int, string, ...interface{}) { } -func (l DiscardLogger) V(level int) Logger { - return l +func (l discardLogSink) Error(error, string, ...interface{}) { } -func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { +func (l discardLogSink) WithValues(...interface{}) LogSink { return l } -func (l DiscardLogger) WithName(name string) Logger { +func (l discardLogSink) WithName(string) LogSink { return l } - -// Verify that it actually implements the interface -var _ Logger = DiscardLogger{} diff --git 
a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod index 591884e91f..7baec9b570 100644 --- a/vendor/github.com/go-logr/logr/go.mod +++ b/vendor/github.com/go-logr/logr/go.mod @@ -1,3 +1,3 @@ module github.com/go-logr/logr -go 1.14 +go 1.16 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 842428bd3a..44cd398c9f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -16,83 +16,101 @@ limitations under the License. // This design derives from Dave Cheney's blog: // http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. // // Usage // -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". // -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. 
+// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) // -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) // -// With the traditional log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// Errors are much the same. Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of -// // "reconcilers", and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) // -// // later on... -// log.Info("setting foo on object", "value", targetValue, "object", object) +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). +// +// Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). 
+// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: // -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. +// logger.WithName("compactor").Info("started", "time", time.Now()) // -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). // -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) +// Saved Values // -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. 
For example, +// you might want to create a Logger instance per managed object: // -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // -// Parts of a log line +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) // -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) // -// The Logger name consists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly recommended that name segments -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). +// Best Practices // -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. 
// // The log message consists of a constant message attached to the log line. // This should generally be a simple description of what's occurring, and should -// never be a format string. +// never be a format string. Variable information can then be attached using +// named values. // -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. // // Key Naming Conventions // @@ -102,6 +120,7 @@ limitations under the License. // * be constant (not dependent on input data) // * contain only printable characters // * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -110,21 +129,22 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// -// * `"caller"`: the calling information (file/line) of a particular log line. -// * `"error"`: the underlying error value in the `Error` method. -// * `"level"`: the log level. -// * `"logger"`: the name of the associated logger. -// * `"msg"`: the log message. -// * `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// * `"ts"`: the timestamp for a log line. 
+// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // +// Break Glass +// // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: // // Underlier exposes access to the underlying logging implementation. @@ -134,81 +154,220 @@ limitations under the License. // type Underlier interface { // GetUnderlying() // } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. 
+// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. package logr import ( "context" ) -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, OutputStats +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). 
- // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. 
+// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly recommended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). 
- WithName(name string) Logger +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l } -// InfoLogger provides compatibility with code that relies on the v0.1.0 -// interface. +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. 
This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. // -// Deprecated: InfoLogger is an artifact of early versions of this API. New -// users should never use it and existing users should use Logger instead. This -// will be removed in a future release. -type InfoLogger = Logger +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} +// contextKey is how we find Loggers in a context.Context. type contextKey struct{} -// FromContext returns a Logger constructed from ctx or nil if no -// logger details are found. -func FromContext(ctx context.Context) Logger { +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v + return v, nil } - return nil + return Logger{}, notFoundError{} } -// FromContextOrDiscard returns a Logger constructed from ctx or a Logger -// that discards all messages if no logger details are found. +// notFoundError exists to carry an IsNotFound method. 
+type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. func FromContextOrDiscard(ctx context.Context) Logger { if v, ok := ctx.Value(contextKey{}).(Logger); ok { return v @@ -217,12 +376,59 @@ func FromContextOrDiscard(ctx context.Context) Logger { return Discard() } -// NewContext returns a new context derived from ctx that embeds the Logger. -func NewContext(ctx context.Context, l Logger) context.Context { - return context.WithValue(ctx, contextKey{}, l) +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) } -// CallDepthLogger represents a Logger that knows how to climb the call stack +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. 
+ // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -232,35 +438,59 @@ func NewContext(ctx context.Context, l Logger) context.Context { // // This is an optional interface and implementations are not required to // support it. -type CallDepthLogger interface { - Logger - - // WithCallDepth returns a Logger that will offset the call stack by the - // specified number of frames when logging call site information. If depth - // is 0 the attribution should be to the direct caller of this method. If - // depth is 1 the attribution should skip 1 call frame, and so on. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. 
+ // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. // Successive calls to this are additive. - WithCallDepth(depth int) Logger + WithCallDepth(depth int) LogSink } -// WithCallDepth returns a Logger that will offset the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. // -// If the underlying log implementation supports the CallDepthLogger interface, -// the WithCallDepth method will be called and the result returned. If the -// implementation does not support CallDepthLogger, the original Logger will be -// returned. +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. // -// Callers which care about whether this was supported or not should test for -// CallDepthLogger support themselves. 
-func WithCallDepth(logger Logger, depth int) Logger { - if decorator, ok := logger.(CallDepthLogger); ok { - return decorator.WithCallDepth(depth) - } - return logger +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object return by the MarshalLog method instead of the +// original value. +type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type. + MarshalLog() interface{} } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml deleted file mode 100644 index 5677664c21..0000000000 --- a/vendor/k8s.io/klog/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go_import_path: k8s.io/klog -dist: xenial -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . || go vet . - - go test -v -race ./... 
-install: - - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md deleted file mode 100644 index 574a56abbb..0000000000 --- a/vendor/k8s.io/klog/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Contributing Guidelines - -Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: - -_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ - -## Getting Started - -We have full documentation on how to get started contributing here: - -- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests -- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers - -## Mentorship - -- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
- -## Contact Information - -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/k8s.io/klog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. 
- -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS deleted file mode 100644 index 380e514f28..0000000000 --- a/vendor/k8s.io/klog/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners -reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - - pohly - - yagonobre - - vincepri - - detiber -approvers: - - dims - - thockin - - justinsb - - tallclair - - piosz - - brancz - - DirectXMan12 - - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md deleted file mode 100644 index 841468b4b6..0000000000 --- a/vendor/k8s.io/klog/README.md +++ /dev/null @@ -1,97 +0,0 @@ -klog -==== - -klog is a permanent fork of https://github.com/golang/glog. - -## Why was klog created? - -The decision to create klog was one that wasn't made lightly, but it was necessary due to some -drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: - -> The code in this repo [...] is not itself under development - -This makes us unable to solve many use cases without a fork. 
The factors that contributed to needing feature development are listed below: - - * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. - * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it - * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. - -Historical context is available here: - - * https://github.com/kubernetes/kubernetes/issues/61006 - * https://github.com/kubernetes/kubernetes/issues/70264 - * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ - * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ - ----- - -How to use klog -=============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` -- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags -- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) -- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) - -### Coexisting with glog -This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. 
- -## Community, discussion, contribution, and support - -Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). - -You can reach the maintainers of this project at: - -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - ----- - -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. 
diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md deleted file mode 100644 index b53eb960ce..0000000000 --- a/vendor/k8s.io/klog/RELEASE.md +++ /dev/null @@ -1,9 +0,0 @@ -# Release Process - -The `klog` is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS deleted file mode 100644 index 6128a58699..0000000000 --- a/vendor/k8s.io/klog/SECURITY_CONTACTS +++ /dev/null @@ -1,20 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Committee to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. 
-# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -dims -thockin -justinsb -tallclair -piosz -brancz -DirectXMan12 -lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md deleted file mode 100644 index 0d15c00cf3..0000000000 --- a/vendor/k8s.io/klog/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod deleted file mode 100644 index 3877d8546a..0000000000 --- a/vendor/k8s.io/klog/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module k8s.io/klog - -go 1.12 - -require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum deleted file mode 100644 index fb64d277a7..0000000000 --- a/vendor/k8s.io/klog/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go deleted file mode 100644 index 2712ce0afc..0000000000 --- a/vendor/k8s.io/klog/klog.go +++ /dev/null @@ -1,1308 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// klog.Info("Prepare to repel boarders") -// -// klog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if klog.V(2) { -// klog.Info("Starting transaction...") -// } -// -// klog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to standard error. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. 
-// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package klog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "math" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? 
- if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.ParseInt(value, 10, 32) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. 
-func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.ParseInt(value, 10, 32) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. 
-func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.ParseInt(patLev[1], 10, 32) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. 
-func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -// init sets up the defaults and runs flushDaemon. -func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. - logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - go logging.flushDaemon() -} - -// InitFlags is for explicitly initializing the flags. 
-func InitFlags(flagset *flag.FlagSet) { - if flagset == nil { - flagset = flag.CommandLine - } - - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. "+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. 
- toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ - - // If non-empty, overrides the choice of directory in which to write logs. - // See createLogDirs for the full list of possible destinations. - logDir string - - // If non-empty, specifies the path of the file to write logs. mutually exclusive - // with the log-dir option. - logFile string - - // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the - // logFile will be cleaned up. 
If this value is 0, no size limitation will be applied to logFile. - logFileMaxSizeMB uint64 - - // If true, do not add the prefix headers, useful when used with SetOutput - skipHeaders bool - - // If true, do not add the headers to log files - skipLogHeaders bool - - // If true, add the file directory to the header - addDirHeader bool -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. 
-It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - if slash := strings.LastIndex(file, "/"); slash >= 0 { - path := file - file = path[slash+1:] - if l.addDirHeader { - if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { - file = path[dirsep+1:] - } - } - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - if l.skipHeaders { - return buf - } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' 
- buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) 
- if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - -// SetOutput sets the output destination for all severities -func SetOutput(w io.Writer) { - logging.mu.Lock() - defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb - } -} - -// SetOutputBySeverity sets the output destination for specific severity -func SetOutputBySeverity(name string, w io.Writer) { - logging.mu.Lock() - defer logging.mu.Unlock() - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) - } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb -} - -// output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - - if logging.logFile != "" { - // Since we are using a single log file, all of the items in l.file array - // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - l.file[infoLog].Write(data) - } else { - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. 
- f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. 
- if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file - maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. -func CalculateMaxSize() uint64 { - if logging.logFile != "" { - if logging.logFileMaxSizeMB == 0 { - // If logFileMaxSizeMB is zero, we don't have limitations on the log size. - return math.MaxUint64 - } - // Flag logFileMaxSizeMB is in MB for user convenience. - return logging.logFileMaxSizeMB * 1024 * 1024 - } - // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. - return MaxSize -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= sb.maxbytes { - if err := sb.rotateFile(time.Now(), false); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. 
-func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - if sb.logger.skipLogHeaders { - return nil - } - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - maxbytes: CalculateMaxSize(), - } - if err := sb.rotateFile(now, true); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 5 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. 
-func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". 
- if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if klog.V(2) { klog.Info("log this") } -// or -// klog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. 
-// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. 
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) 
-} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. 
-// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go deleted file mode 100644 index e4010ad4df..0000000000 --- a/vendor/k8s.io/klog/klog_file.go +++ /dev/null @@ -1,139 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package klog - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -func createLogDirs() { - if logging.logDir != "" { - logDirs = append(logDirs, logging.logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. 
-// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. -func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { - if logging.logFile != "" { - f, err := openOrCreate(logging.logFile, startup) - if err == nil { - return f, logging.logFile, nil - } - return nil, "", fmt.Errorf("log: unable to create log: %v", err) - } - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := openOrCreate(fname, startup) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} - -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. 
-func openOrCreate(name string, startup bool) (*os.File, error) { - if startup { - f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - return f, err - } - f, err := os.Create(name) - return f, err -} diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 380e514f28..ad5063fdf1 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -15,5 +15,5 @@ approvers: - tallclair - piosz - brancz - - DirectXMan12 - lavalamp + - serathius diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 64d29622e8..a9c945e1d0 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -23,6 +23,18 @@ Historical context is available here: * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ +## Release versioning + +Semantic versioning is used in this repository. It contains several Go modules +with different levels of stability: +- `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags +- `k8s.io/tools` - no stable API yet (may change eventually), `tools/v0.Y.Z` tags +- `examples` - no stable API, no tags, no intention to ever stabilize + +Exempt from the API stability guarantee are items (packages, functions, etc.) +which are marked explicitly as `EXPERIMENTAL` in their docs comment. Those +may still change in incompatible ways or get removed entirely. + ---- How to use klog @@ -32,6 +44,7 @@ How to use klog - You can now use `log_file` instead of `log_dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. 
(See `examples/set_output/usage_set_output.go`) - For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) +- See our documentation on [pkg.go.dev/k8s.io](https://pkg.go.dev/k8s.io/klog). **NOTE**: please use the newer go versions that support semantic import versioning in modules, ideally go 1.11.4 or greater. @@ -85,7 +98,7 @@ The comment from glog.go introduces the ideas: glog.Fatalf("Initialization failed: %s", err) - See the documentation for the V function for an explanation + See the documentation of the V function for an explanation of these examples: if glog.V(2) { diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go new file mode 100644 index 0000000000..bb0380896a --- /dev/null +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -0,0 +1,265 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + + "github.com/go-logr/logr" +) + +// This file provides the implementation of +// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/1602-structured-logging +// +// SetLogger and ClearLogger were originally added to klog.go and got moved +// here. Contextual logging adds a way to retrieve a Logger for direct logging +// without the logging calls in klog.go. 
+// +// The global variables are expected to be modified only during sequential +// parts of a program (init, serial tests) and therefore are not protected by +// mutex locking. + +var ( + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled = true + + // globalLogger is the global Logger chosen by users of klog, nil if + // none is available. + globalLogger *Logger + + // globalLoggerOptions contains the options that were supplied for + // globalLogger. + globalLoggerOptions loggerOptions + + // contextualLogger defines whether globalLogger may get called + // directly. + contextualLogger bool + + // klogLogger is used as fallback for logging through the normal klog code + // when no Logger is set. + klogLogger logr.Logger = logr.New(&klogger{}) +) + +// SetLogger sets a Logger implementation that will be used as backing +// implementation of the traditional klog log calls. klog will do its own +// verbosity checks before calling logger.V().Info. logger.Error is always +// called, regardless of the klog verbosity settings. +// +// If set, all log lines will be suppressed from the regular output, and +// redirected to the logr implementation. +// Use as: +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// To remove a backing logr implemention, use ClearLogger. Setting an +// empty logger with SetLogger(logr.Logger{}) does not work. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func SetLogger(logger logr.Logger) { + SetLoggerWithOptions(logger) +} + +// SetLoggerWithOptions is a more flexible version of SetLogger. Without +// additional options, it behaves exactly like SetLogger. 
By passing +// ContextualLogger(true) as option, it can be used to set a logger that then +// will also get called directly by applications which retrieve it via +// FromContext, Background, or TODO. +// +// Supporting direct calls is recommended because it avoids the overhead of +// routing log entries through klogr into klog and then into the actual Logger +// backend. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { + globalLogger = &logger + globalLoggerOptions = loggerOptions{} + for _, opt := range opts { + opt(&globalLoggerOptions) + } +} + +// ContextualLogger determines whether the logger passed to +// SetLoggerWithOptions may also get called directly. Such a logger cannot rely +// on verbosity checking in klog. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ContextualLogger(enabled bool) LoggerOption { + return func(o *loggerOptions) { + o.contextualLogger = enabled + } +} + +// FlushLogger provides a callback for flushing data buffered by the logger. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func FlushLogger(flush func()) LoggerOption { + return func(o *loggerOptions) { + o.flush = flush + } +} + +// LoggerOption implements the functional parameter paradigm for +// SetLoggerWithOptions. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type LoggerOption func(o *loggerOptions) + +type loggerOptions struct { + contextualLogger bool + flush func() +} + +// SetContextualLogger does the same as SetLogger, but in addition the +// logger may also get called directly by code that retrieves it +// with FromContext, TODO or Background. 
The logger therefore must +// implements its own verbosity checking. +func SetContextualLogger(logger logr.Logger) { + globalLogger = &logger + contextualLogger = true +} + +// ClearLogger removes a backing Logger implementation if one was set earlier +// with SetLogger. +// +// Modifying the logger is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. +func ClearLogger() { + globalLogger = nil + globalLoggerOptions = loggerOptions{} +} + +// EnableContextualLogging controls whether contextual logging is enabled. +// By default it is enabled. When disabled, FromContext avoids looking up +// the logger in the context and always returns the global logger. +// LoggerWithValues, LoggerWithName, and NewContext become no-ops +// and return their input logger respectively context. This may be useful +// to avoid the additional overhead for contextual logging. +// +// This must be called during initialization before goroutines are started. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func EnableContextualLogging(enabled bool) { + contextualLoggingEnabled = enabled +} + +// FromContext retrieves a logger set by the caller or, if not set, +// falls back to the program's global logger (a Logger instance or klog +// itself). +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func FromContext(ctx context.Context) Logger { + if contextualLoggingEnabled { + if logger, err := logr.FromContext(ctx); err == nil { + return logger + } + } + + return Background() +} + +// TODO can be used as a last resort by code that has no means of +// receiving a logger from its caller. FromContext or an explicit logger +// parameter should be used instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func TODO() Logger { + return Background() +} + +// Background retrieves the fallback logger. It should not be called before +// that logger was initialized by the program and not by code that should +// better receive a logger via its parameters. TODO can be used as a temporary +// solution for such code. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func Background() Logger { + if globalLoggerOptions.contextualLogger { + // Is non-nil because globalLoggerOptions.contextualLogger is + // only true if a logger was set. + return *globalLogger + } + + return klogLogger +} + +// LoggerWithValues returns logger.WithValues(...kv) when +// contextual logging is enabled, otherwise the logger. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func LoggerWithValues(logger Logger, kv ...interface{}) Logger { + if contextualLoggingEnabled { + return logger.WithValues(kv...) + } + return logger +} + +// LoggerWithName returns logger.WithName(name) when contextual logging is +// enabled, otherwise the logger. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func LoggerWithName(logger Logger, name string) Logger { + if contextualLoggingEnabled { + return logger.WithName(name) + } + return logger +} + +// NewContext returns logr.NewContext(ctx, logger) when +// contextual logging is enabled, otherwise ctx. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewContext(ctx context.Context, logger Logger) context.Context { + if contextualLoggingEnabled { + return logr.NewContext(ctx, logger) + } + return ctx +} diff --git a/vendor/k8s.io/klog/v2/exit.go b/vendor/k8s.io/klog/v2/exit.go new file mode 100644 index 0000000000..320a147728 --- /dev/null +++ b/vendor/k8s.io/klog/v2/exit.go @@ -0,0 +1,69 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package klog + +import ( + "fmt" + "os" + "time" +) + +var ( + + // ExitFlushTimeout is the timeout that klog has traditionally used during + // calls like Fatal or Exit when flushing log data right before exiting. + // Applications that replace those calls and do not have some specific + // requirements like "exit immediately" can use this value as parameter + // for FlushAndExit. + // + // Can be set for testing purpose or to change the application's + // default. + ExitFlushTimeout = 10 * time.Second + + // OsExit is the function called by FlushAndExit to terminate the program. + // + // Can be set for testing purpose or to change the application's + // default behavior. Note that the function should not simply return + // because callers of functions like Fatal will not expect that. 
+ OsExit = os.Exit +) + +// FlushAndExit flushes log data for a certain amount of time and then calls +// os.Exit. Combined with some logging call it provides a replacement for +// traditional calls like Fatal or Exit. +func FlushAndExit(flushTimeout time.Duration, exitCode int) { + timeoutFlush(flushTimeout) + OsExit(exitCode) +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. Flushing also might take too long. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod index eb297b6a1e..6385c6cde2 100644 --- a/vendor/k8s.io/klog/v2/go.mod +++ b/vendor/k8s.io/klog/v2/go.mod @@ -2,4 +2,7 @@ module k8s.io/klog/v2 go 1.13 -require github.com/go-logr/logr v0.4.0 +require ( + github.com/go-logr/logr v1.2.0 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 +) diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum index 5778f81742..ef731394a5 100644 --- a/vendor/k8s.io/klog/v2/go.sum +++ b/vendor/k8s.io/klog/v2/go.sum @@ -1,2 +1,13 @@ -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr 
v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/vendor/k8s.io/klog/v2/imports.go b/vendor/k8s.io/klog/v2/imports.go new file mode 100644 index 0000000000..43cd08190f --- /dev/null +++ b/vendor/k8s.io/klog/v2/imports.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" +) + +// The reason for providing these aliases is to allow code to work with logr +// without directly importing it. + +// Logger in this package is exactly the same as logr.Logger. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type Logger = logr.Logger + +// LogSink in this package is exactly the same as logr.LogSink. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type LogSink = logr.LogSink + +// Runtimeinfo in this package is exactly the same as logr.RuntimeInfo. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type RuntimeInfo = logr.RuntimeInfo + +var ( + // New is an alias for logr.New. + // + // Experimental + // + // Notice: This variable is EXPERIMENTAL and may be changed or removed in a + // later release. + New = logr.New +) diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go new file mode 100644 index 0000000000..ac88682a2c --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -0,0 +1,159 @@ +// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2022 The Kubernetes Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package buffer provides a cache for byte.Buffer instances that can be reused +// to avoid frequent allocation and deallocation. It also has utility code +// for log header formatting that use these buffers. +package buffer + +import ( + "bytes" + "os" + "sync" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +var ( + // Pid is inserted into log headers. Can be overridden for tests. 
+ Pid = os.Getpid() +) + +// Buffer holds a single byte.Buffer for reuse. The zero value is ready for +// use. It also provides some helper methods for output formatting. +type Buffer struct { + bytes.Buffer + Tmp [64]byte // temporary byte array for creating headers. + next *Buffer +} + +// Buffers manages the reuse of individual buffer instances. It is thread-safe. +type Buffers struct { + // mu protects the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + mu sync.Mutex + + // freeList is a list of byte buffers, maintained under mu. + freeList *Buffer +} + +// GetBuffer returns a new, ready-to-use buffer. +func (bl *Buffers) GetBuffer() *Buffer { + bl.mu.Lock() + b := bl.freeList + if b != nil { + bl.freeList = b.next + } + bl.mu.Unlock() + if b == nil { + b = new(Buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// PutBuffer returns a buffer to the free list. +func (bl *Buffers) PutBuffer(b *Buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + bl.mu.Lock() + b.next = bl.freeList + bl.freeList = b + bl.mu.Unlock() +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.Tmp[i]. +func (buf *Buffer) twoDigits(i, d int) { + buf.Tmp[i+1] = digits[d%10] + d /= 10 + buf.Tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.Tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *Buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.Tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.Tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.Tmp[i]. +func (buf *Buffer) someDigits(i, d int) int { + // Print into the top, then copy down. 
We know there's space for at least + // a 10-digit number. + j := len(buf.Tmp) + for { + j-- + buf.Tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.Tmp[i:], buf.Tmp[j:]) +} + +// FormatHeader formats a log header using the provided file name and line number. +func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ' ' + buf.nDigits(7, 22, Pid, ' ') // TODO: should be TID + buf.Tmp[29] = ' ' + buf.Write(buf.Tmp[:30]) + buf.WriteString(file) + buf.Tmp[0] = ':' + n := buf.someDigits(1, line) + buf.Tmp[n+1] = ']' + buf.Tmp[n+2] = ' ' + buf.Write(buf.Tmp[:n+3]) +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go new file mode 100644 index 0000000000..d897313682 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -0,0 +1,225 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "strconv" +) + +// WithValues implements LogSink.WithValues. The old key/value pairs are +// assumed to be well-formed, the new ones are checked and padded if +// necessary. It returns a new slice. +func WithValues(oldKV, newKV []interface{}) []interface{} { + if len(newKV) == 0 { + return oldKV + } + newLen := len(oldKV) + len(newKV) + hasMissingValue := newLen%2 != 0 + if hasMissingValue { + newLen++ + } + // The new LogSink must have its own slice. + kv := make([]interface{}, 0, newLen) + kv = append(kv, oldKV...) + kv = append(kv, newKV...) + if hasMissingValue { + kv = append(kv, missingValue) + } + return kv +} + +// TrimDuplicates deduplicates elements provided in multiple key/value tuple +// slices, whilst maintaining the distinction between where the items are +// contained. +func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { + // maintain a map of all seen keys + seenKeys := map[interface{}]struct{}{} + // build the same number of output slices as inputs + outs := make([][]interface{}, len(kvLists)) + // iterate over the input slices backwards, as 'later' kv specifications + // of the same key will take precedence over earlier ones + for i := len(kvLists) - 1; i >= 0; i-- { + // initialise this output slice + outs[i] = []interface{}{} + // obtain a reference to the kvList we are processing + // and make sure it has an even number of entries + kvList := kvLists[i] + if len(kvList)%2 != 0 { + kvList = append(kvList, missingValue) + } + + // start iterating at len(kvList) - 2 (i.e. 
the 2nd last item) for + // slices that have an even number of elements. + // We add (len(kvList) % 2) here to handle the case where there is an + // odd number of elements in a kvList. + // If there is an odd number, then the last element in the slice will + // have the value 'null'. + for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { + k := kvList[i2] + // if we have already seen this key, do not include it again + if _, ok := seenKeys[k]; ok { + continue + } + // make a note that we've observed a new key + seenKeys[k] = struct{}{} + // attempt to obtain the value of the key + var v interface{} + // i2+1 should only ever be out of bounds if we handling the first + // iteration over a slice with an odd number of elements + if i2+1 < len(kvList) { + v = kvList[i2+1] + } + // add this KV tuple to the *start* of the output list to maintain + // the original order as we are iterating over the slice backwards + outs[i] = append([]interface{}{k, v}, outs[i]...) + } + } + return outs +} + +const missingValue = "(MISSING)" + +// KVListFormat serializes all key/value pairs into the provided buffer. +// A space gets inserted before the first pair and between each pair. +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + for i := 0; i < len(keysAndValues); i += 2 { + var v interface{} + k := keysAndValues[i] + if i+1 < len(keysAndValues) { + v = keysAndValues[i+1] + } else { + v = missingValue + } + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if k, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. 
+ b.WriteString(k)
+ } else {
+ b.WriteString(fmt.Sprintf("%s", k))
+ }
+
+ // The type checks are sorted so that more frequently used ones
+ // come first because that is then faster in the common
+ // cases. In Kubernetes, ObjectRef (a Stringer) is more common
+ // than plain strings
+ // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+ switch v := v.(type) {
+ case fmt.Stringer:
+ writeStringValue(b, true, StringerToString(v))
+ case string:
+ writeStringValue(b, true, v)
+ case error:
+ writeStringValue(b, true, ErrorToString(v))
+ case []byte:
+ // In https://github.com/kubernetes/klog/pull/237 it was decided
+ // to format byte slices with "%+q". The advantages of that are:
+ // - readable output if the bytes happen to be printable
+ // - non-printable bytes get represented as unicode escape
+ // sequences (\uxxxx)
+ //
+ // The downsides are that we cannot use the faster
+ // strconv.Quote here and that multi-line output is not
+ // supported. If developers know that a byte array is
+ // printable and they want multi-line output, they can
+ // convert the value to string before logging it.
+ b.WriteByte('=')
+ b.WriteString(fmt.Sprintf("%+q", v))
+ default:
+ writeStringValue(b, false, fmt.Sprintf("%+v", v))
+ }
+ }
+}
+
+// StringerToString converts a Stringer to a string,
+// handling panics if they occur.
+func StringerToString(s fmt.Stringer) (ret string) {
+ defer func() {
+ if err := recover(); err != nil {
+ ret = fmt.Sprintf("<panic: %s>", err)
+ }
+ }()
+ ret = s.String()
+ return
+}
+
+// ErrorToString converts an error to a string,
+// handling panics if they occur. 
+func ErrorToString(err error) (ret string) {
+ defer func() {
+ if err := recover(); err != nil {
+ ret = fmt.Sprintf("<panic: %s>", err)
+ }
+ }()
+ ret = err.Error()
+ return
+}
+
+func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+ data := []byte(v)
+ index := bytes.IndexByte(data, '\n')
+ if index == -1 {
+ b.WriteByte('=')
+ if quote {
+ // Simple string, quote quotation marks and non-printable characters.
+ b.WriteString(strconv.Quote(v))
+ return
+ }
+ // Non-string with no line breaks.
+ b.WriteString(v)
+ return
+ }
+
+ // Complex multi-line string, show as-is with indention like this:
+ // I... "hello world" key=<
+ // line 1
+ // line 2
+ // >
+ //
+ // Tabs indent the lines of the value while the end of string delimiter
+ // is indented with a space. That has two purposes:
+ // - visual difference between the two for a human reader because indention
+ // will be different
+ // - no ambiguity when some value line starts with the end delimiter
+ //
+ // One downside is that the output cannot distinguish between strings that
+ // end with a line break and those that don't because the end delimiter
+ // will always be on the next line.
+ b.WriteString("=<\n")
+ for index != -1 {
+ b.WriteByte('\t')
+ b.Write(data[0 : index+1])
+ data = data[index+1:]
+ index = bytes.IndexByte(data, '\n')
+ }
+ if len(data) == 0 {
+ // String ended with line break, don't add another.
+ b.WriteString(" >")
+ } else {
+ // No line break at end of last line, write rest of string and
+ // add one.
+ b.WriteByte('\t')
+ b.Write(data)
+ b.WriteString("\n >")
+ }
+}
diff --git a/vendor/k8s.io/klog/v2/internal/severity/severity.go b/vendor/k8s.io/klog/v2/internal/severity/severity.go
new file mode 100644
index 0000000000..30fa1834f0
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/severity/severity.go
@@ -0,0 +1,58 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2022 The Kubernetes Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package severity provides definitions for klog severity (info, warning, ...) +package severity + +import ( + "strings" +) + +// severity identifies the sort of log: info, warning etc. The binding to flag.Value +// is handled in klog.go +type Severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + InfoLog Severity = iota + WarningLog + ErrorLog + FatalLog + NumSeverity = 4 +) + +// Char contains one shortcut letter per severity level. +const Char = "IWEF" + +// Name contains one name per severity level. +var Name = []string{ + InfoLog: "INFO", + WarningLog: "WARNING", + ErrorLog: "ERROR", + FatalLog: "FATAL", +} + +// ByName looks up a severity level by name. +func ByName(s string) (Severity, bool) { + s = strings.ToUpper(s) + for i, name := range Name { + if name == s { + return Severity(i), true + } + } + return 0, false +} diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go new file mode 100644 index 0000000000..db58f8baa6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -0,0 +1,94 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "fmt" + "reflect" + + "github.com/go-logr/logr" +) + +// ObjectRef references a kubernetes object +type ObjectRef struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` +} + +func (ref ObjectRef) String() string { + if ref.Namespace != "" { + return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + } + return ref.Name +} + +// MarshalLog ensures that loggers with support for structured output will log +// as a struct by removing the String method via a custom type. +func (ref ObjectRef) MarshalLog() interface{} { + type or ObjectRef + return or(ref) +} + +var _ logr.Marshaler = ObjectRef{} + +// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +// this interface may expand in the future, but will always be a subset of the +// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface +type KMetadata interface { + GetName() string + GetNamespace() string +} + +// KObj returns ObjectRef from ObjectMeta +func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + + return ObjectRef{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} + +// KRef returns ObjectRef from name and namespace +func KRef(namespace, name string) ObjectRef { + return ObjectRef{ + Name: name, + Namespace: namespace, + } +} + +// KObjs returns slice of ObjectRef from an slice of ObjectMeta +func KObjs(arg interface{}) []ObjectRef { + s := reflect.ValueOf(arg) 
+ if s.Kind() != reflect.Slice { + return nil + } + objectRefs := make([]ObjectRef, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + if v, ok := s.Index(i).Interface().(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil + } + } + return objectRefs +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 1e187f7635..bb6f64be49 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,7 +81,6 @@ import ( "math" "os" "path/filepath" - "reflect" "runtime" "strconv" "strings" @@ -90,81 +89,58 @@ import ( "time" "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/utils/clock" ) -// severity identifies the sort of log: info, warning etc. It also implements +// severityValue identifies the sort of log: info, warning etc. It also implements // the flag.Value interface. The -stderrthreshold flag is of type severity and // should be modified only through the flag.Value interface. The values match // the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", +type severityValue struct { + severity.Severity } // get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) +func (s *severityValue) get() severity.Severity { + return severity.Severity(atomic.LoadInt32((*int32)(&s.Severity))) } // set sets the value of the severity. 
-func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) +func (s *severityValue) set(val severity.Severity) { + atomic.StoreInt32((*int32)(&s.Severity), int32(val)) } // String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) +func (s *severityValue) String() string { + return strconv.FormatInt(int64(s.Severity), 10) } // Get is part of the flag.Getter interface. -func (s *severity) Get() interface{} { - return *s +func (s *severityValue) Get() interface{} { + return s.Severity } // Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity +func (s *severityValue) Set(value string) error { + var threshold severity.Severity // Is it a known name? - if v, ok := severityByName(value); ok { + if v, ok := severity.ByName(value); ok { threshold = v } else { v, err := strconv.ParseInt(value, 10, 32) if err != nil { return err } - threshold = severity(v) + threshold = severity.Severity(v) } logging.stderrThreshold.set(threshold) return nil } -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { lines int64 @@ -187,10 +163,10 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [severity.NumSeverity]*OutputStats{ + severity.InfoLog: &Stats.Info, + severity.WarningLog: &Stats.Warning, + severity.ErrorLog: &Stats.Error, } // Level is exported because it appears in the arguments to V and is @@ -404,9 +380,11 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults and runs flushDaemon. +// init sets up the defaults. 
func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.stderrThreshold = severityValue{ + Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. + } logging.setVState(0, nil, false) logging.logDir = "" logging.logFile = "" @@ -417,7 +395,7 @@ func init() { logging.addDirHeader = false logging.skipLogHeaders = false logging.oneOutput = false - go logging.flushDaemon() + logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. @@ -457,20 +435,23 @@ type loggingT struct { alsoToStderr bool // The -alsologtostderr flag. // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. + stderrThreshold severityValue // The -stderrthreshold flag. - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex + // bufferCache maintains the free list. It uses its own mutex // so buffers can be grabbed and printed to without holding the main lock, // for better parallelization. - freeListMu sync.Mutex + bufferCache buffer.Buffers // mu protects the remaining elements of this structure and is // used to synchronize logging. mu sync.Mutex // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter + file [severity.NumSeverity]flushSyncWriter + // flushD holds a flushDaemon that frequently flushes log file buffers. + flushD *flushDaemon + // flushInterval is the interval for periodic flushing. If zero, + // the global default will be used. + flushInterval time.Duration // pcs is used in V to avoid an allocation when computing the caller's PC. pcs [1]uintptr // vmap is a cache of the V Level for each V() call site, identified by PC. 
@@ -508,9 +489,6 @@ type loggingT struct { // If true, add the file directory to the header addDirHeader bool - // If set, all output will be redirected unconditionally to the provided logr.Logger - logr logr.Logger - // If true, messages will not be propagated to lower severity log levels oneOutput bool @@ -518,13 +496,6 @@ type loggingT struct { filter LogFilter } -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - var logging loggingT // setVState sets a consistent state for V logging. @@ -547,35 +518,6 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool l.verbosity.set(verbosity) } -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - var timeNow = time.Now // Stubbed out for testing. /* @@ -595,7 +537,7 @@ where the fields are defined as follows: line The line number msg The user-supplied message */ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { +func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, string, int) { _, file, line, ok := runtime.Caller(3 + depth) if !ok { file = "???" @@ -615,133 +557,68 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { } // formatHeader formats a log header using the provided file name and line number. 
-func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() +func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { + buf := l.bufferCache.GetBuffer() if l.skipHeaders { return buf } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) + now := timeNow() + buf.FormatHeader(s, file, line, now) return buf } -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. 
-func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) +func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { - buf, file, line := l.header(s, 0) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logr != nil { - l.putBuffer(buf) - buf = l.getBuffer() +func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + // if logger is set, we clear the generated header as we rely on the backing + // logger implementation to print headers + if logger != nil { + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprintln(buf, args...) - l.output(s, logr, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) { - l.printDepth(s, logr, filter, 1, args...) +func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { + l.printDepth(s, logger, filter, 1, args...) 
} -func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { - l.putBuffer(buf) - buf = l.getBuffer() + if logger != nil { + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) } fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { + if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, depth, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) +func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { + l.printfDepth(s, logger, filter, 1, format, args...) 
+} + +func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { + buf, file, line := l.header(s, depth) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { - l.putBuffer(buf) - buf = l.getBuffer() + if logger != nil { + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -750,19 +627,19 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, 0 /* depth */, file, line, false) + l.output(s, logger, buf, depth, file, line, false) } // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. 
-func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers - if logr != nil { - l.putBuffer(buf) - buf = l.getBuffer() + if logger != nil { + l.bufferCache.PutBuffer(buf) + buf = l.bufferCache.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -771,72 +648,49 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil if buf.Bytes()[buf.Len()-1] != '\n' { buf.WriteByte('\n') } - l.output(s, logr, buf, 2 /* depth */, file, line, alsoToStderr) + l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - logr.WithCallDepth(loggr, depth+2).Error(err, msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Error(err, msg, keysAndValues...) return } - l.printS(err, errorLog, depth+1, msg, keysAndValues...) + l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. 
-func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } - if loggr != nil { - logr.WithCallDepth(loggr, depth+2).Info(msg, keysAndValues...) + if logger != nil { + logger.WithCallDepth(depth+2).Info(msg, keysAndValues...) return } - l.printS(nil, infoLog, depth+1, msg, keysAndValues...) + l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. // set log severity by s -func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { - b := &bytes.Buffer{} - b.WriteString(fmt.Sprintf("%q", msg)) +func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { + // Only create a new buffer if we don't have one cached. + b := l.bufferCache.GetBuffer() + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) if err != nil { - b.WriteByte(' ') - b.WriteString(fmt.Sprintf("err=%q", err.Error())) - } - kvListFormat(b, keysAndValues...) 
- l.printDepth(s, logging.logr, nil, depth+1, b) -} - -const missingValue = "(MISSING)" - -func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { - for i := 0; i < len(keysAndValues); i += 2 { - var v interface{} - k := keysAndValues[i] - if i+1 < len(keysAndValues) { - v = keysAndValues[i+1] - } else { - v = missingValue - } - b.WriteByte(' ') - - switch v.(type) { - case string, error: - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - case []byte: - b.WriteString(fmt.Sprintf("%s=%+q", k, v)) - default: - if _, ok := v.(fmt.Stringer); ok { - b.WriteString(fmt.Sprintf("%s=%q", k, v)) - } else { - b.WriteString(fmt.Sprintf("%s=%+v", k, v)) - } - } + serialize.KVListFormat(&b.Buffer, "err", err) } + serialize.KVListFormat(&b.Buffer, keysAndValues...) + l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + // Make the buffer available for reuse. + l.bufferCache.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -856,24 +710,11 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { return rb.w.Write(bytes) } -// SetLogger will set the backing logr implementation for klog. -// If set, all log lines will be suppressed from the regular Output, and -// redirected to the logr implementation. -// Use as: -// ... 
-// klog.SetLogger(zapr.NewLogger(zapLog)) -func SetLogger(logr logr.Logger) { - logging.mu.Lock() - defer logging.mu.Unlock() - - logging.logr = logr -} - // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { rb := &redirectBuffer{ w: w, } @@ -885,7 +726,7 @@ func SetOutput(w io.Writer) { func SetOutputBySeverity(name string, w io.Writer) { logging.mu.Lock() defer logging.mu.Unlock() - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) } @@ -904,8 +745,16 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { + var isLocked = true l.mu.Lock() + defer func() { + if isLocked { + // Unlock before returning in case that it wasn't done already. 
+ l.mu.Unlock() + } + }() + if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { buf.Write(stacks(false)) @@ -915,10 +764,10 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, f if log != nil { // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == errorLog { - logr.WithCallDepth(l.logr, depth+3).Error(nil, string(data)) + if s == severity.ErrorLog { + globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) } else { - logr.WithCallDepth(log, depth+3).Info(string(data)) + log.WithCallDepth(depth + 3).Info(string(data)) } } else if l.toStderr { os.Stderr.Write(data) @@ -930,13 +779,13 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, f if logging.logFile != "" { // Since we are using a single log file, all of the items in l.file array // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { + if l.file[severity.InfoLog] == nil { + if err := l.createFiles(severity.InfoLog); err != nil { os.Stderr.Write(data) // Make sure the message appears somewhere. 
l.exit(err) } } - l.file[infoLog].Write(data) + l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -949,27 +798,28 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, f l.file[s].Write(data) } else { switch s { - case fatalLog: - l.file[fatalLog].Write(data) + case severity.FatalLog: + l.file[severity.FatalLog].Write(data) fallthrough - case errorLog: - l.file[errorLog].Write(data) + case severity.ErrorLog: + l.file[severity.ErrorLog].Write(data) fallthrough - case warningLog: - l.file[warningLog].Write(data) + case severity.WarningLog: + l.file[severity.WarningLog].Write(data) fallthrough - case infoLog: - l.file[infoLog].Write(data) + case severity.InfoLog: + l.file[severity.InfoLog].Write(data) } } } } - if s == fatalLog { + if s == severity.FatalLog { // If we got here via Exit rather than Fatal, print no stacks. if atomic.LoadUint32(&fatalNoStacks) > 0 { l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(1) } // Dump all goroutine stacks before exiting. trace := stacks(true) @@ -979,40 +829,24 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, f } // Write the stack trace for all goroutines to the files. logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { + for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. f.Write(trace) } } l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + isLocked = false + timeoutFlush(ExitFlushTimeout) + OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. 
} - l.putBuffer(buf) - l.mu.Unlock() + l.bufferCache.PutBuffer(buf) + if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) atomic.AddInt64(&stats.bytes, int64(len(data))) } } -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - // stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. func stacks(all bool) []byte { // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. @@ -1049,7 +883,7 @@ func (l *loggingT) exit(err error) { return } l.flushAll() - os.Exit(2) + OsExit(2) } // syncBuffer joins a bufio.Writer to its underlying file, providing access to the @@ -1060,7 +894,7 @@ type syncBuffer struct { logger *loggingT *bufio.Writer file *os.File - sev severity + sev severity.Severity nbytes uint64 // The number of bytes written to this file maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. } @@ -1106,7 +940,7 @@ func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { sb.file.Close() } var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.file, _, err = create(severity.Name[sb.sev], now, startup) if err != nil { return err } @@ -1144,11 +978,16 @@ const bufferSize = 256 * 1024 // createFiles creates all the log files for severity from sev down to infoLog. // l.mu is held. 
-func (l *loggingT) createFiles(sev severity) error { +func (l *loggingT) createFiles(sev severity.Severity) error { + interval := l.flushInterval + if interval == 0 { + interval = flushInterval + } + l.flushD.run(interval) now := time.Now() // Files are created in decreasing severity order, so as soon as we find one // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { + for s := sev; s >= severity.InfoLog && l.file[s] == nil; s-- { sb := &syncBuffer{ logger: l, sev: s, @@ -1165,12 +1004,93 @@ func (l *loggingT) createFiles(sev severity) error { const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() +type flushDaemon struct { + mu sync.Mutex + clock clock.WithTicker + flush func() + stopC chan struct{} + stopDone chan struct{} +} + +// newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a +// clock.RealClock is used. +func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { + if tickClock == nil { + tickClock = clock.RealClock{} + } + return &flushDaemon{ + flush: flush, + clock: tickClock, } } +// run starts a goroutine that periodically calls the daemons flush function. +// Calling run on an already running daemon will have no effect. +func (f *flushDaemon) run(interval time.Duration) { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC != nil { // daemon already running + return + } + + f.stopC = make(chan struct{}, 1) + f.stopDone = make(chan struct{}, 1) + + ticker := f.clock.NewTicker(interval) + go func() { + defer ticker.Stop() + defer func() { f.stopDone <- struct{}{} }() + for { + select { + case <-ticker.C(): + f.flush() + case <-f.stopC: + f.flush() + return + } + } + }() +} + +// stop stops the running flushDaemon and waits until the daemon has shut down. +// Calling stop on a daemon that isn't running will have no effect. 
+func (f *flushDaemon) stop() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.stopC == nil { // daemon not running + return + } + + f.stopC <- struct{}{} + <-f.stopDone + + f.stopC = nil + f.stopDone = nil +} + +// isRunning returns true if the flush daemon is running. +func (f *flushDaemon) isRunning() bool { + f.mu.Lock() + defer f.mu.Unlock() + return f.stopC != nil +} + +// StopFlushDaemon stops the flush daemon, if running. +// This prevents klog from leaking goroutines on shutdown. After stopping +// the daemon, you can still manually flush buffers by calling Flush(). +func StopFlushDaemon() { + logging.flushD.stop() +} + +// StartFlushDaemon ensures that the flush daemon runs with the given delay +// between flush calls. If it is already running, it gets restarted. +func StartFlushDaemon(interval time.Duration) { + StopFlushDaemon() + logging.flushD.run(interval) +} + // lockAndFlushAll is like flushAll but locks l.mu first. func (l *loggingT) lockAndFlushAll() { l.mu.Lock() @@ -1182,13 +1102,16 @@ func (l *loggingT) lockAndFlushAll() { // l.mu is held. func (l *loggingT) flushAll() { // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { + for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { file.Flush() // ignore error file.Sync() // ignore error } } + if globalLoggerOptions.flush != nil { + globalLoggerOptions.flush() + } } // CopyStandardLogTo arranges for messages written to the Go "log" package's @@ -1199,7 +1122,7 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. 
func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) + sev, ok := severity.ByName(name) if !ok { panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) } @@ -1211,7 +1134,7 @@ func CopyStandardLogTo(name string) { // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge severity.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -1235,7 +1158,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity(lb), logging.logr, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) return len(b), nil } @@ -1269,22 +1192,22 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr logr.Logger - filter LogFilter + logr *logr.Logger } func newVerbose(level Level, b bool) Verbose { - if logging.logr == nil { - return Verbose{b, nil, logging.filter} + if globalLogger == nil { + return Verbose{b, nil} } - return Verbose{b, logging.logr.V(int(level)), logging.filter} + v := globalLogger.V(int(level)) + return Verbose{b, &v} } // V reports whether verbosity at the call site is at least the requested level. // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. 
// Thus, one may write either -// if glog.V(2).Enabled() { klog.Info("log this") } +// if klog.V(2).Enabled() { klog.Info("log this") } // or // klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does @@ -1315,9 +1238,14 @@ func V(level Level) Verbose { if runtime.Callers(2, logging.pcs[:]) == 0 { return newVerbose(level, false) } - v, ok := logging.vmap[logging.pcs[0]] + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := logging.pcs[0] - 1 + v, ok := logging.vmap[pc] if !ok { - v = logging.setV(logging.pcs[0]) + v = logging.setV(pc) } return newVerbose(level, v >= level) } @@ -1335,7 +1263,15 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(infoLog, v.logr, v.filter, args...) + logging.print(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) } } @@ -1343,7 +1279,15 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(infoLog, v.logr, v.filter, args...) + logging.println(severity.InfoLog, v.logr, logging.filter, args...) + } +} + +// InfolnDepth is equivalent to the global InfolnDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfolnDepth(depth int, args ...interface{}) { + if v.enabled { + logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) 
} } @@ -1351,7 +1295,15 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(infoLog, v.logr, v.filter, format, args...) + logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + } +} + +// InfofDepth is equivalent to the global InfofDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { + if v.enabled { + logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) } } @@ -1359,20 +1311,28 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) + logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) } } // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) +} + +// InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + if v.enabled { + logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, args...) + logging.errorS(err, v.logr, logging.filter, 0, msg, args...) 
} } @@ -1380,32 +1340,44 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(infoLog, logging.logr, logging.filter, args...) + logging.print(severity.InfoLog, globalLogger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(infoLog, logging.logr, logging.filter, args...) + logging.println(severity.InfoLog, globalLogger, logging.filter, args...) +} + +// InfolnDepth acts as Infoln but uses depth to determine which call frame to log. +// InfolnDepth(0, "msg") is the same as Infoln("msg"). +func InfolnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Infof(format string, args ...interface{}) { - logging.printf(infoLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) +} + +// InfofDepth acts as Infof but uses depth to determine which call frame to log. 
+// InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). +func InfofDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1417,55 +1389,79 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(warningLog, logging.logr, logging.filter, args...) + logging.print(severity.WarningLog, globalLogger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(warningLog, logging.logr, logging.filter, args...) + logging.println(severity.WarningLog, globalLogger, logging.filter, args...) +} + +// WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. +// WarninglnDepth(0, "msg") is the same as Warningln("msg"). +func WarninglnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) 
} // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) +} + +// WarningfDepth acts as Warningf but uses depth to determine which call frame to log. +// WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). +func WarningfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(errorLog, logging.logr, logging.filter, args...) + logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(errorLog, logging.logr, logging.filter, args...) + logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) +} + +// ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. +// ErrorlnDepth(0, "msg") is the same as Errorln("msg"). +func ErrorlnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) 
} // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) +} + +// ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. +// ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). +func ErrorfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1478,71 +1474,97 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(fatalLog, logging.logr, logging.filter, args...) 
+ logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) +} + +// FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. +// FatallnDepth(0, "msg") is the same as Fatalln("msg"). +func FatallnDepth(depth int, args ...interface{}) { + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. +// FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). 
+func FatalfDepth(depth int, format string, args ...interface{}) { + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. // It allows Exit and relatives to use the Fatal logs. var fatalNoStacks uint32 -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, logging.logr, logging.filter, args...) + logging.print(severity.FatalLog, globalLogger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, logging.logr, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, logging.logr, logging.filter, args...) + logging.println(severity.FatalLog, globalLogger, logging.filter, args...) +} + +// ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. +// ExitlnDepth(0, "msg") is the same as Exitln("msg"). +func ExitlnDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) } -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). 
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, logging.logr, logging.filter, format, args...) + logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) +} + +// ExitfDepth acts as Exitf but uses depth to determine which call frame to log. +// ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). +func ExitfDepth(depth int, format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, @@ -1553,53 +1575,10 @@ type LogFilter interface { FilterS(msg string, keysAndValues []interface{}) (string, []interface{}) } +// SetLogFilter installs a filter that is used for all log calls. +// +// Modifying the filter is not thread-safe and should be done while no other +// goroutines invoke log calls, usually during program initialization. 
func SetLogFilter(filter LogFilter) { - logging.mu.Lock() - defer logging.mu.Unlock() - logging.filter = filter } - -// ObjectRef references a kubernetes object -type ObjectRef struct { - Name string `json:"name"` - Namespace string `json:"namespace,omitempty"` -} - -func (ref ObjectRef) String() string { - if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) - } - return ref.Name -} - -// KMetadata is a subset of the kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -// this interface may expand in the future, but will always be a subset of the -// kubernetes k8s.io/apimachinery/pkg/apis/meta/v1.Object interface -type KMetadata interface { - GetName() string - GetNamespace() string -} - -// KObj returns ObjectRef from ObjectMeta -func KObj(obj KMetadata) ObjectRef { - if obj == nil { - return ObjectRef{} - } - if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { - return ObjectRef{} - } - - return ObjectRef{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } -} - -// KRef returns ObjectRef from name and namespace -func KRef(namespace, name string) ObjectRef { - return ObjectRef{ - Name: name, - Namespace: namespace, - } -} diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index de830d9221..1025d644f3 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -22,9 +22,7 @@ import ( "errors" "fmt" "os" - "os/user" "path/filepath" - "runtime" "strings" "sync" "time" @@ -57,38 +55,6 @@ func init() { } } -func getUserName() string { - userNameOnce.Do(func() { - // On Windows, the Go 'user' package requires netapi32.dll. - // This affects Windows Nano Server: - // https://github.com/golang/go/issues/21867 - // Fallback to using environment variables. - if runtime.GOOS == "windows" { - u := os.Getenv("USERNAME") - if len(u) == 0 { - return - } - // Sanitize the USERNAME since it may contain filepath separators. 
- u = strings.Replace(u, `\`, "_", -1) - - // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' - d := os.Getenv("USERDOMAIN") - if len(d) != 0 { - userName = d + "_" + u - } else { - userName = u - } - } else { - current, err := user.Current() - if err == nil { - userName = current.Username - } - } - }) - - return userName -} - // shortHostname returns its argument, truncating at the first period. // For instance, given "www.google.com" it returns "www". func shortHostname(hostname string) string { diff --git a/vendor/k8s.io/klog/v2/klog_file_others.go b/vendor/k8s.io/klog/v2/klog_file_others.go new file mode 100644 index 0000000000..aa46726851 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_others.go @@ -0,0 +1,19 @@ +//go:build !windows +// +build !windows + +package klog + +import ( + "os/user" +) + +func getUserName() string { + userNameOnce.Do(func() { + current, err := user.Current() + if err == nil { + userName = current.Username + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klog_file_windows.go b/vendor/k8s.io/klog/v2/klog_file_windows.go new file mode 100644 index 0000000000..2517f9c538 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klog_file_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +package klog + +import ( + "os" + "strings" +) + +func getUserName() string { + userNameOnce.Do(func() { + // On Windows, the Go 'user' package requires netapi32.dll. + // This affects Windows Nano Server: + // https://github.com/golang/go/issues/21867 + // Fallback to using environment variables. + u := os.Getenv("USERNAME") + if len(u) == 0 { + return + } + // Sanitize the USERNAME since it may contain filepath separators. 
+ u = strings.Replace(u, `\`, "_", -1) + + // user.Current().Username normally produces something like 'USERDOMAIN\USERNAME' + d := os.Getenv("USERDOMAIN") + if len(d) != 0 { + userName = d + "_" + u + } else { + userName = u + } + }) + + return userName +} diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go new file mode 100644 index 0000000000..cdb3834fa1 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -0,0 +1,92 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" +) + +// NewKlogr returns a logger that is functionally identical to +// klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The +// difference is that it uses a simpler implementation. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewKlogr() Logger { + return New(&klogger{}) +} + +// klogger is a subset of klogr/klogr.go. It had to be copied to break an +// import cycle (klogr wants to use klog, and klog wants to use klogr). 
+type klogger struct { + level int + callDepth int + prefix string + values []interface{} +} + +func (l *klogger) Init(info logr.RuntimeInfo) { + l.callDepth += info.CallDepth +} + +func (l klogger) Info(level int, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) +} + +func (l klogger) Enabled(level int) bool { + return V(Level(level)).Enabled() +} + +func (l klogger) Error(err error, msg string, kvList ...interface{}) { + trimmed := serialize.TrimDuplicates(l.values, kvList) + if l.prefix != "" { + msg = l.prefix + ": " + msg + } + ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) +} + +// WithName returns a new logr.Logger with the specified name appended. klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l klogger) WithName(name string) logr.LogSink { + if len(l.prefix) > 0 { + l.prefix = l.prefix + "/" + } + l.prefix += name + return &l +} + +func (l klogger) WithValues(kvList ...interface{}) logr.LogSink { + l.values = serialize.WithValues(l.values, kvList) + return &l +} + +func (l klogger) WithCallDepth(depth int) logr.LogSink { + l.callDepth += depth + return &l +} + +var _ logr.LogSink = &klogger{} +var _ logr.CallDepthLogSink = &klogger{} diff --git a/vendor/k8s.io/utils/clock/README.md b/vendor/k8s.io/utils/clock/README.md new file mode 100644 index 0000000000..ad2a8868aa --- /dev/null +++ b/vendor/k8s.io/utils/clock/README.md @@ -0,0 +1,4 @@ +# Clock + +This package provides an interface for time-based operations. It allows +mocking time for testing. 
diff --git a/vendor/k8s.io/utils/clock/clock.go b/vendor/k8s.io/utils/clock/clock.go new file mode 100644 index 0000000000..b8b6af5c81 --- /dev/null +++ b/vendor/k8s.io/utils/clock/clock.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import "time" + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + // After returns the channel of a new Timer. + // This method does not allow to free/GC the backing timer before it fires. Use + // NewTimer instead. + After(d time.Duration) <-chan time.Time + // NewTimer returns a new Timer. + NewTimer(d time.Duration) Timer + // Sleep sleeps for the provided duration d. + // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. + Sleep(d time.Duration) + // Tick returns the channel of a new Ticker. + // This method does not allow to free/GC the backing ticker. Use + // NewTicker from WithTicker instead. + Tick(d time.Duration) <-chan time.Time +} + +// WithTicker allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. 
+type WithTicker interface { + Clock + // NewTicker returns a new Ticker. + NewTicker(time.Duration) Ticker +} + +// WithDelayedExecution allows for injecting fake or real clocks into +// code that needs to make use of AfterFunc functionality. +type WithDelayedExecution interface { + Clock + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// WithTickerAndDelayedExecution allows for injecting fake or real clocks +// into code that needs Ticker and AfterFunc functionality +type WithTickerAndDelayedExecution interface { + WithTicker + // AfterFunc executes f in its own goroutine after waiting + // for d duration and returns a Timer whose channel can be + // closed by calling Stop() on the Timer. + AfterFunc(d time.Duration, f func()) Timer +} + +// Ticker defines the Ticker interface. +type Ticker interface { + C() <-chan time.Time + Stop() +} + +var _ = WithTicker(RealClock{}) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +// This method does not allow to free/GC the backing timer before it fires. Use +// NewTimer instead. +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer is the same as time.NewTimer(d) +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// AfterFunc is the same as time.AfterFunc(d, f). 
+func (RealClock) AfterFunc(d time.Duration, f func()) Timer { + return &realTimer{ + timer: time.AfterFunc(d, f), + } +} + +// Tick is the same as time.Tick(d) +// This method does not allow to free/GC the backing ticker. Use +// NewTicker instead. +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep is the same as time.Sleep(d) +// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var _ = Timer(&realTimer{}) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +type realTicker struct { + ticker *time.Ticker +} + +func (r *realTicker) C() <-chan time.Time { + return r.ticker.C +} + +func (r *realTicker) Stop() { + r.ticker.Stop() +} diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 2cab2c5800..f5802d2e81 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -19,6 +19,7 @@ package pointer import ( "fmt" "reflect" + "time" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. 
This is useful when, @@ -184,7 +185,7 @@ func StringEqual(a, b *string) bool { return *a == *b } -// Float32 returns a pointer to the a float32. +// Float32 returns a pointer to a float32. func Float32(i float32) *float32 { return &i } @@ -214,7 +215,7 @@ func Float32Equal(a, b *float32) bool { return *a == *b } -// Float64 returns a pointer to the a float64. +// Float64 returns a pointer to a float64. func Float64(i float64) *float64 { return &i } @@ -243,3 +244,29 @@ func Float64Equal(a, b *float64) bool { } return *a == *b } + +// Duration returns a pointer to a time.Duration. +func Duration(d time.Duration) *time.Duration { + return &d +} + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. +func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { + if ptr != nil { + return *ptr + } + return def +} + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. +func DurationEqual(a, b *time.Duration) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 71dabf2039..d07f10743c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -80,7 +80,7 @@ github.com/envoyproxy/go-control-plane/envoy/type/v3 github.com/envoyproxy/protoc-gen-validate/validate # github.com/evanphx/json-patch v4.11.0+incompatible github.com/evanphx/json-patch -# github.com/go-logr/logr v0.4.0 +# github.com/go-logr/logr v1.2.0 github.com/go-logr/logr # github.com/go-openapi/jsonpointer v0.19.5 ## explicit @@ -804,11 +804,12 @@ k8s.io/component-base/config/options k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/version -# k8s.io/klog v1.0.0 +# k8s.io/klog/v2 v2.50.2 ## explicit -k8s.io/klog -# k8s.io/klog/v2 v2.9.0 k8s.io/klog/v2 +k8s.io/klog/v2/internal/buffer +k8s.io/klog/v2/internal/serialize 
+k8s.io/klog/v2/internal/severity # k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e ## explicit k8s.io/kube-openapi/pkg/common @@ -817,9 +818,10 @@ k8s.io/kube-openapi/pkg/validation/spec # k8s.io/legacy-cloud-providers v0.20.0 ## explicit k8s.io/legacy-cloud-providers/gce -# k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a +# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 ## explicit k8s.io/utils/buffer +k8s.io/utils/clock k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net