From 41d1280ce623b68cb2fb4ee70a831244b56978c9 Mon Sep 17 00:00:00 2001
From: andyzhangx
Date: Tue, 8 Mar 2022 06:56:35 +0000
Subject: [PATCH] fix: add tags matching in storage account search

---
 go.mod                                        |   4 +-
 go.sum                                        |   8 +-
 .../mgmt/2021-07-01/compute/_meta.json        |   4 +-
 .../2020-04-01/containerservice/_meta.json    |   4 +-
 .../2016-10-01/keyvault/dataplane_meta.json   |   4 +-
 .../mgmt/2021-02-01/network/_meta.json        |   4 +-
 .../mgmt/2018-09-01/privatedns/_meta.json     |   4 +-
 .../mgmt/2017-05-10/resources/_meta.json      |   4 +-
 .../mgmt/2018-05-01/resources/_meta.json      |   4 +-
 .../mgmt/2021-02-01/storage/_meta.json        |   4 +-
 .../Azure/azure-sdk-for-go/version/version.go |   2 +-
 .../api/well_known_annotations.go             |  26 +
 .../cloud-provider/api/well_known_taints.go   |  28 +
 .../cloud-provider/node/helpers/address.go    |  38 +
 .../cloud-provider/node/helpers/labels.go     | 103 +++
 .../cloud-provider/node/helpers/taints.go     | 240 ++++++
 .../component-helpers/node/util/cidr.go       |  58 ++
 .../component-helpers/node/util/conditions.go |  57 ++
 .../component-helpers/node/util/status.go     | 134 +++
 vendor/modules.txt                            |  11 +-
 .../azureclients/armclient/azure_armclient.go | 167 ++--
 .../diskclient/azure_diskclient.go            |  59 +-
 .../pkg/azureclients/diskclient/interface.go  |  10 +-
 .../diskclient/mockdiskclient/interface.go    |  76 +-
 .../fileclient/azure_fileclient.go            |  10 +
 .../mockinterfaceclient/interface.go          |  58 +-
 .../azure_loadbalancerclient.go               |  43 +
 .../loadbalancerclient/interface.go           |   3 +
 .../mockloadbalancerclient/interface.go       |  76 +-
 .../mockpublicipclient/interface.go           |  58 +-
 .../routeclient/mockrouteclient/interface.go  |   2 +-
 .../mockroutetableclient/interface.go         |  30 +-
 .../mocksecuritygroupclient/interface.go      |  58 +-
 .../snapshotclient/azure_snapshotclient.go    |  55 +-
 .../azureclients/snapshotclient/interface.go  |   8 +-
 .../mocksnapshotclient/interface.go           |  58 +-
 .../mockstorageaccountclient/interface.go     |  58 +-
 .../mocksubnetclient/interface.go             |  58 +-
 .../vmclient/mockvmclient/interface.go        |  58 +-
 .../vmssclient/mockvmssclient/interface.go    | 146 ++--
 .../mockvmssvmclient/interface.go             |  53 +-
 .../cloud-provider-azure/pkg/consts/consts.go |  23 +
 .../pkg/consts/helpers.go                     | 109 +++
 .../pkg/nodemanager/nodemanager.go            | 786 ++++++++++++++++++
 .../pkg/provider/azure.go                     |  56 +-
 .../pkg/provider/azure_backoff.go             |  28 +
 .../pkg/provider/azure_controller_common.go   |   6 +-
 .../pkg/provider/azure_controller_vmss.go     |  59 +-
 .../pkg/provider/azure_loadbalancer.go        | 518 ++++++------
 .../azure_loadbalancer_backendpool.go         |  73 +-
 .../provider/azure_managedDiskController.go   |  56 +-
 .../pkg/provider/azure_standard.go            |   4 +
 .../pkg/provider/azure_storageaccount.go      |  21 +
 .../pkg/provider/azure_utils.go               |  16 +
 .../pkg/provider/azure_vmss.go                | 150 ++--
 .../cloud-provider-azure/pkg/provider/doc.go  |   2 +-
 .../provider/virtualmachine/virtualmachine.go | 146 ++++
 57 files changed, 3005 insertions(+), 933 deletions(-)
 create mode 100644 vendor/k8s.io/cloud-provider/api/well_known_annotations.go
 create mode 100644 vendor/k8s.io/cloud-provider/api/well_known_taints.go
 create mode 100644 vendor/k8s.io/cloud-provider/node/helpers/address.go
 create mode 100644 vendor/k8s.io/cloud-provider/node/helpers/labels.go
 create mode 100644 vendor/k8s.io/cloud-provider/node/helpers/taints.go
 create mode 100644 vendor/k8s.io/component-helpers/node/util/cidr.go
 create mode 100644 vendor/k8s.io/component-helpers/node/util/conditions.go
 create mode 100644 vendor/k8s.io/component-helpers/node/util/status.go
 create mode 100644
vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go create mode 100644 vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go diff --git a/go.mod b/go.mod index b4d479495..be8ced678 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module sigs.k8s.io/blob-csi-driver go 1.17 require ( - github.com/Azure/azure-sdk-for-go v61.3.0+incompatible + github.com/Azure/azure-sdk-for-go v62.0.0+incompatible github.com/Azure/go-autorest/autorest v0.11.24 github.com/Azure/go-autorest/autorest/adal v0.9.18 github.com/Azure/go-autorest/autorest/to v0.4.0 @@ -145,5 +145,5 @@ replace ( k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3 k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3 k8s.io/sample-controller => k8s.io/sample-controller v0.23.3 - sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220205033256-055704f6ecfd + sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220308004558-7d51a9890cbe ) diff --git a/go.sum b/go.sum index 66da8093d..136340a87 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v61.3.0+incompatible h1:k7MKrYcGwX5qh+fC9xVhcEuaZajFfbDYMEgo8oemTLo= -github.com/Azure/azure-sdk-for-go v61.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v62.0.0+incompatible h1:8N2k27SYtc12qj5nTsuFMFJPZn5CGmgMWqTy4y9I7Jw= +github.com/Azure/azure-sdk-for-go v62.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= @@ -1331,8 +1331,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 h1:KQOkVzXrLNb0EP6W0FD6u3CCPAwgXFYwZitbj7K0P0Y= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= -sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220205033256-055704f6ecfd h1:OFgUdcP3GjgBZkINqoTlrL1XpYJUve+rfXQnhaMvnMs= -sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220205033256-055704f6ecfd/go.mod h1:n9+83mdqFbChS2RlWUIhGr1wIWj7t7ETQnKspiLwyKM= +sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220308004558-7d51a9890cbe h1:JLpokaDLtpZaVgRZy1m4eMTMQFq/4BIx8BSvaevQ0IE= +sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220308004558-7d51a9890cbe/go.mod h1:C2fXARI7NV4bj8wMA7NmKhXrmtULMJhpb60paIapEJ4= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= 
sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json index 4e37b0c3a..ee881857d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute/_meta.json @@ -4,8 +4,8 @@ "tag": "package-2021-07-01", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-07-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-07-01 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/compute/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/_meta.json index 4d3ed7b9b..4c368f7df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice/_meta.json @@ -4,8 +4,8 @@ "tag": "package-2020-04", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2020-04 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/containerservice/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2020-04 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/containerservice/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/dataplane_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/dataplane_meta.json index b80d213c9..fcefce523 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/dataplane_meta.json +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault/dataplane_meta.json @@ -4,8 +4,8 @@ "tag": "package-2016-10", "use": "@microsoft.azure/autorest.go@2.1.183", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2016-10 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.183 --tag=package-2016-10 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/keyvault/data-plane/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network/_meta.json index 3ff842f1a..3da7957dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network/_meta.json @@ -4,8 +4,8 @@ "tag": "package-2021-02", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-02 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/network/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-02 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/network/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json index 3d97252b9..10e0d5369 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/privatedns/mgmt/2018-09-01/privatedns/_meta.json @@ -4,8 +4,8 @@ "tag": "package-2018-09", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2018-09 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION 
/_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2018-09 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/privatedns/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/_meta.json index feca60e5c..a8b691021 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources/_meta.json @@ -4,8 +4,8 @@ "tag": "package-resources-2017-05", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-resources-2017-05 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-resources-2017-05 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/_meta.json index eb91bc67e..d93378dba 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources/_meta.json @@ -4,8 +4,8 @@ "tag": "package-resources-2018-05", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-resources-2018-05 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-resources-2018-05 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION /_/azure-rest-api-specs/specification/resources/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 
--go.license-header=MICROSOFT_MIT_NO_VERSION" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json index 82d1df37d..b6d9ac079 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-02-01/storage/_meta.json @@ -4,8 +4,8 @@ "tag": "package-2021-02", "use": "@microsoft.azure/autorest.go@2.1.187", "repository_url": "https://github.com/Azure/azure-rest-api-specs.git", - "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-02 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", + "autorest_command": "autorest --use=@microsoft.azure/autorest.go@2.1.187 --tag=package-2021-02 --go-sdk-folder=/_/azure-sdk-for-go --go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix /_/azure-rest-api-specs/specification/storage/resource-manager/readme.md", "additional_properties": { - "additional_options": "--go --verbose --use-onever --version=V2 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" + "additional_options": "--go --verbose --use-onever --version=2.0.4421 --go.license-header=MICROSOFT_MIT_NO_VERSION --enum-prefix" } } \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index b9622b6fc..431bf47dd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -4,4 +4,4 @@ package version // Licensed under the MIT License. See License.txt in the project root for license information. // Number contains the semantic version of this SDK. -const Number = "v61.3.0" +const Number = "v62.0.0" diff --git a/vendor/k8s.io/cloud-provider/api/well_known_annotations.go b/vendor/k8s.io/cloud-provider/api/well_known_annotations.go new file mode 100644 index 000000000..fd03ea0a0 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/api/well_known_annotations.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +const ( + // AnnotationAlphaProvidedIPAddr is a node IP annotation set by the "external" cloud provider. + // When kubelet is started with the "external" cloud provider, then + // it sets this annotation on the node to denote an ip address set from the + // cmd line flag (--node-ip). 
This ip is verified with the cloudprovider as valid by + // the cloud-controller-manager + AnnotationAlphaProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip" +) diff --git a/vendor/k8s.io/cloud-provider/api/well_known_taints.go b/vendor/k8s.io/cloud-provider/api/well_known_taints.go new file mode 100644 index 000000000..765bf2260 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/api/well_known_taints.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +const ( + // TaintExternalCloudProvider sets this taint on a node to mark it as unusable, + // when kubelet is started with the "external" cloud provider, until a controller + // from the cloud-controller-manager intitializes this node, and then removes + // the taint + TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized" + + // TaintNodeShutdown when node is shutdown in external cloud provider + TaintNodeShutdown = "node.cloudprovider.kubernetes.io/shutdown" +) diff --git a/vendor/k8s.io/cloud-provider/node/helpers/address.go b/vendor/k8s.io/cloud-provider/node/helpers/address.go new file mode 100644 index 000000000..028a1fbe4 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/node/helpers/address.go @@ -0,0 +1,38 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "k8s.io/api/core/v1" +) + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} diff --git a/vendor/k8s.io/cloud-provider/node/helpers/labels.go b/vendor/k8s.io/cloud-provider/node/helpers/labels.go new file mode 100644 index 000000000..2a6e1bd97 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/node/helpers/labels.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + clientretry "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" +) + +var updateLabelBackoff = wait.Backoff{ + Steps: 5, + Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +// AddOrUpdateLabelsOnNode updates the labels on the node and returns true on +// success and false on failure. +func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[string]string, node *v1.Node) bool { + err := addOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate) + if err != nil { + utilruntime.HandleError( + fmt.Errorf( + "unable to update labels %+v for Node %q: %v", + labelsToUpdate, + node.Name, + err)) + return false + } + + klog.V(4).Infof("Updated labels %+v to Node %v", labelsToUpdate, node.Name) + return true +} + +func addOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, labelsToUpdate map[string]string) error { + firstTry := true + return clientretry.RetryOnConflict(updateLabelBackoff, func() error { + var err error + var node *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. + if firstTry { + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + // Make a copy of the node and update the labels. + newNode := node.DeepCopy() + if newNode.Labels == nil { + newNode.Labels = make(map[string]string) + } + for key, value := range labelsToUpdate { + newNode.Labels[key] = value + } + + oldData, err := json.Marshal(node) + if err != nil { + return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err) + } + newData, err := json.Marshal(newNode) + if err != nil { + return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err) + } + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create a two-way merge patch: %v", err) + } + if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { + return fmt.Errorf("failed to patch the node: %v", err) + } + return nil + }) +} diff --git a/vendor/k8s.io/cloud-provider/node/helpers/taints.go b/vendor/k8s.io/cloud-provider/node/helpers/taints.go new file mode 100644 index 000000000..ca6d27336 --- /dev/null +++ b/vendor/k8s.io/cloud-provider/node/helpers/taints.go @@ -0,0 +1,240 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +NOTE: the contents of this file has been copied from k8s.io/kubernetes/pkg/controller +and k8s.io/kubernetes/pkg/util/taints. The reason for duplicating this code is to remove +dependencies to k8s.io/kubernetes in all the cloud providers. Once k8s.io/kubernetes/pkg/util/taints +is moved to an external repository, this file should be removed and replaced with that one. +*/ + +package helpers + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + clientretry "k8s.io/client-go/util/retry" +) + +var updateTaintBackoff = wait.Backoff{ + Steps: 5, + Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls +// to update nodes; otherwise, no API calls. Return error if any. +func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error { + if len(taints) == 0 { + return nil + } + firstTry := true + return clientretry.RetryOnConflict(updateTaintBackoff, func() error { + var err error + var oldNode *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. + if firstTry { + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + var newNode *v1.Node + oldNodeCopy := oldNode + updated := false + for _, taint := range taints { + curNewNode, ok, err := addOrUpdateTaint(oldNodeCopy, taint) + if err != nil { + return fmt.Errorf("failed to update taint of node") + } + updated = updated || ok + newNode = curNewNode + oldNodeCopy = curNewNode + } + if !updated { + return nil + } + return PatchNodeTaints(c, nodeName, oldNode, newNode) + }) +} + +// PatchNodeTaints patches node's taints. 
+func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { + oldData, err := json.Marshal(oldNode) + if err != nil { + return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) + } + + newTaints := newNode.Spec.Taints + newNodeClone := oldNode.DeepCopy() + newNodeClone.Spec.Taints = newTaints + newData, err := json.Marshal(newNodeClone) + if err != nil { + return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + } + + _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + return err +} + +// addOrUpdateTaint tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func addOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { + newNode := node.DeepCopy() + nodeTaints := newNode.Spec.Taints + + var newTaints []v1.Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if equality.Semantic.DeepEqual(*taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// RemoveTaintOffNode is for cleaning up taints temporarily added to node, +// won't fail if target taint doesn't exist or has been removed. +// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue +// any API calls. +func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error { + if len(taints) == 0 { + return nil + } + // Short circuit for limiting amount of API calls. + if node != nil { + match := false + for _, taint := range taints { + if taintExists(node.Spec.Taints, taint) { + match = true + break + } + } + if !match { + return nil + } + } + + firstTry := true + return clientretry.RetryOnConflict(updateTaintBackoff, func() error { + var err error + var oldNode *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. + if firstTry { + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + oldNode, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + var newNode *v1.Node + oldNodeCopy := oldNode + updated := false + for _, taint := range taints { + curNewNode, ok, err := removeTaint(oldNodeCopy, taint) + if err != nil { + return fmt.Errorf("failed to remove taint of node") + } + updated = updated || ok + newNode = curNewNode + oldNodeCopy = curNewNode + } + if !updated { + return nil + } + return PatchNodeTaints(c, nodeName, oldNode, newNode) + }) +} + +// taintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise. 
+func taintExists(taints []v1.Taint, taintToFind *v1.Taint) bool { + for _, taint := range taints { + if taint.MatchTaint(taintToFind) { + return true + } + } + return false +} + +// removeTaint tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func removeTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { + newNode := node.DeepCopy() + nodeTaints := newNode.Spec.Taints + if len(nodeTaints) == 0 { + return newNode, false, nil + } + + if !taintExists(nodeTaints, taint) { + return newNode, false, nil + } + + newTaints, _ := deleteTaint(nodeTaints, taint) + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// deleteTaint removes all the taints that have the same key and effect to given taintToDelete. +func deleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) { + newTaints := []v1.Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} diff --git a/vendor/k8s.io/component-helpers/node/util/cidr.go b/vendor/k8s.io/component-helpers/node/util/cidr.go new file mode 100644 index 000000000..4d626ee00 --- /dev/null +++ b/vendor/k8s.io/component-helpers/node/util/cidr.go @@ -0,0 +1,58 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "encoding/json" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" +) + +type nodeForCIDRMergePatch struct { + Spec nodeSpecForMergePatch `json:"spec"` +} + +type nodeSpecForMergePatch struct { + PodCIDR string `json:"podCIDR"` + PodCIDRs []string `json:"podCIDRs,omitempty"` +} + +// PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value. +func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error { + // set the pod cidrs list and set the old pod cidr field + patch := nodeForCIDRMergePatch{ + Spec: nodeSpecForMergePatch{ + PodCIDR: cidrs[0], + PodCIDRs: cidrs, + }, + } + + patchBytes, err := json.Marshal(&patch) + if err != nil { + return fmt.Errorf("failed to json.Marshal CIDR: %v", err) + } + klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes)) + if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { + return fmt.Errorf("failed to patch node CIDR: %v", err) + } + return nil +} diff --git a/vendor/k8s.io/component-helpers/node/util/conditions.go b/vendor/k8s.io/component-helpers/node/util/conditions.go new file mode 100644 index 000000000..3ad4dda89 --- /dev/null +++ b/vendor/k8s.io/component-helpers/node/util/conditions.go @@ -0,0 +1,57 @@ +/* +Copyright 2021 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "encoding/json" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clientset "k8s.io/client-go/kubernetes" +) + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// SetNodeCondition updates specific node condition with patch operation. +func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error { + condition.LastHeartbeatTime = metav1.NewTime(time.Now()) + patch, err := json.Marshal(map[string]interface{}{ + "status": map[string]interface{}{ + "conditions": []v1.NodeCondition{condition}, + }, + }) + if err != nil { + return err + } + _, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch) + return err +} diff --git a/vendor/k8s.io/component-helpers/node/util/status.go b/vendor/k8s.io/component-helpers/node/util/status.go new file mode 100644 index 000000000..a3666be31 --- /dev/null +++ b/vendor/k8s.io/component-helpers/node/util/status.go @@ -0,0 +1,134 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "encoding/json" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// PatchNodeStatus patches node status. 
+func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) { + patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode) + if err != nil { + return nil, nil, err + } + + updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") + if err != nil { + return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err) + } + return updatedNode, patchBytes, nil +} + +func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) { + oldData, err := json.Marshal(oldNode) + if err != nil { + return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err) + } + + // NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which + // will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch + // if it changed. + manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses) + + // Reset spec to make sure only patch for Status or ObjectMeta is generated. + // Note that we don't reset ObjectMeta here, because: + // 1. This aligns with Nodes().UpdateStatus(). + // 2. Some component does use this to update node annotations. + diffNode := newNode.DeepCopy() + diffNode.Spec = oldNode.Spec + if manuallyPatchAddresses { + diffNode.Status.Addresses = oldNode.Status.Addresses + } + newData, err := json.Marshal(diffNode) + if err != nil { + return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err) + } + if manuallyPatchAddresses { + patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses) + if err != nil { + return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err) + } + } + + return patchBytes, nil +} + +// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to +// the existing patch +func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) { + // Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and + // addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate: + // + // { + // "status": { + // "conditions": [ ... 
], + // "phase": ..., + // "addresses": [ + // { + // "type": "InternalIP", + // "address": "10.0.0.1" + // }, + // { + // "$patch": "replace" + // } + // ] + // } + // } + + var patchMap map[string]interface{} + if err := json.Unmarshal(patchBytes, &patchMap); err != nil { + return nil, err + } + + addrBytes, err := json.Marshal(addresses) + if err != nil { + return nil, err + } + var addrArray []interface{} + if err := json.Unmarshal(addrBytes, &addrArray); err != nil { + return nil, err + } + addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"}) + + status := patchMap["status"] + if status == nil { + status = map[string]interface{}{} + patchMap["status"] = status + } + statusMap, ok := status.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected data in patch") + } + statusMap["addresses"] = addrArray + + return json.Marshal(patchMap) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f8831fe4d..531e7e47f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# github.com/Azure/azure-sdk-for-go v61.3.0+incompatible +# github.com/Azure/azure-sdk-for-go v62.0.0+incompatible ## explicit github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice @@ -952,6 +952,8 @@ k8s.io/client-go/util/workqueue # k8s.io/cloud-provider v0.23.3 => k8s.io/cloud-provider v0.23.3 ## explicit; go 1.16 k8s.io/cloud-provider +k8s.io/cloud-provider/api +k8s.io/cloud-provider/node/helpers k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors @@ -968,6 +970,7 @@ k8s.io/component-base/traces k8s.io/component-base/version # k8s.io/component-helpers v0.23.3 => k8s.io/component-helpers v0.23.3 ## explicit; go 1.16 +k8s.io/component-helpers/node/util k8s.io/component-helpers/node/util/sysctl k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity @@ -1080,7 +1083,7 @@ k8s.io/utils/trace ## explicit; go 1.17 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client -# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220205033256-055704f6ecfd +# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220308004558-7d51a9890cbe ## explicit; go 1.17 sigs.k8s.io/cloud-provider-azure/pkg/auth sigs.k8s.io/cloud-provider-azure/pkg/azureclients @@ -1124,7 +1127,9 @@ sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient sigs.k8s.io/cloud-provider-azure/pkg/cache sigs.k8s.io/cloud-provider-azure/pkg/consts sigs.k8s.io/cloud-provider-azure/pkg/metrics +sigs.k8s.io/cloud-provider-azure/pkg/nodemanager sigs.k8s.io/cloud-provider-azure/pkg/provider +sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine sigs.k8s.io/cloud-provider-azure/pkg/retry sigs.k8s.io/cloud-provider-azure/pkg/version # sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 @@ -1172,4 +1177,4 @@ sigs.k8s.io/yaml # k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3 # k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3 # k8s.io/sample-controller => k8s.io/sample-controller v0.23.3 -# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220205033256-055704f6ecfd +# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.4-0.20220308004558-7d51a9890cbe diff --git 
a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go index 8e63f5b02..7b64f53bd 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go @@ -27,6 +27,7 @@ import ( "net/http" "net/http/cookiejar" "net/http/httputil" + "net/url" "strings" "sync" "time" @@ -72,9 +73,9 @@ type Client struct { client autorest.Client backoff *retry.Backoff - baseURI string - apiVersion string - clientRegion string + baseURI string + apiVersion string + regionalEndpoint string } // New creates a ARM client @@ -115,12 +116,14 @@ func New(authorizer autorest.Authorizer, clientConfig azureclients.ClientConfig, backoff.Steps = 1 } + url, _ := url.Parse(baseURI) + return &Client{ - client: restClient, - baseURI: baseURI, - backoff: backoff, - apiVersion: apiVersion, - clientRegion: NormalizeAzureRegion(clientConfig.Location), + client: restClient, + baseURI: baseURI, + backoff: backoff, + apiVersion: apiVersion, + regionalEndpoint: fmt.Sprintf("%s.%s", clientConfig.Location, url.Host), } } @@ -151,15 +154,77 @@ func NormalizeAzureRegion(name string) string { return strings.ToLower(region) } +// DoExponentialBackoffRetry returns an autorest.SendDecorator which performs retry with customizable backoff policy. +func DoHackRegionalRetryDecorator(c *Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(request *http.Request) (*http.Response, error) { + response, rerr := s.Do(request) + if rerr == nil || response.StatusCode == http.StatusNotFound || c.regionalEndpoint == "" { + return response, rerr + } + // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow + bodyBytes, _ := ioutil.ReadAll(response.Body) + defer func() { + response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) + }() + + bodyString := string(bodyBytes) + var body map[string]interface{} + if e := json.Unmarshal(bodyBytes, &body); e != nil { + klog.Errorf("Send.sendRequest: error in parsing response body string: %s, Skip retrying regional host", e.Error()) + return response, rerr + } + klog.V(5).Infof("Send.sendRequest original response: %s", bodyString) + + if err, ok := body["error"].(map[string]interface{}); !ok || + err["code"] == nil || + !strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") { + klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host") + return response, rerr + } + + currentHost := request.URL.Host + if request.Host != "" { + currentHost = request.Host + } + + if strings.HasPrefix(strings.ToLower(currentHost), c.regionalEndpoint) { + klog.V(5).Infof("Send.sendRequest: current host %s is regional host. Skip retrying regional host.", html.EscapeString(currentHost)) + return response, rerr + } + + request.Host = c.regionalEndpoint + request.URL.Host = c.regionalEndpoint + klog.V(5).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", html.EscapeString(request.Host)) + + regionalResponse, regionalError := s.Do(request) + // only use the result if the regional request actually goes through and returns 2xx status code, for two reasons: + // 1. the retry on regional ARM host approach is a hack. + // 2. 
the concatenated regional uri could be wrong as the rule is not officially declared by ARM. + if regionalResponse == nil || regionalResponse.StatusCode > 299 { + regionalErrStr := "" + if regionalError != nil { + regionalErrStr = regionalError.Error() + } + + klog.V(5).Infof("Send.sendRegionalRequest failed to get response from regional host, error: '%s'. Ignoring the result.", regionalErrStr) + return response, rerr + } + return regionalResponse, regionalError + }) + } +} + // sendRequest sends a http request to ARM service. // Although Azure SDK supports retries per https://github.com/azure/azure-sdk-for-go#request-retry-policy, we // disable it since we want to fully control the retry policies. -func (c *Client) sendRequest(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) { +func (c *Client) sendRequest(request *http.Request) (*http.Response, *retry.Error) { sendBackoff := *c.backoff response, err := autorest.SendWithSender( c.client, request, retry.DoExponentialBackoffRetry(&sendBackoff), + DoHackRegionalRetryDecorator(c), ) if response == nil && err == nil { @@ -171,77 +236,17 @@ func (c *Client) sendRequest(ctx context.Context, request *http.Request) (*http. // Send sends a http request to ARM service with possible retry to regional ARM endpoint. func (c *Client) Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) { - response, rerr := c.sendRequest(ctx, request) - if rerr != nil { - return response, rerr - } - - if response.StatusCode != http.StatusNotFound || c.clientRegion == "" { - dumpResponse(response, 10) - return response, rerr - } - - bodyBytes, _ := ioutil.ReadAll(response.Body) - defer func() { - response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) - }() - - bodyString := string(bodyBytes) - klog.V(5).Infof("Send.sendRequest original error message: %s", bodyString) - - // Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow - var body map[string]interface{} - if e := json.Unmarshal(bodyBytes, &body); e != nil { - klog.Errorf("Send.sendRequest: error in parsing response body string: %s, Skip retrying regional host", e) - return response, rerr - } - - if err, ok := body["error"].(map[string]interface{}); !ok || - err["code"] == nil || - !strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") { - klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host") - return response, rerr - } - - currentHost := request.URL.Host - if request.Host != "" { - currentHost = request.Host - } - - if strings.HasPrefix(strings.ToLower(currentHost), c.clientRegion) { - klog.V(5).Infof("Send.sendRequest: current host %s is regional host. Skip retrying regional host.", html.EscapeString(currentHost)) - return response, rerr - } - - request.Host = fmt.Sprintf("%s.%s", c.clientRegion, strings.ToLower(currentHost)) - klog.V(5).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", html.EscapeString(request.Host)) - regionalResponse, regionalError := c.sendRequest(ctx, request) - - // only use the result if the regional request actually goes through and returns 2xx status code, for two reasons: - // 1. the retry on regional ARM host approach is a hack. - // 2. the concatenated regional uri could be wrong as the rule is not officially declared by ARM. 
- if regionalResponse == nil || regionalResponse.StatusCode > 299 { - regionalErrStr := "" - if regionalError != nil { - regionalErrStr = regionalError.Error().Error() - } - - klog.V(5).Infof("Send.sendRegionalRequest failed to get response from regional host, error: '%s'. Ignoring the result.", regionalErrStr) - return response, rerr - } - - dumpResponse(response, 10) - return regionalResponse, regionalError + return c.sendRequest(request) } -func dumpResponse(resp *http.Response, v klog.Level) { - responseDump, err := httputil.DumpResponse(resp, true) - if err != nil { - klog.Errorf("Failed to dump response: %v", err) - } else { - klog.V(v).Infof("Dumping response: %s", string(responseDump)) - } -} +// func dumpResponse(resp *http.Response, v klog.Level) { +// responseDump, err := httputil.DumpResponse(resp, true) +// if err != nil { +// klog.Errorf("Failed to dump response: %v", err) +// } else { +// klog.V(v).Infof("Dumping response: %s", string(responseDump)) +// } +// } func dumpRequest(req *http.Request, v klog.Level) { if req == nil { @@ -691,7 +696,7 @@ func (c *Client) PostResource(ctx context.Context, resourceID, action string, pa return nil, retry.NewError(false, err) } - return c.sendRequest(ctx, request) + return c.sendRequest(request) } // DeleteResource deletes a resource by resource ID @@ -725,7 +730,7 @@ func (c *Client) HeadResource(ctx context.Context, resourceID string) (*http.Res return nil, retry.NewError(false, err) } - return c.sendRequest(ctx, request) + return c.sendRequest(request) } // DeleteResourceAsync delete a resource by resource ID and returns a future representing the async result @@ -743,7 +748,7 @@ func (c *Client) DeleteResourceAsync(ctx context.Context, resourceID, ifMatch st return nil, retry.NewError(false, err) } - resp, rerr := c.sendRequest(ctx, deleteRequest) + resp, rerr := c.sendRequest(deleteRequest) defer c.CloseResponse(ctx, resp) if rerr != nil { klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.send", resourceID, rerr.Error()) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go index ac77f2139..e424eebb9 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/azure_diskclient.go @@ -89,8 +89,11 @@ func New(config *azclients.ClientConfig) *Client { } // Get gets a Disk. -func (c *Client) Get(ctx context.Context, resourceGroupName string, diskName string) (compute.Disk, *retry.Error) { - mc := metrics.NewMetricContext("disks", "get", resourceGroupName, c.subscriptionID, "") +func (c *Client) Get(ctx context.Context, subsID, resourceGroupName, diskName string) (compute.Disk, *retry.Error) { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("disks", "get", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterReader.TryAccept() { @@ -105,7 +108,7 @@ func (c *Client) Get(ctx context.Context, resourceGroupName string, diskName str return compute.Disk{}, rerr } - result, rerr := c.getDisk(ctx, resourceGroupName, diskName) + result, rerr := c.getDisk(ctx, subsID, resourceGroupName, diskName) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -120,9 +123,9 @@ func (c *Client) Get(ctx context.Context, resourceGroupName string, diskName str } // getDisk gets a Disk. 
-func (c *Client) getDisk(ctx context.Context, resourceGroupName string, diskName string) (compute.Disk, *retry.Error) { +func (c *Client) getDisk(ctx context.Context, subsID, resourceGroupName, diskName string) (compute.Disk, *retry.Error) { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/disks", diskName, @@ -150,8 +153,11 @@ func (c *Client) getDisk(ctx context.Context, resourceGroupName string, diskName } // CreateOrUpdate creates or updates a Disk. -func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error { - mc := metrics.NewMetricContext("disks", "create_or_update", resourceGroupName, c.subscriptionID, "") +func (c *Client) CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("disks", "create_or_update", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterWriter.TryAccept() { @@ -166,7 +172,7 @@ func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, d return rerr } - rerr := c.createOrUpdateDisk(ctx, resourceGroupName, diskName, diskParameter) + rerr := c.createOrUpdateDisk(ctx, subsID, resourceGroupName, diskName, diskParameter) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -181,9 +187,9 @@ func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, d } // createOrUpdateDisk creates or updates a Disk. -func (c *Client) createOrUpdateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error { +func (c *Client) createOrUpdateDisk(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/disks", diskName, @@ -218,8 +224,11 @@ func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *r } // Update creates or updates a Disk. -func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error { - mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "") +func (c *Client) Update(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("disks", "update", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterWriter.TryAccept() { @@ -234,7 +243,7 @@ func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName return rerr } - rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter) + rerr := c.updateDisk(ctx, subsID, resourceGroupName, diskName, diskParameter) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -249,9 +258,9 @@ func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName } // updateDisk updates a Disk. 
-func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error { +func (c *Client) updateDisk(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/disks", diskName, @@ -286,8 +295,11 @@ func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Err } // Delete deletes a Disk by name. -func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error { - mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "") +func (c *Client) Delete(ctx context.Context, subsID, resourceGroupName, diskName string) *retry.Error { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterWriter.TryAccept() { @@ -302,7 +314,7 @@ func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName return rerr } - rerr := c.deleteDisk(ctx, resourceGroupName, diskName) + rerr := c.deleteDisk(ctx, subsID, resourceGroupName, diskName) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -317,9 +329,9 @@ func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName } // deleteDisk deletes a PublicIPAddress by name. -func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskName string) *retry.Error { +func (c *Client) deleteDisk(ctx context.Context, subsID, resourceGroupName string, diskName string) *retry.Error { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/disks", diskName, @@ -329,9 +341,12 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN } // ListByResourceGroup lists all the disks under a resource group. -func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) { +func (c *Client) ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Disk, *retry.Error) { + if subsID == "" { + subsID = c.subscriptionID + } resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks", - autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", subsID), autorest.Encode("path", resourceGroupName)) result := make([]compute.Disk, 0) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go index 6d22cc815..ffcd52096 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go @@ -37,17 +37,17 @@ const ( // Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client. type Interface interface { // Get gets a Disk. - Get(ctx context.Context, resourceGroupName string, diskName string) (result compute.Disk, rerr *retry.Error) + Get(ctx context.Context, subsID, resourceGroupName, diskName string) (result compute.Disk, rerr *retry.Error) // CreateOrUpdate creates or updates a Disk. 
- CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error + CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error // Update updates a Disk. - Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error + Update(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error // Delete deletes a Disk by name. - Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error + Delete(ctx context.Context, subsID, resourceGroupName, diskName string) *retry.Error // ListByResourceGroup lists all the disks under a resource group. - ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) + ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Disk, *retry.Error) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go index 2808b17f9..e3af24459 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go +// Source: pkg/azureclients/diskclient/interface.go // Package mockdiskclient is a generated GoMock package. package mockdiskclient @@ -53,74 +53,74 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Get mocks base method. -func (m *MockInterface) Get(ctx context.Context, resourceGroupName, diskName string) (compute.Disk, *retry.Error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, diskName) - ret0, _ := ret[0].(compute.Disk) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, diskName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, diskName) -} - // CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error { +func (m *MockInterface) CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, diskName, diskParameter) + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, subsID, resourceGroupName, diskName, diskParameter) ret0, _ := ret[0].(*retry.Error) return ret0 } // CreateOrUpdate indicates an expected call of CreateOrUpdate. 
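The subsID parameter threaded through the disk client above follows one convention: an explicit value targets another subscription, and "" falls back to the client's configured subscriptionID. A usage sketch under those assumptions, with placeholder resource names:

package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient"
)

// getDiskExample illustrates the updated Get signature: an empty subsID falls
// back to the client's own subscription, so single-subscription callers only
// gain an extra "" argument, while cross-subscription callers pass the target
// subscription ID explicitly. Resource names here are placeholders.
func getDiskExample(ctx context.Context, c diskclient.Interface) {
	disk, rerr := c.Get(ctx, "", "my-rg", "my-disk") // same-subscription lookup
	if rerr != nil {
		fmt.Println("get disk:", rerr.Error())
		return
	}
	if disk.Name != nil {
		fmt.Println("found disk:", *disk.Name)
	}

	_, _ = c.Get(ctx, "target-subscription-id", "other-rg", "other-disk") // cross-subscription lookup
}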
-func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, subsID, resourceGroupName, diskName, diskParameter) } -// Update mocks base method. -func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error { +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, subsID, resourceGroupName, diskName string) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter) + ret := m.ctrl.Call(m, "Delete", ctx, subsID, resourceGroupName, diskName) ret0, _ := ret[0].(*retry.Error) return ret0 } -// Update indicates an expected call of Update. -func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, diskName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, subsID, resourceGroupName, diskName) } -// Delete mocks base method. -func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error { +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, subsID, resourceGroupName, diskName string) (compute.Disk, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, diskName) - ret0, _ := ret[0].(*retry.Error) - return ret0 + ret := m.ctrl.Call(m, "Get", ctx, subsID, resourceGroupName, diskName) + ret0, _ := ret[0].(compute.Disk) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 } -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName interface{}) *gomock.Call { +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, diskName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, subsID, resourceGroupName, diskName) } // ListByResourceGroup mocks base method. 
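Tests that stub the disk client through this regenerated mock need the extra subsID argument in their expectations (or gomock.Any() to ignore it). An illustrative sketch, assuming the standard MockGen constructor NewMockInterface and placeholder names:

package example

import (
	"context"
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/golang/mock/gomock"
	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient"
)

// TestGetDiskWithMock shows that expectations against the regenerated mock must
// now carry subsID as the second argument.
func TestGetDiskWithMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mock := mockdiskclient.NewMockInterface(ctrl)
	mock.EXPECT().
		Get(gomock.Any(), "", "my-rg", "my-disk").
		Return(compute.Disk{}, nil)

	if _, rerr := mock.Get(context.Background(), "", "my-rg", "my-disk"); rerr != nil {
		t.Fatalf("unexpected error: %v", rerr)
	}
}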
-func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) { +func (m *MockInterface) ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Disk, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName) + ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, subsID, resourceGroupName) ret0, _ := ret[0].([]compute.Disk) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } // ListByResourceGroup indicates an expected call of ListByResourceGroup. -func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, subsID, resourceGroupName) +} + +// Update mocks base method. +func (m *MockInterface) Update(ctx context.Context, subsID, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, subsID, resourceGroupName, diskName, diskParameter) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockInterfaceMockRecorder) Update(ctx, subsID, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, subsID, resourceGroupName, diskName, diskParameter) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go index 6b6a4d34c..59f4770a2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient/azure_fileclient.go @@ -44,6 +44,10 @@ type ShareOptions struct { RequestGiB int // supported values: ""(by default), "TransactionOptimized", "Cool", "Hot", "Premium" AccessTier string + // supported values: ""(by default), "AllSquash", "NoRootSquash", "RootSquash" + RootSquash string + // Metadata - A name-value pair to associate with the share as metadata. 
+ Metadata map[string]*string } // New creates a azure file client @@ -77,6 +81,12 @@ func (c *Client) CreateFileShare(resourceGroupName, accountName string, shareOpt if shareOptions.AccessTier != "" { fileShareProperties.AccessTier = storage.ShareAccessTier(shareOptions.AccessTier) } + if shareOptions.RootSquash != "" { + fileShareProperties.RootSquash = storage.RootSquashType(shareOptions.RootSquash) + } + if shareOptions.Metadata != nil { + fileShareProperties.Metadata = shareOptions.Metadata + } fileShare := storage.FileShare{ Name: &shareOptions.Name, FileShareProperties: fileShareProperties, diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go index 311acabf6..37dcb6bb3 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/mockinterfaceclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/interfaceclient/interface.go +// Source: pkg/azureclients/interfaceclient/interface.go // Package mockinterfaceclient is a generated GoMock package. package mockinterfaceclient @@ -53,6 +53,34 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, networkInterfaceName string, parameters network.Interface) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, networkInterfaceName, parameters) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkInterfaceName, parameters) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkInterfaceName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, networkInterfaceName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkInterfaceName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkInterfaceName) +} + // Get mocks base method. 
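With the two new ShareOptions fields above, callers can request an NFS root-squash mode and attach metadata when creating a share. A hedged sketch of populating them; the names are placeholders, and the call assumes CreateFileShare keeps the (resourceGroupName, accountName, *ShareOptions) signature shown in the hunk above and still returns a bare error in this version of the client:

package example

import (
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/fileclient"
)

// createShareExample populates the new RootSquash and Metadata options.
func createShareExample(c *fileclient.Client) error {
	createdBy := "blob.csi.azure.com"
	opts := &fileclient.ShareOptions{
		Name:       "pvc-share",
		RequestGiB: 100,
		AccessTier: "Hot",
		RootSquash: "RootSquash", // "", "AllSquash", "NoRootSquash" or "RootSquash"
		Metadata:   map[string]*string{"createdBy": &createdBy},
	}
	if err := c.CreateFileShare("my-rg", "myaccount", opts); err != nil {
		return fmt.Errorf("create file share: %w", err)
	}
	return nil
}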
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, networkInterfaceName, expand string) (network.Interface, *retry.Error) { m.ctrl.T.Helper() @@ -82,31 +110,3 @@ func (mr *MockInterfaceMockRecorder) GetVirtualMachineScaleSetNetworkInterface(c mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVirtualMachineScaleSetNetworkInterface", reflect.TypeOf((*MockInterface)(nil).GetVirtualMachineScaleSetNetworkInterface), ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand) } - -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, networkInterfaceName string, parameters network.Interface) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, networkInterfaceName, parameters) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkInterfaceName, parameters) -} - -// Delete mocks base method. -func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkInterfaceName string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, networkInterfaceName) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkInterfaceName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkInterfaceName) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go index ca17e982f..8f7d4bd99 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/azure_loadbalancerclient.go @@ -492,6 +492,49 @@ func (c *Client) createOrUpdateLBBackendPool(ctx context.Context, resourceGroupN return nil } +// DeleteLBBackendPool deletes a LoadBalancer backend pool by name. +func (c *Client) DeleteLBBackendPool(ctx context.Context, resourceGroupName, loadBalancerName, backendPoolName string) *retry.Error { + mc := metrics.NewMetricContext("load_balancers", "delete_backend_pool", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() + return retry.GetRateLimitError(true, "LBDeleteBackendPool") + } + + // Report errors if the client is throttled. 
+ if c.RetryAfterWriter.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("LBDeleteBackendPool", "client throttled", c.RetryAfterWriter) + return rerr + } + + rerr := c.deleteLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName) + mc.Observe(rerr) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. + c.RetryAfterWriter = rerr.RetryAfter + } + + return rerr + } + + return nil +} + +func (c *Client) deleteLBBackendPool(ctx context.Context, resourceGroupName, loadBalancerName, backendPoolName string) *retry.Error { + resourceID := armclient.GetChildResourceID( + c.subscriptionID, + resourceGroupName, + "Microsoft.Network/loadBalancers", + loadBalancerName, + "backendAddressPools", + backendPoolName, + ) + return c.armClient.DeleteResource(ctx, resourceID, "") +} + func (c *Client) createOrUpdateBackendPoolResponder(resp *http.Response) (*network.BackendAddressPool, *retry.Error) { result := &network.BackendAddressPool{} err := autorest.Respond( diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go index 519715158..8ea9e9804 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go @@ -50,4 +50,7 @@ type Interface interface { // Delete deletes a LoadBalancer by name. Delete(ctx context.Context, resourceGroupName string, loadBalancerName string) *retry.Error + + // DeleteLBBackendPool deletes a LoadBalancer backend pool by name. + DeleteLBBackendPool(ctx context.Context, resourceGroupName, loadBalancerName, backendPoolName string) *retry.Error } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go index 7e45c8aa9..989ea1152 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/mockloadbalancerclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient/interface.go +// Source: pkg/azureclients/loadbalancerclient/interface.go // Package mockloadbalancerclient is a generated GoMock package. package mockloadbalancerclient @@ -53,36 +53,6 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Get mocks base method. -func (m *MockInterface) Get(ctx context.Context, resourceGroupName, loadBalancerName, expand string) (network.LoadBalancer, *retry.Error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, loadBalancerName, expand) - ret0, _ := ret[0].(network.LoadBalancer) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. 
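DeleteLBBackendPool, added above to both the client and its interface, removes a single backend address pool without recreating the whole load balancer. A short usage sketch with placeholder names:

package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/loadbalancerclient"
)

// deleteBackendPoolExample drops one backend address pool from an existing
// load balancer, e.g. when a node pool is being decommissioned.
func deleteBackendPoolExample(ctx context.Context, c loadbalancerclient.Interface) error {
	if rerr := c.DeleteLBBackendPool(ctx, "my-rg", "kubernetes", "kubernetes-backendpool"); rerr != nil {
		return fmt.Errorf("delete backend pool: %v", rerr.Error())
	}
	return nil
}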
-func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, expand interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, loadBalancerName, expand) -} - -// List mocks base method. -func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) - ret0, _ := ret[0].([]network.LoadBalancer) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 -} - -// List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) -} - // CreateOrUpdate mocks base method. func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, loadBalancerName string, parameters network.LoadBalancer, etag string) *retry.Error { m.ctrl.T.Helper() @@ -124,3 +94,47 @@ func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, loadBalancer mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, loadBalancerName) } + +// DeleteLBBackendPool mocks base method. +func (m *MockInterface) DeleteLBBackendPool(ctx context.Context, resourceGroupName, loadBalancerName, backendPoolName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteLBBackendPool", ctx, resourceGroupName, loadBalancerName, backendPoolName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// DeleteLBBackendPool indicates an expected call of DeleteLBBackendPool. +func (mr *MockInterfaceMockRecorder) DeleteLBBackendPool(ctx, resourceGroupName, loadBalancerName, backendPoolName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLBBackendPool", reflect.TypeOf((*MockInterface)(nil).DeleteLBBackendPool), ctx, resourceGroupName, loadBalancerName, backendPoolName) +} + +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, loadBalancerName, expand string) (network.LoadBalancer, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, loadBalancerName, expand) + ret0, _ := ret[0].(network.LoadBalancer) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, loadBalancerName, expand interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, loadBalancerName, expand) +} + +// List mocks base method. +func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, *retry.Error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) + ret0, _ := ret[0].([]network.LoadBalancer) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 +} + +// List indicates an expected call of List. 
+func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go index 44a8efe86..4030f145e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/mockpublicipclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/publicipclient/interface.go +// Source: pkg/azureclients/publicipclient/interface.go // Package mockpublicipclient is a generated GoMock package. package mockpublicipclient @@ -53,6 +53,34 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, publicIPAddressName, parameters) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, publicIPAddressName, parameters) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, publicIPAddressName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, publicIPAddressName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, publicIPAddressName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, publicIPAddressName) +} + // Get mocks base method. func (m *MockInterface) Get(ctx context.Context, resourceGroupName, publicIPAddressName, expand string) (network.PublicIPAddress, *retry.Error) { m.ctrl.T.Helper() @@ -112,31 +140,3 @@ func (mr *MockInterfaceMockRecorder) ListAll(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAll", reflect.TypeOf((*MockInterface)(nil).ListAll), ctx) } - -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, publicIPAddressName string, parameters network.PublicIPAddress) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, publicIPAddressName, parameters) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. 
-func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, publicIPAddressName, parameters interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, publicIPAddressName, parameters) -} - -// Delete mocks base method. -func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, publicIPAddressName string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, publicIPAddressName) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, publicIPAddressName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, publicIPAddressName) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go index bdec03add..ba2ba061e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/mockrouteclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routeclient/interface.go +// Source: pkg/azureclients/routeclient/interface.go // Package mockrouteclient is a generated GoMock package. package mockrouteclient diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go index 70aa3351e..6ff402517 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/mockroutetableclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/routetableclient/interface.go +// Source: pkg/azureclients/routetableclient/interface.go // Package mockroutetableclient is a generated GoMock package. package mockroutetableclient @@ -53,6 +53,20 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, routeTableName string, parameters network.RouteTable, etag string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, routeTableName, parameters, etag) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters, etag interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, parameters, etag) +} + // Get mocks base method. 
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, routeTableName, expand string) (network.RouteTable, *retry.Error) { m.ctrl.T.Helper() @@ -67,17 +81,3 @@ func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, routeTableName, mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, routeTableName, expand) } - -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, routeTableName string, parameters network.RouteTable, etag string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, routeTableName, parameters, etag) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, routeTableName, parameters, etag interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, routeTableName, parameters, etag) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go index d4629d7e9..54db4542d 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/mocksecuritygroupclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/securitygroupclient/interface.go +// Source: pkg/azureclients/securitygroupclient/interface.go // Package mocksecuritygroupclient is a generated GoMock package. package mocksecuritygroupclient @@ -53,6 +53,34 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkSecurityGroupName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, networkSecurityGroupName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Delete indicates an expected call of Delete. 
+func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkSecurityGroupName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkSecurityGroupName) +} + // Get mocks base method. func (m *MockInterface) Get(ctx context.Context, resourceGroupName, networkSecurityGroupName, expand string) (network.SecurityGroup, *retry.Error) { m.ctrl.T.Helper() @@ -82,31 +110,3 @@ func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *g mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } - -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, networkSecurityGroupName string, parameters network.SecurityGroup, etag string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, networkSecurityGroupName, parameters, etag interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, networkSecurityGroupName, parameters, etag) -} - -// Delete mocks base method. -func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, networkSecurityGroupName string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, networkSecurityGroupName) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, networkSecurityGroupName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, networkSecurityGroupName) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go index 64b27918f..91608e721 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/azure_snapshotclient.go @@ -86,8 +86,11 @@ func New(config *azclients.ClientConfig) *Client { } // Get gets a Snapshot. -func (c *Client) Get(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error) { - mc := metrics.NewMetricContext("snapshot", "get", resourceGroupName, c.subscriptionID, "") +func (c *Client) Get(ctx context.Context, subsID, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("snapshot", "get", resourceGroupName, subsID, "") // Report errors if the client is rate limited. 
if !c.rateLimiterReader.TryAccept() { @@ -102,7 +105,7 @@ func (c *Client) Get(ctx context.Context, resourceGroupName string, snapshotName return compute.Snapshot{}, rerr } - result, rerr := c.getSnapshot(ctx, resourceGroupName, snapshotName) + result, rerr := c.getSnapshot(ctx, subsID, resourceGroupName, snapshotName) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -117,9 +120,9 @@ func (c *Client) Get(ctx context.Context, resourceGroupName string, snapshotName } // getSnapshot gets a Snapshot. -func (c *Client) getSnapshot(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error) { +func (c *Client) getSnapshot(ctx context.Context, subsID, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/snapshots", snapshotName, @@ -147,8 +150,11 @@ func (c *Client) getSnapshot(ctx context.Context, resourceGroupName string, snap } // Delete deletes a Snapshot by name. -func (c *Client) Delete(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error { - mc := metrics.NewMetricContext("snapshot", "delete", resourceGroupName, c.subscriptionID, "") +func (c *Client) Delete(ctx context.Context, subsID, resourceGroupName, snapshotName string) *retry.Error { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("snapshot", "delete", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterWriter.TryAccept() { @@ -163,7 +169,7 @@ func (c *Client) Delete(ctx context.Context, resourceGroupName string, snapshotN return rerr } - rerr := c.deleteSnapshot(ctx, resourceGroupName, snapshotName) + rerr := c.deleteSnapshot(ctx, subsID, resourceGroupName, snapshotName) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -178,9 +184,9 @@ func (c *Client) Delete(ctx context.Context, resourceGroupName string, snapshotN } // deleteSnapshot deletes a PublicIPAddress by name. -func (c *Client) deleteSnapshot(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error { +func (c *Client) deleteSnapshot(ctx context.Context, subsID, resourceGroupName, snapshotName string) *retry.Error { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/snapshots", snapshotName, @@ -190,8 +196,11 @@ func (c *Client) deleteSnapshot(ctx context.Context, resourceGroupName string, s } // CreateOrUpdate creates or updates a Snapshot. -func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error { - mc := metrics.NewMetricContext("snapshot", "create_or_update", resourceGroupName, c.subscriptionID, "") +func (c *Client) CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("snapshot", "create_or_update", resourceGroupName, subsID, "") // Report errors if the client is rate limited. 
if !c.rateLimiterWriter.TryAccept() { @@ -206,7 +215,7 @@ func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, s return rerr } - rerr := c.createOrUpdateSnapshot(ctx, resourceGroupName, snapshotName, snapshot) + rerr := c.createOrUpdateSnapshot(ctx, subsID, resourceGroupName, snapshotName, snapshot) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -221,9 +230,9 @@ func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, s } // createOrUpdateSnapshot creates or updates a Snapshot. -func (c *Client) createOrUpdateSnapshot(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error { +func (c *Client) createOrUpdateSnapshot(ctx context.Context, subsID, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error { resourceID := armclient.GetResourceID( - c.subscriptionID, + subsID, resourceGroupName, "Microsoft.Compute/snapshots", snapshotName, @@ -258,8 +267,11 @@ func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Snapshot } // ListByResourceGroup get a list snapshots by resourceGroup. -func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { - mc := metrics.NewMetricContext("snapshot", "list_by_resource_group", resourceGroupName, c.subscriptionID, "") +func (c *Client) ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { + if subsID == "" { + subsID = c.subscriptionID + } + mc := metrics.NewMetricContext("snapshot", "list_by_resource_group", resourceGroupName, subsID, "") // Report errors if the client is rate limited. if !c.rateLimiterReader.TryAccept() { @@ -274,7 +286,7 @@ func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName stri return nil, rerr } - result, rerr := c.listSnapshotsByResourceGroup(ctx, resourceGroupName) + result, rerr := c.listSnapshotsByResourceGroup(ctx, subsID, resourceGroupName) mc.Observe(rerr) if rerr != nil { if rerr.IsThrottled() { @@ -289,9 +301,12 @@ func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName stri } // listSnapshotsByResourceGroup gets a list of snapshots in the resource group. -func (c *Client) listSnapshotsByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { +func (c *Client) listSnapshotsByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { + if subsID == "" { + subsID = c.subscriptionID + } resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots", - autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", subsID), autorest.Encode("path", resourceGroupName)) result := make([]compute.Snapshot, 0) page := &SnapshotListPage{} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go index f0f6f285a..731db0d29 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go @@ -37,14 +37,14 @@ const ( // Don't forget to run "hack/update-mock-clients.sh" command to generate the mock client. type Interface interface { // Get gets a Snapshot. 
- Get(ctx context.Context, resourceGroupName string, snapshotName string) (compute.Snapshot, *retry.Error) + Get(ctx context.Context, subsID, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) // Delete deletes a Snapshot by name. - Delete(ctx context.Context, resourceGroupName string, snapshotName string) *retry.Error + Delete(ctx context.Context, subsID, resourceGroupName, snapshotName string) *retry.Error // ListByResourceGroup get a list snapshots by resourceGroup. - ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) + ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Snapshot, *retry.Error) // CreateOrUpdate creates or updates a Snapshot. - CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot compute.Snapshot) *retry.Error + CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go index 8228f1f1d..6c9809abf 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/mocksnapshotclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient/interface.go +// Source: pkg/azureclients/snapshotclient/interface.go // Package mocksnapshotclient is a generated GoMock package. package mocksnapshotclient @@ -53,60 +53,60 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Get mocks base method. -func (m *MockInterface) Get(ctx context.Context, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) { +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, subsID, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, snapshotName) - ret0, _ := ret[0].(compute.Snapshot) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, subsID, resourceGroupName, snapshotName, snapshot) + ret0, _ := ret[0].(*retry.Error) + return ret0 } -// Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, snapshotName interface{}) *gomock.Call { +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, subsID, resourceGroupName, snapshotName, snapshot interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, snapshotName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, subsID, resourceGroupName, snapshotName, snapshot) } // Delete mocks base method. 
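The snapshot client adopts the same subsID convention as the disk client: empty means the client's own subscription, anything else targets that subscription directly. An illustrative sketch using ListByResourceGroup, with placeholder subscription and resource-group names:

package example

import (
	"context"
	"fmt"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/snapshotclient"
)

// listSnapshotsExample lists snapshots in the client's own subscription and in
// an explicitly named one.
func listSnapshotsExample(ctx context.Context, c snapshotclient.Interface) {
	// Empty subsID: fall back to the client's configured subscription.
	if snaps, rerr := c.ListByResourceGroup(ctx, "", "my-rg"); rerr == nil {
		fmt.Println("snapshots in default subscription:", len(snaps))
	}

	// Explicit subsID: read snapshots that live in another subscription.
	if snaps, rerr := c.ListByResourceGroup(ctx, "other-subscription-id", "other-rg"); rerr == nil {
		fmt.Println("snapshots in other subscription:", len(snaps))
	}
}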
-func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, snapshotName string) *retry.Error { +func (m *MockInterface) Delete(ctx context.Context, subsID, resourceGroupName, snapshotName string) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, snapshotName) + ret := m.ctrl.Call(m, "Delete", ctx, subsID, resourceGroupName, snapshotName) ret0, _ := ret[0].(*retry.Error) return ret0 } // Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, snapshotName interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) Delete(ctx, subsID, resourceGroupName, snapshotName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, snapshotName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, subsID, resourceGroupName, snapshotName) } -// ListByResourceGroup mocks base method. -func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, subsID, resourceGroupName, snapshotName string) (compute.Snapshot, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName) - ret0, _ := ret[0].([]compute.Snapshot) + ret := m.ctrl.Call(m, "Get", ctx, subsID, resourceGroupName, snapshotName) + ret0, _ := ret[0].(compute.Snapshot) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// ListByResourceGroup indicates an expected call of ListByResourceGroup. -func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call { +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, subsID, resourceGroupName, snapshotName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, subsID, resourceGroupName, snapshotName) } -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, snapshotName string, snapshot compute.Snapshot) *retry.Error { +// ListByResourceGroup mocks base method. +func (m *MockInterface) ListByResourceGroup(ctx context.Context, subsID, resourceGroupName string) ([]compute.Snapshot, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, snapshotName, snapshot) - ret0, _ := ret[0].(*retry.Error) - return ret0 + ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, subsID, resourceGroupName) + ret0, _ := ret[0].([]compute.Snapshot) + ret1, _ := ret[1].(*retry.Error) + return ret0, ret1 } -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, snapshotName, snapshot interface{}) *gomock.Call { +// ListByResourceGroup indicates an expected call of ListByResourceGroup. 
+func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, subsID, resourceGroupName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, snapshotName, snapshot) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, subsID, resourceGroupName) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go index d96113e8c..8bad6f46c 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/mockstorageaccountclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/interface.go +// Source: pkg/azureclients/storageaccountclient/interface.go // Package mockstorageaccountclient is a generated GoMock package. package mockstorageaccountclient @@ -67,20 +67,6 @@ func (mr *MockInterfaceMockRecorder) Create(ctx, resourceGroupName, accountName, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockInterface)(nil).Create), ctx, resourceGroupName, accountName, parameters) } -// Update mocks base method. -func (m *MockInterface) Update(ctx context.Context, resourceGroupName, accountName string, parameters storage.AccountUpdateParameters) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, accountName, parameters) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Update indicates an expected call of Update. -func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, accountName, parameters interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, accountName, parameters) -} - // Delete mocks base method. func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, accountName string) *retry.Error { m.ctrl.T.Helper() @@ -95,19 +81,19 @@ func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, accountName return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, accountName) } -// ListKeys mocks base method. -func (m *MockInterface) ListKeys(ctx context.Context, resourceGroupName, accountName string) (storage.AccountListKeysResult, *retry.Error) { +// GetProperties mocks base method. +func (m *MockInterface) GetProperties(ctx context.Context, resourceGroupName, accountName string) (storage.Account, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListKeys", ctx, resourceGroupName, accountName) - ret0, _ := ret[0].(storage.AccountListKeysResult) + ret := m.ctrl.Call(m, "GetProperties", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(storage.Account) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// ListKeys indicates an expected call of ListKeys. 
-func (mr *MockInterfaceMockRecorder) ListKeys(ctx, resourceGroupName, accountName interface{}) *gomock.Call { +// GetProperties indicates an expected call of GetProperties. +func (mr *MockInterfaceMockRecorder) GetProperties(ctx, resourceGroupName, accountName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeys", reflect.TypeOf((*MockInterface)(nil).ListKeys), ctx, resourceGroupName, accountName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProperties", reflect.TypeOf((*MockInterface)(nil).GetProperties), ctx, resourceGroupName, accountName) } // ListByResourceGroup mocks base method. @@ -125,17 +111,31 @@ func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName) } -// GetProperties mocks base method. -func (m *MockInterface) GetProperties(ctx context.Context, resourceGroupName, accountName string) (storage.Account, *retry.Error) { +// ListKeys mocks base method. +func (m *MockInterface) ListKeys(ctx context.Context, resourceGroupName, accountName string) (storage.AccountListKeysResult, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProperties", ctx, resourceGroupName, accountName) - ret0, _ := ret[0].(storage.Account) + ret := m.ctrl.Call(m, "ListKeys", ctx, resourceGroupName, accountName) + ret0, _ := ret[0].(storage.AccountListKeysResult) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// GetProperties indicates an expected call of GetProperties. -func (mr *MockInterfaceMockRecorder) GetProperties(ctx, resourceGroupName, accountName interface{}) *gomock.Call { +// ListKeys indicates an expected call of ListKeys. +func (mr *MockInterfaceMockRecorder) ListKeys(ctx, resourceGroupName, accountName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProperties", reflect.TypeOf((*MockInterface)(nil).GetProperties), ctx, resourceGroupName, accountName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListKeys", reflect.TypeOf((*MockInterface)(nil).ListKeys), ctx, resourceGroupName, accountName) +} + +// Update mocks base method. +func (m *MockInterface) Update(ctx context.Context, resourceGroupName, accountName string, parameters storage.AccountUpdateParameters) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, accountName, parameters) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Update indicates an expected call of Update. +func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, accountName, parameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, accountName, parameters) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go index c4f1f24f2..9866e668b 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/mocksubnetclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. 
-// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/subnetclient/interface.go +// Source: pkg/azureclients/subnetclient/interface.go // Package mocksubnetclient is a generated GoMock package. package mocksubnetclient @@ -53,6 +53,34 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string, subnetParameters network.Subnet) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, virtualNetworkName, subnetName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, virtualNetworkName, subnetName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, virtualNetworkName, subnetName) +} + // Get mocks base method. func (m *MockInterface) Get(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName, expand string) (network.Subnet, *retry.Error) { m.ctrl.T.Helper() @@ -82,31 +110,3 @@ func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualNetwork mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualNetworkName) } - -// CreateOrUpdate mocks base method. -func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string, subnetParameters network.Subnet) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, virtualNetworkName, subnetName, subnetParameters) -} - -// Delete mocks base method. 
-func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, virtualNetworkName, subnetName string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, virtualNetworkName, subnetName) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, virtualNetworkName, subnetName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, virtualNetworkName, subnetName) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go index 3396143a4..b204a97be 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/mockvmclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmclient/interface.go +// Source: pkg/azureclients/vmclient/interface.go // Package mockvmclient is a generated GoMock package. package mockvmclient @@ -54,6 +54,34 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } +// CreateOrUpdate mocks base method. +func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, VMName string, parameters compute.VirtualMachine, source string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, VMName, parameters, source) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// CreateOrUpdate indicates an expected call of CreateOrUpdate. +func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, VMName, parameters, source) +} + +// Delete mocks base method. +func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, VMName string) *retry.Error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, VMName) + ret0, _ := ret[0].(*retry.Error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, VMName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, VMName) +} + // Get mocks base method. func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMName string, expand compute.InstanceViewTypes) (compute.VirtualMachine, *retry.Error) { m.ctrl.T.Helper() @@ -84,20 +112,6 @@ func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } -// CreateOrUpdate mocks base method. 
-func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, VMName string, parameters compute.VirtualMachine, source string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, VMName, parameters, source) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// CreateOrUpdate indicates an expected call of CreateOrUpdate. -func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, VMName, parameters, source interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, VMName, parameters, source) -} - // Update mocks base method. func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMName string, parameters compute.VirtualMachineUpdate, source string) *retry.Error { m.ctrl.T.Helper() @@ -140,17 +154,3 @@ func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGr mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) } - -// Delete mocks base method. -func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, VMName string) *retry.Error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, VMName) - ret0, _ := ret[0].(*retry.Error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, VMName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, VMName) -} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go index 7ed1a26e5..449983234 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go @@ -16,7 +16,7 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go +// Source: pkg/azureclients/vmssclient/interface.go // Package mockvmssclient is a generated GoMock package. package mockvmssclient @@ -55,36 +55,6 @@ func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Get mocks base method. -func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetName string) (compute.VirtualMachineScaleSet, *retry.Error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, VMScaleSetName) - ret0, _ := ret[0].(compute.VirtualMachineScaleSet) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 -} - -// Get indicates an expected call of Get. -func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName) -} - -// List mocks base method. 
-func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, *retry.Error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) - ret0, _ := ret[0].([]compute.VirtualMachineScaleSet) - ret1, _ := ret[1].(*retry.Error) - return ret0, ret1 -} - -// List indicates an expected call of List. -func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) -} - // CreateOrUpdate mocks base method. func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error { m.ctrl.T.Helper() @@ -114,19 +84,19 @@ func (mr *MockInterfaceMockRecorder) CreateOrUpdateAsync(ctx, resourceGroupName, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdateAsync", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdateAsync), ctx, resourceGroupName, VMScaleSetName, parameters) } -// WaitForAsyncOperationResult mocks base method. -func (m *MockInterface) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, resourceGroupName, request, asyncOpName string) (*http.Response, error) { +// DeallocateInstancesAsync mocks base method. +func (m *MockInterface) DeallocateInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForAsyncOperationResult", ctx, future, resourceGroupName, request, asyncOpName) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) + ret := m.ctrl.Call(m, "DeallocateInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) + ret0, _ := ret[0].(*azure.Future) + ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// WaitForAsyncOperationResult indicates an expected call of WaitForAsyncOperationResult. -func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, resourceGroupName, request, asyncOpName interface{}) *gomock.Call { +// DeallocateInstancesAsync indicates an expected call of DeallocateInstancesAsync. +func (mr *MockInterfaceMockRecorder) DeallocateInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationResult", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationResult), ctx, future, resourceGroupName, request, asyncOpName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeallocateInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeallocateInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) } // DeleteInstances mocks base method. @@ -144,63 +114,93 @@ func (mr *MockInterfaceMockRecorder) DeleteInstances(ctx, resourceGroupName, vmS } // DeleteInstancesAsync mocks base method. 
-func (m *MockInterface) DeleteInstancesAsync(ctx context.Context, resourceGroupName, VMScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, forceDelete bool) (*azure.Future, *retry.Error) { +func (m *MockInterface) DeleteInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, forceDelete bool) (*azure.Future, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteInstancesAsync", ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete) + ret := m.ctrl.Call(m, "DeleteInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete) ret0, _ := ret[0].(*azure.Future) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } // DeleteInstancesAsync indicates an expected call of DeleteInstancesAsync. -func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete interface{}) *gomock.Call { +func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeleteInstancesAsync), ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeleteInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, forceDelete) } -// WaitForCreateOrUpdateResult mocks base method. -func (m *MockInterface) WaitForCreateOrUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName string) (*http.Response, error) { +// Get mocks base method. +func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetName string) (compute.VirtualMachineScaleSet, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForCreateOrUpdateResult", ctx, future, resourceGroupName) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) + ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, VMScaleSetName) + ret0, _ := ret[0].(compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// WaitForCreateOrUpdateResult indicates an expected call of WaitForCreateOrUpdateResult. -func (mr *MockInterfaceMockRecorder) WaitForCreateOrUpdateResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +// Get indicates an expected call of Get. +func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCreateOrUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForCreateOrUpdateResult), ctx, future, resourceGroupName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName) } -// WaitForDeleteInstancesResult mocks base method. -func (m *MockInterface) WaitForDeleteInstancesResult(ctx context.Context, future *azure.Future, resourceGroupName string) (*http.Response, error) { +// List mocks base method. 
+func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]compute.VirtualMachineScaleSet, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForDeleteInstancesResult", ctx, future, resourceGroupName) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) + ret := m.ctrl.Call(m, "List", ctx, resourceGroupName) + ret0, _ := ret[0].([]compute.VirtualMachineScaleSet) + ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// WaitForDeleteInstancesResult indicates an expected call of WaitForDeleteInstancesResult. -func (mr *MockInterfaceMockRecorder) WaitForDeleteInstancesResult(ctx, future, resourceGroupName interface{}) *gomock.Call { +// List indicates an expected call of List. +func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeleteInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForDeleteInstancesResult), ctx, future, resourceGroupName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName) } -// DeallocateInstancesAsync mocks base method. -func (m *MockInterface) DeallocateInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) { +// StartInstancesAsync mocks base method. +func (m *MockInterface) StartInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeallocateInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) + ret := m.ctrl.Call(m, "StartInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) ret0, _ := ret[0].(*azure.Future) ret1, _ := ret[1].(*retry.Error) return ret0, ret1 } -// DeallocateInstancesAsync indicates an expected call of DeallocateInstancesAsync. -func (mr *MockInterfaceMockRecorder) DeallocateInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { +// StartInstancesAsync indicates an expected call of StartInstancesAsync. +func (mr *MockInterfaceMockRecorder) StartInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeallocateInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeallocateInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartInstancesAsync", reflect.TypeOf((*MockInterface)(nil).StartInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) +} + +// WaitForAsyncOperationResult mocks base method. +func (m *MockInterface) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, resourceGroupName, request, asyncOpName string) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForAsyncOperationResult", ctx, future, resourceGroupName, request, asyncOpName) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForAsyncOperationResult indicates an expected call of WaitForAsyncOperationResult. 
+func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, resourceGroupName, request, asyncOpName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationResult", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationResult), ctx, future, resourceGroupName, request, asyncOpName) +} + +// WaitForCreateOrUpdateResult mocks base method. +func (m *MockInterface) WaitForCreateOrUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName string) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForCreateOrUpdateResult", ctx, future, resourceGroupName) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// WaitForCreateOrUpdateResult indicates an expected call of WaitForCreateOrUpdateResult. +func (mr *MockInterfaceMockRecorder) WaitForCreateOrUpdateResult(ctx, future, resourceGroupName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForCreateOrUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForCreateOrUpdateResult), ctx, future, resourceGroupName) } // WaitForDeallocateInstancesResult mocks base method. @@ -218,19 +218,19 @@ func (mr *MockInterfaceMockRecorder) WaitForDeallocateInstancesResult(ctx, futur return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeallocateInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForDeallocateInstancesResult), ctx, future, resourceGroupName) } -// StartInstancesAsync mocks base method. -func (m *MockInterface) StartInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) { +// WaitForDeleteInstancesResult mocks base method. +func (m *MockInterface) WaitForDeleteInstancesResult(ctx context.Context, future *azure.Future, resourceGroupName string) (*http.Response, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) - ret0, _ := ret[0].(*azure.Future) - ret1, _ := ret[1].(*retry.Error) + ret := m.ctrl.Call(m, "WaitForDeleteInstancesResult", ctx, future, resourceGroupName) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) return ret0, ret1 } -// StartInstancesAsync indicates an expected call of StartInstancesAsync. -func (mr *MockInterfaceMockRecorder) StartInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call { +// WaitForDeleteInstancesResult indicates an expected call of WaitForDeleteInstancesResult. +func (mr *MockInterfaceMockRecorder) WaitForDeleteInstancesResult(ctx, future, resourceGroupName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartInstancesAsync", reflect.TypeOf((*MockInterface)(nil).StartInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForDeleteInstancesResult", reflect.TypeOf((*MockInterface)(nil).WaitForDeleteInstancesResult), ctx, future, resourceGroupName) } // WaitForStartInstancesResult mocks base method. 
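Aside (editor's note, not part of the patch itself): the mock clients above are regenerated by MockGen with alphabetically ordered methods and updated signatures. The sketch below shows how a unit test might drive the regenerated VMSS mock with gomock; the resource group "rg" and scale set name "vmss-0" are hypothetical placeholders.

// Illustrative sketch only (not part of this patch): exercising the regenerated
// VMSS mock client in a test. "rg" and "vmss-0" are made-up names.
package example

import (
	"context"
	"testing"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/golang/mock/gomock"

	"sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient"
)

func TestGetVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Expect exactly one Get call and return an empty scale set with no retry error.
	mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
	mockVMSSClient.EXPECT().
		Get(gomock.Any(), "rg", "vmss-0").
		Return(compute.VirtualMachineScaleSet{}, nil).
		Times(1)

	if _, rerr := mockVMSSClient.Get(context.Background(), "rg", "vmss-0"); rerr != nil {
		t.Fatalf("unexpected error: %v", rerr)
	}
}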
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go index 0985ac328..918d22be7 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/mockvmssvmclient/interface.go @@ -16,44 +16,45 @@ // // Code generated by MockGen. DO NOT EDIT. -// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssvmclient/interface.go +// Source: pkg/azureclients/vmssvmclient/interface.go // Package mockvmssvmclient is a generated GoMock package. package mockvmssvmclient import ( context "context" + reflect "reflect" + compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" azure "github.com/Azure/go-autorest/autorest/azure" gomock "github.com/golang/mock/gomock" - reflect "reflect" retry "sigs.k8s.io/cloud-provider-azure/pkg/retry" ) -// MockInterface is a mock of Interface interface +// MockInterface is a mock of Interface interface. type MockInterface struct { ctrl *gomock.Controller recorder *MockInterfaceMockRecorder } -// MockInterfaceMockRecorder is the mock recorder for MockInterface +// MockInterfaceMockRecorder is the mock recorder for MockInterface. type MockInterfaceMockRecorder struct { mock *MockInterface } -// NewMockInterface creates a new mock instance +// NewMockInterface creates a new mock instance. func NewMockInterface(ctrl *gomock.Controller) *MockInterface { mock := &MockInterface{ctrl: ctrl} mock.recorder = &MockInterfaceMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder { return m.recorder } -// Get mocks base method +// Get mocks base method. func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, expand compute.InstanceViewTypes) (compute.VirtualMachineScaleSetVM, *retry.Error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, VMScaleSetName, instanceID, expand) @@ -62,13 +63,13 @@ func (m *MockInterface) Get(ctx context.Context, resourceGroupName, VMScaleSetNa return ret0, ret1 } -// Get indicates an expected call of Get +// Get indicates an expected call of Get. func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, VMScaleSetName, instanceID, expand interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, VMScaleSetName, instanceID, expand) } -// List mocks base method +// List mocks base method. func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, expand string) ([]compute.VirtualMachineScaleSetVM, *retry.Error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", ctx, resourceGroupName, virtualMachineScaleSetName, expand) @@ -77,13 +78,13 @@ func (m *MockInterface) List(ctx context.Context, resourceGroupName, virtualMach return ret0, ret1 } -// List indicates an expected call of List +// List indicates an expected call of List. 
func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName, virtualMachineScaleSetName, expand interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName, virtualMachineScaleSetName, expand) } -// Update mocks base method +// Update mocks base method. func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) *retry.Error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) @@ -91,13 +92,13 @@ func (m *MockInterface) Update(ctx context.Context, resourceGroupName, VMScaleSe return ret0 } -// Update indicates an expected call of Update +// Update indicates an expected call of Update. func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) } -// UpdateAsync mocks base method +// UpdateAsync mocks base method. func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (*azure.Future, *retry.Error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UpdateAsync", ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) @@ -106,36 +107,36 @@ func (m *MockInterface) UpdateAsync(ctx context.Context, resourceGroupName, VMSc return ret0, ret1 } -// UpdateAsync indicates an expected call of UpdateAsync +// UpdateAsync indicates an expected call of UpdateAsync. func (mr *MockInterfaceMockRecorder) UpdateAsync(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAsync", reflect.TypeOf((*MockInterface)(nil).UpdateAsync), ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source) } -// WaitForUpdateResult mocks base method -func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error { +// UpdateVMs mocks base method. +func (m *MockInterface) UpdateVMs(ctx context.Context, resourceGroupName, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string, batchSize int) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source) + ret := m.ctrl.Call(m, "UpdateVMs", ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize) ret0, _ := ret[0].(*retry.Error) return ret0 } -// WaitForUpdateResult indicates an expected call of WaitForUpdateResult -func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call { +// UpdateVMs indicates an expected call of UpdateVMs. 
+func (mr *MockInterfaceMockRecorder) UpdateVMs(ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVMs", reflect.TypeOf((*MockInterface)(nil).UpdateVMs), ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize) } -// UpdateVMs mocks base method -func (m *MockInterface) UpdateVMs(ctx context.Context, resourceGroupName, VMScaleSetName string, instances map[string]compute.VirtualMachineScaleSetVM, source string, batchSize int) *retry.Error { +// WaitForUpdateResult mocks base method. +func (m *MockInterface) WaitForUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName, source string) *retry.Error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateVMs", ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize) + ret := m.ctrl.Call(m, "WaitForUpdateResult", ctx, future, resourceGroupName, source) ret0, _ := ret[0].(*retry.Error) return ret0 } -// UpdateVMs indicates an expected call of UpdateVMs -func (mr *MockInterfaceMockRecorder) UpdateVMs(ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize interface{}) *gomock.Call { +// WaitForUpdateResult indicates an expected call of WaitForUpdateResult. +func (mr *MockInterfaceMockRecorder) WaitForUpdateResult(ctx, future, resourceGroupName, source interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVMs", reflect.TypeOf((*MockInterface)(nil).UpdateVMs), ctx, resourceGroupName, VMScaleSetName, instances, source, batchSize) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForUpdateResult", reflect.TypeOf((*MockInterface)(nil).WaitForUpdateResult), ctx, future, resourceGroupName, source) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go index e409486fe..2e0f9c292 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go @@ -382,3 +382,26 @@ const RateLimited = "rate limited" // CreatedByTag tag key for CSI drivers const CreatedByTag = "k8s-azure-created-by" + +// health probe +const ( + HealthProbeAnnotationPrefixPattern = "service.beta.kubernetes.io/port_%d_health-probe_" + + // HealthProbeParamsProbeInterval determines the probe interval of the load balancer health probe. + // The minimum probe interval is 5 seconds and the default value is 5. The total duration of all intervals cannot exceed 120 seconds. + HealthProbeParamsProbeInterval HealthProbeParams = "interval" + HealthProbeDefaultProbeInterval int32 = 5 + + // HealthProbeParamsNumOfProbe determines the minimum number of unhealthy responses which load balancer cannot tolerate. + // The minimum number of probe is 2. The total duration of all intervals cannot exceed 120 seconds. + HealthProbeParamsNumOfProbe HealthProbeParams = "num-of-probe" + HealthProbeDefaultNumOfProbe int32 = 2 + + // HealthProbeParamsRequestPath determines the request path of the load balancer health probe. + // This is only useful for the HTTP and HTTPS, and would be ignored when using TCP. If not set, + // `/healthz` would be configured by default. 
+	HealthProbeParamsRequestPath HealthProbeParams = "request-path"
+	HealthProbeDefaultRequestPath string = "/healthz"
+)
+
+type HealthProbeParams string
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go
new file mode 100644
index 000000000..f86236218
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/helpers.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package consts stages all the consts under pkg/.
+package consts
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+)
+
+// IsK8sServiceHasHAModeEnabled returns whether HA mode is enabled in the Kubernetes service annotations
+func IsK8sServiceHasHAModeEnabled(service *v1.Service) bool {
+	return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts, TrueAnnotationValue)
+}
+
+// IsK8sServiceUsingInternalLoadBalancer returns whether the service is using an internal load balancer.
+func IsK8sServiceUsingInternalLoadBalancer(service *v1.Service) bool {
+	return expectAttributeInSvcAnnotationBeEqualTo(service.Annotations, ServiceAnnotationLoadBalancerInternal, TrueAnnotationValue)
+}
+
+// GetHealthProbeConfigOfPortFromK8sSvcAnnotation gets the health probe configuration for the given port
+func GetHealthProbeConfigOfPortFromK8sSvcAnnotation(annotations map[string]string, port int32, key HealthProbeParams, validators ...BusinessValidator) (*string, error) {
+	return GetAttributeValueInSvcAnnotation(annotations, BuildHealthProbeAnnotationKeyForPort(port, key), validators...)
+}
+
+// Getint32ValueFromK8sSvcAnnotation gets the int32 value of the given annotation key
+func Getint32ValueFromK8sSvcAnnotation(annotations map[string]string, key string, validators ...Int32BusinessValidator) (*int32, error) {
+	val, err := GetAttributeValueInSvcAnnotation(annotations, key)
+	if err == nil && val != nil {
+		return extractInt32FromString(*val, validators...)
+	}
+	return nil, err
+}
+
+// BuildHealthProbeAnnotationKeyForPort builds the health probe annotation key for the given port
+func BuildHealthProbeAnnotationKeyForPort(port int32, key HealthProbeParams) string {
+	return fmt.Sprintf(HealthProbeAnnotationPrefixPattern, port) + string(key)
+}
+
+// GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation gets the int32 health probe configuration for the given port
+func GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(annotations map[string]string, port int32, key HealthProbeParams, validators ...Int32BusinessValidator) (*int32, error) {
+	return Getint32ValueFromK8sSvcAnnotation(annotations, BuildHealthProbeAnnotationKeyForPort(port, key), validators...)
+}
+
+// Int32BusinessValidator is a validator function which is invoked after values are parsed in order to make sure the input value meets the business need.
+type Int32BusinessValidator func(*int32) error
+
+// extractInt32FromString parses an integer value from a string and returns a reference to an int32
+func extractInt32FromString(val string, businessValidator ...Int32BusinessValidator) (*int32, error) {
+	val = strings.TrimSpace(val)
+	errKey := fmt.Errorf("%s value must be a whole number", val)
+	toInt, err := strconv.ParseInt(val, 10, 32)
+	if err != nil {
+		return nil, fmt.Errorf("error value: %w: %v", err, errKey)
+	}
+	parsedInt := int32(toInt)
+	for _, validator := range businessValidator {
+		if validator != nil {
+			err := validator(&parsedInt)
+			if err != nil {
+				return nil, fmt.Errorf("error parsing value: %w", err)
+			}
+		}
+	}
+	return &parsedInt, nil
+}
+
+// BusinessValidator is a validator function which is invoked after values are parsed in order to make sure the input value meets the business need.
+type BusinessValidator func(*string) error
+
+// GetAttributeValueInSvcAnnotation gets the value in the annotation map for the given key
+func GetAttributeValueInSvcAnnotation(annotations map[string]string, key string, validators ...BusinessValidator) (*string, error) {
+	if l, found := annotations[key]; found {
+		for _, validateFunc := range validators {
+			if validateFunc != nil {
+				if err := validateFunc(&l); err != nil {
+					return nil, err
+				}
+			}
+		}
+		return &l, nil
+	}
+	return nil, nil
+}
+
+// expectAttributeInSvcAnnotationBeEqualTo gets the value of the given key in the service annotations and compares it with the target value
+func expectAttributeInSvcAnnotationBeEqualTo(annotations map[string]string, key string, value string) bool {
+	if l, err := GetAttributeValueInSvcAnnotation(annotations, key); err == nil && l != nil {
+		return strings.EqualFold(*l, value)
+	}
+	return false
+}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go
new file mode 100644
index 000000000..9f1322709
--- /dev/null
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/nodemanager/nodemanager.go
@@ -0,0 +1,786 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package nodemanager + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + clientretry "k8s.io/client-go/util/retry" + cloudprovider "k8s.io/cloud-provider" + cloudproviderapi "k8s.io/cloud-provider/api" + cloudnodeutil "k8s.io/cloud-provider/node/helpers" + nodeutil "k8s.io/component-helpers/node/util" + "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/consts" +) + +// NodeProvider defines the interfaces for node provider. +type NodeProvider interface { + // NodeAddresses returns the addresses of the specified instance. + NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) + // InstanceID returns the cloud provider ID of the specified instance. + InstanceID(ctx context.Context, name types.NodeName) (string, error) + // InstanceType returns the type of the specified instance. + InstanceType(ctx context.Context, name types.NodeName) (string, error) + // GetZone returns the Zone containing the current failure zone and locality region that the program is running in + GetZone(ctx context.Context, name types.NodeName) (cloudprovider.Zone, error) + // GetPlatformSubFaultDomain returns the PlatformSubFaultDomain from IMDS if set. + GetPlatformSubFaultDomain() (string, error) +} + +// labelReconcileInfo lists Node labels to reconcile, and how to reconcile them. +// primaryKey and secondaryKey are keys of labels to reconcile. +// - If both keys exist, but their values don't match. Use the value from the +// primaryKey as the source of truth to reconcile. +// - If ensureSecondaryExists is true, and the secondaryKey does not +// exist, secondaryKey will be added with the value of the primaryKey. +var labelReconcileInfo = []struct { + primaryKey string + secondaryKey string + ensureSecondaryExists bool +}{ + { + // Reconcile the beta and the GA zone label using the beta label as + // the source of truth + // TODO: switch the primary key to GA labels in v1.21 + primaryKey: v1.LabelZoneFailureDomain, + secondaryKey: v1.LabelZoneFailureDomainStable, + ensureSecondaryExists: true, + }, + { + // Reconcile the beta and the stable region label using the beta label as + // the source of truth + // TODO: switch the primary key to GA labels in v1.21 + primaryKey: v1.LabelZoneRegion, + secondaryKey: v1.LabelZoneRegionStable, + ensureSecondaryExists: true, + }, + { + // Reconcile the beta and the stable instance-type label using the beta label as + // the source of truth + // TODO: switch the primary key to GA labels in v1.21 + primaryKey: v1.LabelInstanceType, + secondaryKey: v1.LabelInstanceTypeStable, + ensureSecondaryExists: true, + }, +} + +// UpdateNodeSpecBackoff is the back configure for node update. +var UpdateNodeSpecBackoff = wait.Backoff{ + Steps: 20, + Duration: 50 * time.Millisecond, + Jitter: 1.0, +} + +var updateNetworkConditionBackoff = wait.Backoff{ + Steps: 5, // Maximum number of retries. 
+ Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +// CloudNodeController reconciles node information. +type CloudNodeController struct { + nodeName string + waitForRoutes bool + nodeProvider NodeProvider + nodeInformer coreinformers.NodeInformer + kubeClient clientset.Interface + recorder record.EventRecorder + + nodeStatusUpdateFrequency time.Duration +} + +// NewCloudNodeController creates a CloudNodeController object +func NewCloudNodeController( + nodeName string, + nodeInformer coreinformers.NodeInformer, + kubeClient clientset.Interface, + nodeProvider NodeProvider, + nodeStatusUpdateFrequency time.Duration, + waitForRoutes bool) *CloudNodeController { + + eventBroadcaster := record.NewBroadcaster() + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}) + eventBroadcaster.StartLogging(klog.Infof) + if kubeClient != nil { + klog.V(0).Infof("Sending events to api server.") + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) + } else { + klog.V(0).Infof("No api server defined - no events will be sent to API server.") + } + + cnc := &CloudNodeController{ + nodeName: nodeName, + nodeInformer: nodeInformer, + kubeClient: kubeClient, + recorder: recorder, + nodeProvider: nodeProvider, + waitForRoutes: waitForRoutes, + nodeStatusUpdateFrequency: nodeStatusUpdateFrequency, + } + + // Use shared informer to listen to add/update of nodes. Note that any nodes + // that exist before node controller starts will show up in the update method + cnc.nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { cnc.AddCloudNode(context.TODO(), obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { cnc.UpdateCloudNode(context.TODO(), oldObj, newObj) }, + }) + + return cnc +} + +// Run controller updates newly registered nodes with information +// from the cloud provider. This call is blocking so should be called +// via a goroutine +func (cnc *CloudNodeController) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + // The following loops run communicate with the APIServer with a worst case complexity + // of O(num_nodes) per cycle. These functions are justified here because these events fire + // very infrequently. DO NOT MODIFY this to perform frequent operations. + + // Start a loop to periodically update the node addresses obtained from the cloud + wait.Until(func() { cnc.UpdateNodeStatus(context.TODO()) }, cnc.nodeStatusUpdateFrequency, stopCh) +} + +// UpdateNodeStatus updates the node status, such as node addresses +func (cnc *CloudNodeController) UpdateNodeStatus(ctx context.Context) { + node, err := cnc.nodeInformer.Lister().Get(cnc.nodeName) + if err != nil { + // If node not found, just ignore it. + if apierrors.IsNotFound(err) { + return + } + + klog.Errorf("Error getting node %q from informer, err: %v", cnc.nodeName, err) + return + } + + err = cnc.updateNodeAddress(ctx, node) + if err != nil { + klog.Errorf("Error reconciling node address for node %q, err: %v", node.Name, err) + } + + err = cnc.reconcileNodeLabels(node) + if err != nil { + klog.Errorf("Error reconciling node labels for node %q, err: %v", node.Name, err) + } +} + +// reconcileNodeLabels reconciles node labels transitioning from beta to GA +func (cnc *CloudNodeController) reconcileNodeLabels(node *v1.Node) error { + if node.Labels == nil { + // Nothing to reconcile. 
+ return nil + } + + labelsToUpdate := map[string]string{} + for _, r := range labelReconcileInfo { + primaryValue, primaryExists := node.Labels[r.primaryKey] + secondaryValue, secondaryExists := node.Labels[r.secondaryKey] + + if !primaryExists { + // The primary label key does not exist. This should not happen + // within our supported version skew range, when no external + // components/factors modifying the node object. Ignore this case. + continue + } + if secondaryExists && primaryValue != secondaryValue { + // Secondary label exists, but not consistent with the primary + // label. Need to reconcile. + labelsToUpdate[r.secondaryKey] = primaryValue + + } else if !secondaryExists && r.ensureSecondaryExists { + // Apply secondary label based on primary label. + labelsToUpdate[r.secondaryKey] = primaryValue + } + } + + if len(labelsToUpdate) == 0 { + return nil + } + + if !cloudnodeutil.AddOrUpdateLabelsOnNode(cnc.kubeClient, labelsToUpdate, node) { + return fmt.Errorf("failed update labels for node %+v", node) + } + + return nil +} + +// UpdateNodeAddress updates the nodeAddress of a single node +func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.Node) error { + // Do not process nodes that are still tainted + cloudTaint := GetCloudTaint(node.Spec.Taints) + if cloudTaint != nil { + klog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name) + return nil + } + + // Node that isn't present according to the cloud provider shouldn't have its address updated + exists, err := cnc.ensureNodeExistsByProviderID(ctx, node) + if err != nil { + // Continue to update node address when not sure the node is not exists + klog.Warningf("ensureNodeExistsByProviderID (node %s) reported an error (%v), continue to update its address", node.Name, err) + } else if !exists { + klog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name) + return nil + } + + nodeAddresses, err := cnc.getNodeAddressesByName(ctx, node) + if err != nil { + return fmt.Errorf("Error getting node addresses for node %q: %v", node.Name, err) + } + + if len(nodeAddresses) == 0 { + klog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name) + return nil + } + + // Check if a hostname address exists in the cloud provided addresses + hostnameExists := false + for i := range nodeAddresses { + if nodeAddresses[i].Type == v1.NodeHostName { + hostnameExists = true + break + } + } + // If hostname was not present in cloud provided addresses, use the hostname + // from the existing node (populated by kubelet) + if !hostnameExists { + for _, addr := range node.Status.Addresses { + if addr.Type == v1.NodeHostName { + nodeAddresses = append(nodeAddresses, addr) + } + } + } + // If nodeIP was suggested by user, ensure that + // it can be found in the cloud as well (consistent with the behaviour in kubelet) + if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok { + if nodeIP == nil { + return fmt.Errorf("specified Node IP %s not found in cloudprovider for node %q", nodeAddresses, node.Name) + } + } + if !nodeAddressesChangeDetected(node.Status.Addresses, nodeAddresses) { + return nil + } + + newNode := node.DeepCopy() + newNode.Status.Addresses = nodeAddresses + _, _, err = PatchNodeStatus(cnc.kubeClient.CoreV1(), types.NodeName(node.Name), node, newNode) + if err != nil { + return fmt.Errorf("Error patching node with cloud ip addresses = [%v]", err) + } + + return nil +} + +// 
nodeModifier is used to carry changes to node objects across multiple attempts to update them +// in a retry-if-conflict loop. +type nodeModifier func(*v1.Node) + +// UpdateCloudNode handles node update event. +func (cnc *CloudNodeController) UpdateCloudNode(ctx context.Context, _, newObj interface{}) { + node, ok := newObj.(*v1.Node) + if !ok { + utilruntime.HandleError(fmt.Errorf("unexpected object type: %v", newObj)) + return + } + + // Skip other nodes other than cnc.nodeName. + if !strings.EqualFold(cnc.nodeName, node.Name) { + return + } + + cloudTaint := GetCloudTaint(node.Spec.Taints) + if cloudTaint == nil { + // The node has already been initialized so nothing to do. + return + } + + cnc.initializeNode(ctx, node) +} + +// AddCloudNode handles initializing new nodes registered with the cloud taint. +func (cnc *CloudNodeController) AddCloudNode(ctx context.Context, obj interface{}) { + node := obj.(*v1.Node) + + // Skip other nodes other than cnc.nodeName. + if !strings.EqualFold(cnc.nodeName, node.Name) { + return + } + + cloudTaint := GetCloudTaint(node.Spec.Taints) + if cloudTaint == nil { + klog.V(2).Infof("This node %s is registered without the cloud taint. Will not process.", node.Name) + return + } + + cnc.initializeNode(ctx, node) +} + +// This processes nodes that were added into the cluster, and cloud initialize them if appropriate +func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Node) { + klog.Infof("Initializing node %s with cloud provider", node.Name) + curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get node %s: %w", node.Name, err)) + return + } + + cloudTaint := GetCloudTaint(curNode.Spec.Taints) + if cloudTaint == nil { + // Node object received from event had the cloud taint but was outdated, + // the node has actually already been initialized. + return + } + + if cnc.waitForRoutes { + // Set node condition node NodeNetworkUnavailable=true so that Pods won't + // be scheduled to this node until routes have been created. 
+ err = cnc.updateNetworkingCondition(node, false) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to patch condition for node %s: %w", node.Name, err)) + return + } + } + + var nodeModifiers []nodeModifier + err = clientretry.OnError(UpdateNodeSpecBackoff, func(err error) bool { + return err != nil && strings.HasPrefix(err.Error(), "failed to set node provider id") + }, func() error { + nodeModifiers, err = cnc.getNodeModifiersFromCloudProvider(ctx, curNode) + return err + }) + if err != nil { + // Instead of just logging the error, panic and node manager can restart + utilruntime.Must(fmt.Errorf("failed to initialize node %s at cloudprovider: %w", node.Name, err)) + return + } + + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + n.Spec.Taints = excludeCloudTaint(n.Spec.Taints) + }) + + err = clientretry.RetryOnConflict(UpdateNodeSpecBackoff, func() error { + curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, modify := range nodeModifiers { + modify(curNode) + } + + _, err = cnc.kubeClient.CoreV1().Nodes().Update(ctx, curNode, metav1.UpdateOptions{}) + if err != nil { + return err + } + + // After adding, call UpdateNodeAddress to set the CloudProvider provided IPAddresses + // So that users do not see any significant delay in IP addresses being filled into the node + err = cnc.updateNodeAddress(ctx, curNode) + if err != nil { + return err + } + + klog.Infof("Successfully initialized node %s with cloud provider", node.Name) + return nil + }) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +// getNodeModifiersFromCloudProvider returns a slice of nodeModifiers that update +// a node object with provider-specific information. +// All of the returned functions are idempotent, because they are used in a retry-if-conflict +// loop, meaning they could get called multiple times. 
+func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Context, node *v1.Node) ([]nodeModifier, error) { + var nodeModifiers []nodeModifier + + if node.Spec.ProviderID == "" { + providerID, err := cnc.nodeProvider.InstanceID(ctx, types.NodeName(node.Name)) + if err == nil { + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + if n.Spec.ProviderID == "" { + n.Spec.ProviderID = providerID + } + }) + } else { + // if we are not able to get node provider id, + // we return error here and retry in the caller initializeNode() + return nil, fmt.Errorf("failed to set node provider id: %w", err) + } + } + + nodeAddresses, err := cnc.getNodeAddressesByName(ctx, node) + if err != nil { + return nil, err + } + + // If user provided an IP address, ensure that IP address is found + // in the cloud provider before removing the taint on the node + if nodeIP, ok := ensureNodeProvidedIPExists(node, nodeAddresses); ok { + if nodeIP == nil { + return nil, errors.New("failed to find kubelet node IP from cloud provider") + } + } + + if instanceType, err := cnc.getInstanceTypeByName(ctx, node); err != nil { + return nil, err + } else if instanceType != "" { + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceTypeStable, instanceType) + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + if n.Labels == nil { + n.Labels = map[string]string{} + } + n.Labels[v1.LabelInstanceType] = instanceType + n.Labels[v1.LabelInstanceTypeStable] = instanceType + }) + } + + zone, err := cnc.getZoneByName(ctx, node) + if err != nil { + return nil, fmt.Errorf("failed to get zone from cloud provider: %w", err) + } + if zone.FailureDomain != "" { + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomainStable, zone.FailureDomain) + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + if n.Labels == nil { + n.Labels = map[string]string{} + } + n.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain + n.Labels[v1.LabelZoneFailureDomainStable] = zone.FailureDomain + }) + } + if zone.Region != "" { + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region) + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegionStable, zone.Region) + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + if n.Labels == nil { + n.Labels = map[string]string{} + } + n.Labels[v1.LabelZoneRegion] = zone.Region + n.Labels[v1.LabelZoneRegionStable] = zone.Region + }) + } + + platformSubFaultDomain, err := cnc.getPlatformSubFaultDomain() + if err != nil { + return nil, fmt.Errorf("failed to get platformSubFaultDomain: %w", err) + } + if platformSubFaultDomain != "" { + klog.V(2).Infof("Adding node label from cloud provider: %s=%s", consts.LabelPlatformSubFaultDomain, platformSubFaultDomain) + nodeModifiers = append(nodeModifiers, func(n *v1.Node) { + if n.Labels == nil { + n.Labels = map[string]string{} + } + n.Labels[consts.LabelPlatformSubFaultDomain] = platformSubFaultDomain + }) + } + + return nodeModifiers, nil +} + +func GetCloudTaint(taints []v1.Taint) *v1.Taint { + for _, taint := range taints { + if taint.Key == cloudproviderapi.TaintExternalCloudProvider { + return &taint + } + } + return nil +} + +func excludeCloudTaint(taints []v1.Taint) []v1.Taint { + 
newTaints := []v1.Taint{} + for _, taint := range taints { + if taint.Key == cloudproviderapi.TaintExternalCloudProvider { + continue + } + newTaints = append(newTaints, taint) + } + return newTaints +} + +// ensureNodeExistsByProviderID checks if the instance exists by the provider id, +// If provider id in spec is empty it calls instanceId with node name to get provider id +func (cnc *CloudNodeController) ensureNodeExistsByProviderID(ctx context.Context, node *v1.Node) (bool, error) { + providerID := node.Spec.ProviderID + if providerID == "" { + var err error + providerID, err = cnc.nodeProvider.InstanceID(ctx, types.NodeName(node.Name)) + if err != nil { + if errors.Is(err, cloudprovider.InstanceNotFound) { + return false, nil + } + return false, err + } + + if providerID == "" { + klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name) + return false, nil + } + } + + return true, nil +} + +func (cnc *CloudNodeController) getNodeAddressesByName(ctx context.Context, node *v1.Node) ([]v1.NodeAddress, error) { + nodeAddresses, err := cnc.nodeProvider.NodeAddresses(ctx, types.NodeName(node.Name)) + if err != nil { + return nil, fmt.Errorf("error fetching node by name %s: %w", node.Name, err) + } + return nodeAddresses, nil +} + +func nodeAddressesChangeDetected(addressSet1, addressSet2 []v1.NodeAddress) bool { + if len(addressSet1) != len(addressSet2) { + return true + } + addressMap1 := map[v1.NodeAddressType]string{} + + for i := range addressSet1 { + addressMap1[addressSet1[i].Type] = addressSet1[i].Address + } + + for _, v := range addressSet2 { + if addressMap1[v.Type] != v.Address { + return true + } + } + return false +} + +func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) (*v1.NodeAddress, bool) { + var nodeIP *v1.NodeAddress + nodeIPExists := false + if providedIP, ok := node.ObjectMeta.Annotations[cloudproviderapi.AnnotationAlphaProvidedIPAddr]; ok { + nodeIPExists = true + for i := range nodeAddresses { + if nodeAddresses[i].Address == providedIP { + nodeIP = &nodeAddresses[i] + break + } + } + } + return nodeIP, nodeIPExists +} + +func (cnc *CloudNodeController) getInstanceTypeByName(ctx context.Context, node *v1.Node) (string, error) { + instanceType, err := cnc.nodeProvider.InstanceType(ctx, types.NodeName(node.Name)) + if err != nil { + return "", fmt.Errorf("InstanceType: Error fetching by NodeName %s: %w", node.Name, err) + } + return instanceType, err +} + +// getZoneByName will attempt to get the zone of node using its providerID +// then it's name. 
If both attempts fail, an error is returned +func (cnc *CloudNodeController) getZoneByName(ctx context.Context, node *v1.Node) (cloudprovider.Zone, error) { + zone, err := cnc.nodeProvider.GetZone(ctx, types.NodeName(node.Name)) + if err != nil { + return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by NodeName %s: %w", node.Name, err) + } + + return zone, nil +} + +func (cnc *CloudNodeController) getPlatformSubFaultDomain() (string, error) { + subFD, err := cnc.nodeProvider.GetPlatformSubFaultDomain() + if err != nil { + return "", fmt.Errorf("cnc.getPlatformSubfaultDomain: %w", err) + } + return subFD, nil +} + +func (cnc *CloudNodeController) updateNetworkingCondition(node *v1.Node, networkReady bool) error { + _, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable) + if networkReady && condition != nil && condition.Status == v1.ConditionFalse { + klog.V(4).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name) + return nil + } + + if !networkReady && condition != nil && condition.Status == v1.ConditionTrue { + klog.V(4).Infof("set node %v with NodeNetworkUnavailable=true was canceled because it is already set", node.Name) + return nil + } + + klog.V(2).Infof("Patching node status %v with %v previous condition was:%+v", node.Name, networkReady, condition) + + // either condition is not there, or has a value != to what we need + // start setting it + err := clientretry.RetryOnConflict(updateNetworkConditionBackoff, func() error { + var err error + // Patch could also fail, even though the chance is very slim. So we still do + // patch in the retry loop. + currentTime := metav1.Now() + if networkReady { + err = nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ + Type: v1.NodeNetworkUnavailable, + Status: v1.ConditionFalse, + Reason: "NodeInitialization", + Message: "Should wait for cloud routes", + LastTransitionTime: currentTime, + }) + } else { + err = nodeutil.SetNodeCondition(cnc.kubeClient, types.NodeName(node.Name), v1.NodeCondition{ + Type: v1.NodeNetworkUnavailable, + Status: v1.ConditionTrue, + Reason: "NodeInitialization", + Message: "Don't need to wait for cloud routes", + LastTransitionTime: currentTime, + }) + } + if err != nil { + klog.V(4).Infof("Error updating node %s, retrying: %v", types.NodeName(node.Name), err) + } + return err + }) + + if err != nil { + klog.Errorf("Error updating node %s: %v", node.Name, err) + } + + return err +} + +// PatchNodeStatus patches node status. 
+func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) { + patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode) + if err != nil { + return nil, nil, err + } + + updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") + if err != nil { + return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %w", patchBytes, nodeName, err) + } + return updatedNode, patchBytes, nil +} + +func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) { + oldData, err := json.Marshal(oldNode) + if err != nil { + return nil, fmt.Errorf("failed to Marshal oldData for node %q: %w", nodeName, err) + } + + // NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which + // will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch + // if it changed. + manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses) + + // Reset spec to make sure only patch for Status or ObjectMeta is generated. + // Note that we don't reset ObjectMeta here, because: + // 1. This aligns with Nodes().UpdateStatus(). + // 2. Some component does use this to update node annotations. + diffNode := newNode.DeepCopy() + diffNode.Spec = oldNode.Spec + if manuallyPatchAddresses { + diffNode.Status.Addresses = oldNode.Status.Addresses + } + newData, err := json.Marshal(diffNode) + if err != nil { + return nil, fmt.Errorf("failed to Marshal newData for node %q: %w", nodeName, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %w", nodeName, err) + } + if manuallyPatchAddresses { + patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses) + if err != nil { + return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %w", nodeName, err) + } + } + + return patchBytes, nil +} + +// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to +// the existing patch +func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) { + // Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and + // addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate: + // + // { + // "status": { + // "conditions": [ ... 
], + // "phase": ..., + // "addresses": [ + // { + // "type": "InternalIP", + // "address": "10.0.0.1" + // }, + // { + // "$patch": "replace" + // } + // ] + // } + // } + + var patchMap map[string]interface{} + if err := json.Unmarshal(patchBytes, &patchMap); err != nil { + return nil, err + } + + addrBytes, err := json.Marshal(addresses) + if err != nil { + return nil, err + } + var addrArray []interface{} + if err := json.Unmarshal(addrBytes, &addrArray); err != nil { + return nil, err + } + addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"}) + + status := patchMap["status"] + if status == nil { + status = map[string]interface{}{} + patchMap["status"] = status + } + statusMap, ok := status.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected data in patch") + } + statusMap["addresses"] = addrArray + + return json.Marshal(patchMap) +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go index 45fa3984f..3e32a65ea 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go @@ -70,6 +70,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azureclients/zoneclient" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + nodemanager "sigs.k8s.io/cloud-provider-azure/pkg/nodemanager" "sigs.k8s.io/cloud-provider-azure/pkg/retry" // ensure the newly added package from azure-sdk-for-go is in vendor/ @@ -1036,16 +1037,20 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name) } - // Remove from unmanagedNodes cache. managed, ok := prevNode.ObjectMeta.Labels[consts.ManagedByAzureLabel] - if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) { + isNodeManagedByCloudProvider := !ok || !strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) + + klog.Infof("managed=%v, ok=%v, isNodeManagedByCloudProvider=%v", + managed, ok, isNodeManagedByCloudProvider) + + // Remove from unmanagedNodes cache + if !isNodeManagedByCloudProvider { az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name) - az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name) } - // Remove from excludeLoadBalancerNodes cache. - if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel { - az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name) + // if the node is being deleted from the cluster, exclude it from load balancers + if newNode == nil { + az.excludeLoadBalancerNodes.Insert(prevNode.ObjectMeta.Name) } // Remove from nodePrivateIPs cache. @@ -1074,17 +1079,35 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { az.nodeResourceGroups[newNode.ObjectMeta.Name] = strings.ToLower(newRG) } - // Add to unmanagedNodes cache. + _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers] managed, ok := newNode.ObjectMeta.Labels[consts.ManagedByAzureLabel] - if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) { + isNodeManagedByCloudProvider := !ok || !strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) + + // Update unmanagedNodes cache + if !isNodeManagedByCloudProvider { az.unmanagedNodes.Insert(newNode.ObjectMeta.Name) - az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) } - // Add to excludeLoadBalancerNodes cache. 
- if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel { - klog.V(4).Infof("adding node %s from the exclude-from-lb list because the label %s is found", newNode.Name, v1.LabelNodeExcludeBalancers) + // Update excludeLoadBalancerNodes cache + switch { + case !isNodeManagedByCloudProvider: az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) + + case hasExcludeBalancerLabel: + az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) + + case !isNodeReady(newNode) && nodemanager.GetCloudTaint(newNode.Spec.Taints) == nil: + // If not in ready state and not a newly created node, add to excludeLoadBalancerNodes cache. + // New nodes (tainted with "node.cloudprovider.kubernetes.io/uninitialized") should not be + // excluded from load balancers regardless of their state, so as to reduce the number of + // VMSS API calls and not provoke VMScaleSetActiveModelsCountLimitReached. + // (https://github.com/kubernetes-sigs/cloud-provider-azure/issues/851) + az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name) + + default: + // Nodes not falling into the three cases above are valid backends and + // should not appear in excludeLoadBalancerNodes cache. + az.excludeLoadBalancerNodes.Delete(newNode.ObjectMeta.Name) } // Add to nodePrivateIPs cache @@ -1220,3 +1243,12 @@ func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, erro return az.excludeLoadBalancerNodes.Has(nodeName), nil } + +func isNodeReady(node *v1.Node) bool { + for _, cond := range node.Status.Conditions { + if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue { + return true + } + } + return false +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go index e83e9b1f0..73aa33949 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go @@ -293,6 +293,34 @@ func (az *Cloud) CreateOrUpdateLBBackendPool(lbName string, backendPool network. return rerr.Error() } +func (az *Cloud) DeleteLBBackendPool(lbName, backendPoolName string) error { + ctx, cancel := getContextWithCancel() + defer cancel() + + klog.V(4).Infof("DeleteLBBackendPool: deleting backend pool %s in LB %s", backendPoolName, lbName) + rerr := az.LoadBalancerClient.DeleteLBBackendPool(ctx, az.getLoadBalancerResourceGroup(), lbName, backendPoolName) + if rerr == nil { + // Invalidate the cache right after updating + _ = az.lbCache.Delete(lbName) + return nil + } + + // Invalidate the cache because ETAG precondition mismatch. + if rerr.HTTPStatusCode == http.StatusPreconditionFailed { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) + _ = az.lbCache.Delete(lbName) + } + + retryErrorMessage := rerr.Error().Error() + // Invalidate the cache because another new operation has canceled the current request. + if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { + klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) + _ = az.lbCache.Delete(lbName) + } + + return rerr.Error() +} + // ListManagedLBs invokes az.LoadBalancerClient.List and filter out // those that are not managed by cloud provider azure or not associated to a managed VMSet. 
func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go index 01488e1b8..17538238a 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go @@ -255,7 +255,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, } else { klog.Warningf("azureDisk - switch to batch operation due to rate limited(async: %t), QPS: %f", async, c.diskOpRateLimiter.QPS()) } - resourceGroup, err := getResourceGroupFromDiskURI(diskURI) + resourceGroup, _, err := getInfoFromDiskURI(diskURI) if err != nil { return -1, err } @@ -597,12 +597,12 @@ func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfiltere func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) { diskName := path.Base(diskURI) - resourceGroup, err := getResourceGroupFromDiskURI(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) if err != nil { return false, err } - if _, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName); rerr != nil { + if _, rerr := c.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName); rerr != nil { if rerr.HTTPStatusCode == http.StatusNotFound { return false, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go index 7106d89a8..5c16c8d28 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_vmss.go @@ -35,7 +35,7 @@ import ( // AttachDisk attaches a disk to vm func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) (*azure.Future, error) { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { return nil, err } @@ -45,17 +45,20 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis return nil, err } - disks := []compute.DataDisk{} - if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + var disks []compute.DataDisk + + storageProfile := vm.AsVirtualMachineScaleSetVM().StorageProfile + + if storageProfile != nil && storageProfile.DataDisks != nil { + disks = make([]compute.DataDisk, len(*storageProfile.DataDisks)) + copy(disks, *storageProfile.DataDisks) } for k, v := range diskMap { diskURI := k opt := v attached := false - for _, disk := range *vm.StorageProfile.DataDisks { + for _, disk := range *storageProfile.DataDisks { if disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI) { attached = true break @@ -68,12 +71,12 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis managedDisk := &compute.ManagedDiskParameters{ID: &diskURI} if opt.diskEncryptionSetID == "" { - if vm.StorageProfile.OsDisk != nil && - vm.StorageProfile.OsDisk.ManagedDisk != nil && - 
vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil && - vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil { + if storageProfile.OsDisk != nil && + storageProfile.OsDisk.ManagedDisk != nil && + storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil && + storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil { // set diskEncryptionSet as value of os disk by default - opt.diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID + opt.diskEncryptionSetID = *storageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID } } if opt.diskEncryptionSetID != "" { @@ -104,14 +107,14 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis }() klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%s)", nodeResourceGroup, nodeName, diskMap) - future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk") + future, rerr := ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") if rerr != nil { klog.Errorf("azureDisk - attach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks - future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk") + future, rerr = ss.VirtualMachineScaleSetVMsClient.UpdateAsync(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "attach_disk") } } @@ -133,7 +136,7 @@ func (ss *ScaleSet) WaitForUpdateResult(ctx context.Context, future *azure.Futur // DetachDisk detaches a disk from VM func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string) error { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { return err } @@ -143,10 +146,11 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis return err } - disks := []compute.DataDisk{} - if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil { - disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks)) - copy(disks, *vm.StorageProfile.DataDisks) + var disks []compute.DataDisk + storageProfile := vm.VirtualMachineScaleSetVMProperties.StorageProfile + if storageProfile != nil && storageProfile.DataDisks != nil { + disks = make([]compute.DataDisk, len(*storageProfile.DataDisks)) + copy(disks, *storageProfile.DataDisks) } bFoundDisk := false for i, disk := range disks { @@ -168,7 +172,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis } else { if strings.EqualFold(ss.cloud.Environment.Name, consts.AzureStackCloudName) && !ss.Config.DisableAzureStackCloud { // Azure stack does not support ToBeDetached flag, use original way to detach disk - newDisks := []compute.DataDisk{} + var newDisks []compute.DataDisk for _, disk := range disks { if !to.Bool(disk.ToBeDetached) { newDisks = append(newDisks, disk) @@ -191,14 +195,15 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName 
types.NodeName, dis }() klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, nodeName, diskMap) - rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk") + rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, + "detach_disk") if rerr != nil { klog.Errorf("azureDisk - detach disk list(%s) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) if rerr.HTTPStatusCode == http.StatusNotFound { klog.Errorf("azureDisk - begin to filterNonExistingDisks(%v) on rg(%s) vm(%s)", diskMap, nodeResourceGroup, nodeName) disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks) newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks - rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk") + rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, "detach_disk") } } @@ -212,7 +217,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis // UpdateVM updates a vm func (ss *ScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, _, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { return err } @@ -228,7 +233,7 @@ func (ss *ScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error }() klog.V(2).Infof("azureDisk - update(%s): vm(%s)", nodeResourceGroup, nodeName) - rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, compute.VirtualMachineScaleSetVM{}, "update_vmss_instance") + rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, compute.VirtualMachineScaleSetVM{}, "update_vmss_instance") klog.V(2).Infof("azureDisk - update(%s): vm(%s) - returned with %v", nodeResourceGroup, nodeName, rerr) if rerr != nil { @@ -239,14 +244,16 @@ func (ss *ScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) error // GetDataDisks gets a list of data disks attached to the node. 
func (ss *ScaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, *string, error) { - _, _, vm, err := ss.getVmssVM(string(nodeName), crt) + vm, err := ss.getVmssVM(string(nodeName), crt) if err != nil { return nil, nil, err } - if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil { + storageProfile := vm.AsVirtualMachineScaleSetVM().StorageProfile + + if storageProfile == nil || storageProfile.DataDisks == nil { return nil, nil, nil } - return *vm.StorageProfile.DataDisks, vm.ProvisioningState, nil + return *storageProfile.DataDisks, vm.AsVirtualMachineScaleSetVM().ProvisioningState, nil } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go index 2b3177ba7..5db795fd5 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go @@ -379,7 +379,7 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs // safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vmSetName string, service *v1.Service) *retry.Error { - if strings.EqualFold(az.LoadBalancerBackendPoolConfigurationType, consts.LoadBalancerBackendPoolConfigurationTypeNodeIPConfiguration) { + if isLBBackendPoolTypeIPConfig(service, &lb, clusterName) { lbBackendPoolID := az.getBackendPoolID(to.String(lb.Name), az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service)) err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true) if err != nil { @@ -1171,112 +1171,6 @@ func getDomainNameLabel(pip *network.PublicIPAddress) string { return to.String(pip.PublicIPAddressPropertiesFormat.DNSSettings.DomainNameLabel) } -func getIdleTimeout(s *v1.Service) (*int32, error) { - const ( - min = 4 - max = 30 - ) - - val, ok := s.Annotations[consts.ServiceAnnotationLoadBalancerIdleTimeout] - if !ok { - // Return a nil here as this will set the value to the azure default - return nil, nil - } - - errInvalidTimeout := fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d", min, max) - toInt, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, fmt.Errorf("error parsing idle timeout value: %w: %v", err, errInvalidTimeout) - } - to32 := int32(toInt) - - if to32 < min || to32 > max { - return nil, errInvalidTimeout - } - return &to32, nil -} - -// getProbeIntervalInSecondsAndNumOfProbe parse probeInterval and numberOfProbes from the annotations of service object. -func getProbeIntervalInSecondsAndNumOfProbe(s *v1.Service) (*int32, *int32, error) { - // get number of probes - numberOfProbes, err := getInt32FromAnnotations(s.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, func(val *int32) error { - //minimum number of unhealthy responses is 2. 
ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe - const ( - MinimumNumOfProbe = 2 - ) - if *val < MinimumNumOfProbe { - return fmt.Errorf("the minimum value of %s is %d", consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, MinimumNumOfProbe) - } - return nil - }) - if err != nil { - return nil, nil, err - } - // if numberOfProbes is not set, set it to default instead ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe - if numberOfProbes == nil { - numberOfProbes = to.Int32Ptr(2) - } - - probeInterval, err := getInt32FromAnnotations(s.Annotations, consts.ServiceAnnotationLoadBalancerHealthProbeInterval, func(val *int32) error { - //minimum probe interval in seconds is 5. ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe - const ( - MinimumProbeIntervalInSecond = 5 - ) - if *val < 5 { - return fmt.Errorf("the minimum value of %s is %d", consts.ServiceAnnotationLoadBalancerHealthProbeInterval, MinimumProbeIntervalInSecond) - } - return nil - }) - if err != nil { - return nil, nil, err - } - // if probeInterval is not set, set it to default instead ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe - if probeInterval == nil { - probeInterval = to.Int32Ptr(5) - } - - // total probe should be less than 120 seconds ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe - if (*probeInterval)*(*numberOfProbes) >= 120 { - return nil, nil, fmt.Errorf("total probe should be less than 120, please adjust interval and number of probe accordingly") - } - - return probeInterval, numberOfProbes, nil -} - -// Int32BusinessValidator is validator function which is invoked after values are parsed in order to make sure input value meets the businees need. 
-type Int32BusinessValidator func(*int32) error - -// getInt32FromAnnotations parse integer value from annotation and return an reference to int32 object -func getInt32FromAnnotations(annotations map[string]string, key string, businessValidator ...Int32BusinessValidator) (*int32, error) { - if len(key) <= 0 { - return nil, fmt.Errorf("annotation key should not be empty") - } - if annotations == nil { - // Return a nil here as this will set the value to the azure default - return nil, nil - } - val, ok := annotations[key] - if !ok { - // Return a nil here as this will set the value to the azure default - return nil, nil - } - errKey := fmt.Errorf("%s value must be a whole number", key) - toInt, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, fmt.Errorf("error parsing %s value: %w: %v", key, err, errKey) - } - parsedInt := int32(toInt) - for _, validator := range businessValidator { - if validator != nil { - err := validator(&parsedInt) - if err != nil { - return nil, fmt.Errorf("error parsing %s value: %w", key, err) - } - } - } - return &parsedInt, nil -} - func (az *Cloud) isFrontendIPChanged(clusterName string, config network.FrontendIPConfiguration, service *v1.Service, lbFrontendIPConfigName string) (bool, error) { isServiceOwnsFrontendIP, isPrimaryService, err := az.serviceOwnsFrontendIP(config, service) if err != nil { @@ -1482,11 +1376,6 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, defaultLBFrontendIPConfigID := az.getFrontendIPConfigID(lbName, lbResourceGroup, defaultLBFrontendIPConfigName) dirtyLb := false - lbIdleTimeout, err := getIdleTimeout(service) - if wantLb && err != nil { - return nil, err - } - // reconcile the load balancer's backend pool configuration. if wantLb { preConfig, changed, err := az.LoadBalancerBackendPool.ReconcileBackendPools(clusterName, service, lb) @@ -1524,9 +1413,13 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, } } - expectedProbes, expectedRules, err := az.getExpectedLBRules(service, wantLb, defaultLBFrontendIPConfigID, lbBackendPoolID, lbName, lbIdleTimeout) - if err != nil { - return nil, err + var expectedProbes []network.Probe + var expectedRules []network.LoadBalancingRule + if wantLb { + expectedProbes, expectedRules, err = az.getExpectedLBRules(service, defaultLBFrontendIPConfigID, lbBackendPoolID, lbName) + if err != nil { + return nil, err + } } if changed := az.reconcileLBProbes(lb, service, serviceName, wantLb, expectedProbes); changed { @@ -1962,164 +1855,302 @@ func lbRuleConflictsWithPort(rule network.LoadBalancingRule, frontendIPConfigID *rule.FrontendPort == port.Port } -func parseHealthProbeProtocolAndPath(service *v1.Service) (string, string) { - var protocol, path string - if v, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerHealthProbeProtocol]; ok { - protocol = v - } else { - return protocol, path +// buildHealthProbeRulesForPort +// for following sku: basic loadbalancer vs standard load balancer +// for following protocols: TCP HTTP HTTPS(SLB only) +func (az *Cloud) buildHealthProbeRulesForPort(annotations map[string]string, port v1.ServicePort, lbrule string) (*network.Probe, error) { + properties := &network.ProbePropertiesFormat{} + // get request path ,only used with http/https probe + path, err := consts.GetHealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsRequestPath) + if err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", 
consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsRequestPath), err) + } + if path == nil { + if path, err = consts.GetAttributeValueInSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath); err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath, err) + } + } + if path == nil { + path = to.StringPtr(consts.HealthProbeDefaultRequestPath) + } + if port.AppProtocol == nil { + if port.AppProtocol, err = consts.GetAttributeValueInSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeProtocol); err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeProtocol, err) + } + if port.AppProtocol == nil { + if port.Protocol == v1.ProtocolTCP { + port.AppProtocol = to.StringPtr(string(network.ProtocolTCP)) + } + } + if port.AppProtocol == nil { + // health probe not set, return + return nil, nil + } } - // ignore the request path if using TCP - if strings.EqualFold(protocol, string(network.ProbeProtocolHTTP)) || - strings.EqualFold(protocol, string(network.ProbeProtocolHTTPS)) { - if v, ok := service.Annotations[consts.ServiceAnnotationLoadBalancerHealthProbeRequestPath]; ok { - path = v + switch protocol := strings.TrimSpace(*port.AppProtocol); { + case strings.EqualFold(protocol, string(network.ProtocolHTTPS)): + //HTTPS probe is only supported in standard loadbalancer + if !az.useStandardLoadBalancer() { + return nil, fmt.Errorf("HTTPS protocol is not supported in health probe when basic lb is used") } + //HTTP and HTTPS share the same configuration + properties.Protocol = network.ProbeProtocolHTTPS + properties.RequestPath = path + case strings.EqualFold(protocol, string(network.ProtocolHTTP)): + properties.Protocol = network.ProbeProtocolHTTP + properties.RequestPath = path + case strings.EqualFold(protocol, string(network.ProtocolTCP)): + properties.Protocol = network.ProbeProtocolTCP + default: + return nil, fmt.Errorf("unsupported protocol %s", protocol) } - return protocol, path + + // get number of probes + var numOfProbeValidator = func(val *int32) error { + //minimum number of unhealthy responses is 2. 
ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe + const ( + MinimumNumOfProbe = 2 + ) + if *val < MinimumNumOfProbe { + return fmt.Errorf("the minimum value of %s is %d", consts.HealthProbeParamsNumOfProbe, MinimumNumOfProbe) + } + return nil + } + numberOfProbes, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsNumOfProbe, numOfProbeValidator) + if err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsNumOfProbe), err) + } + if numberOfProbes == nil { + if numberOfProbes, err = consts.Getint32ValueFromK8sSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, numOfProbeValidator); err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeNumOfProbe, err) + } + } + + // if numberOfProbes is not set, set it to default instead ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe + if numberOfProbes == nil { + numberOfProbes = to.Int32Ptr(consts.HealthProbeDefaultNumOfProbe) + } + + // get probe interval in seconds + var probeIntervalValidator = func(val *int32) error { + //minimum probe interval in seconds is 5. ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe + const ( + MinimumProbeIntervalInSecond = 5 + ) + if *val < 5 { + return fmt.Errorf("the minimum value of %s is %d", consts.HealthProbeParamsProbeInterval, MinimumProbeIntervalInSecond) + } + return nil + } + probeInterval, err := consts.GetInt32HealthProbeConfigOfPortFromK8sSvcAnnotation(annotations, port.Port, consts.HealthProbeParamsProbeInterval, probeIntervalValidator) + if err != nil { + return nil, fmt.Errorf("failed to parse annotation %s:%w", consts.BuildHealthProbeAnnotationKeyForPort(port.Port, consts.HealthProbeParamsProbeInterval), err) + } + if probeInterval == nil { + if probeInterval, err = consts.Getint32ValueFromK8sSvcAnnotation(annotations, consts.ServiceAnnotationLoadBalancerHealthProbeInterval, probeIntervalValidator); err != nil { + return nil, fmt.Errorf("failed to parse annotation %s: %w", consts.ServiceAnnotationLoadBalancerHealthProbeInterval, err) + } + } + // if probeInterval is not set, set it to default instead ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe + if probeInterval == nil { + probeInterval = to.Int32Ptr(consts.HealthProbeDefaultProbeInterval) + } + + // total probe should be less than 120 seconds ref: https://docs.microsoft.com/en-us/rest/api/load-balancer/load-balancers/create-or-update#probe + if (*probeInterval)*(*numberOfProbes) >= 120 { + return nil, fmt.Errorf("total probe should be less than 120, please adjust interval and number of probe accordingly") + } + properties.IntervalInSeconds = probeInterval + properties.NumberOfProbes = numberOfProbes + properties.Port = &port.NodePort + probe := &network.Probe{ + Name: &lbrule, + ProbePropertiesFormat: properties, + } + return probe, nil } +// buildLBRules +// for following sku: basic loadbalancer vs standard load balancer +// for following scenario: internal vs external func (az *Cloud) getExpectedLBRules( service *v1.Service, - wantLb bool, lbFrontendIPConfigID string, lbBackendPoolID string, - lbName string, - lbIdleTimeout *int32) ([]network.Probe, []network.LoadBalancingRule, error) { + 
lbName string) ([]network.Probe, []network.LoadBalancingRule, error) { - var ports []v1.ServicePort - if wantLb { - ports = service.Spec.Ports - } else { - ports = []v1.ServicePort{} - } + var expectedRules []network.LoadBalancingRule + var expectedProbes []network.Probe - var enableTCPReset *bool - if az.useStandardLoadBalancer() { - enableTCPReset = to.BoolPtr(true) - } + // support podPresence health check when External Traffic Policy is local + // take precedence over user defined probe configuration + // healthcheck proxy server serves http requests + // https://github.com/kubernetes/kubernetes/blob/7c013c3f64db33cf19f38bb2fc8d9182e42b0b7b/pkg/proxy/healthcheck/service_health.go#L236 + var nodeEndpointHealthprobe *network.Probe + if servicehelpers.NeedsHealthCheck(service) { + podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service) + lbRuleName := az.getLoadBalancerRuleName(service, v1.ProtocolTCP, podPresencePort) - var expectedProbes []network.Probe - var expectedRules []network.LoadBalancingRule - highAvailabilityPortsEnabled := false - for _, port := range ports { - if !requiresInternalLoadBalancer(service) && port.Protocol == v1.ProtocolSCTP { - return nil, nil, fmt.Errorf("SCTP is only supported on internal LoadBalancer") + nodeEndpointHealthprobe = &network.Probe{ + Name: &lbRuleName, + ProbePropertiesFormat: &network.ProbePropertiesFormat{ + RequestPath: to.StringPtr(podPresencePath), + Protocol: network.ProbeProtocolHTTP, + Port: to.Int32Ptr(podPresencePort), + IntervalInSeconds: to.Int32Ptr(consts.HealthProbeDefaultProbeInterval), + NumberOfProbes: to.Int32Ptr(consts.HealthProbeDefaultNumOfProbe), + }, } + expectedProbes = append(expectedProbes, *nodeEndpointHealthprobe) + } - if highAvailabilityPortsEnabled { - // Since the port is always 0 when enabling HA, only one rule should be configured. - break - } + // In HA mode, lb forward traffic of all port to backend + // HA mode is only supported on standard loadbalancer SKU in internal mode + if consts.IsK8sServiceUsingInternalLoadBalancer(service) && + az.useStandardLoadBalancer() && + consts.IsK8sServiceHasHAModeEnabled(service) { - lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port) + lbRuleName := az.getloadbalancerHAmodeRuleName(service) klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName) - transportProto, _, probeProto, err := getProtocolsFromKubernetesProtocol(port.Protocol) + props, err := az.getExpectedHAModeLoadBalancingRuleProperties(service, lbFrontendIPConfigID, lbBackendPoolID) if err != nil { - return expectedProbes, expectedRules, err + return nil, nil, fmt.Errorf("error generate lb rule for ha mod loadbalancer. err: %w", err) + } + //Here we need to find one health probe rule for the HA lb rule. 
+ var probe *network.Probe = nodeEndpointHealthprobe + if probe == nil { + // use user customized health probe rule if any + for _, port := range service.Spec.Ports { + if probe, err = az.buildHealthProbeRulesForPort(service.Annotations, port, lbRuleName); err != nil { + klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace, + "rule-name", lbRuleName, "port", port.Port) + //ignore error because we only need one correct rule + } else if probe != nil { + expectedProbes = append(expectedProbes, *probe) + break + } + } } - probeProtocol, requestPath := parseHealthProbeProtocolAndPath(service) - probeInterval, numberOfProbe, err := getProbeIntervalInSecondsAndNumOfProbe(service) - if err != nil { - return expectedProbes, expectedRules, err + // if we found one valid probe, append it to lb rule. + if probe != nil { + props.Probe = &network.SubResource{ + ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *probe.Name)), + } } - if servicehelpers.NeedsHealthCheck(service) { - podPresencePath, podPresencePort := servicehelpers.GetServiceHealthCheckPathPort(service) - if probeProtocol == "" { - probeProtocol = string(network.ProbeProtocolHTTP) + + expectedRules = append(expectedRules, network.LoadBalancingRule{ + Name: &lbRuleName, + LoadBalancingRulePropertiesFormat: props, + }) + // end of HA mode handling + } else { + // generate lb rule for each port defined in svc object + + for _, port := range service.Spec.Ports { + lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port) + klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName) + + if port.Protocol == v1.ProtocolSCTP && !(az.useStandardLoadBalancer() && consts.IsK8sServiceUsingInternalLoadBalancer(service)) { + return expectedProbes, expectedRules, fmt.Errorf("SCTP is only supported on standard loadbalancer in internal mode") } - needRequestPath := strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTP)) || strings.EqualFold(probeProtocol, string(network.ProbeProtocolHTTPS)) - if requestPath == "" && needRequestPath { - requestPath = podPresencePath + transportProto, _, _, err := getProtocolsFromKubernetesProtocol(port.Protocol) + if err != nil { + return expectedProbes, expectedRules, fmt.Errorf("failed to parse transport protocol: %w", err) + } + props, err := az.getExpectedLoadBalancingRulePropertiesForPort(service, lbFrontendIPConfigID, lbBackendPoolID, to.Int32Ptr(port.Port), *transportProto) + if err != nil { + return expectedProbes, expectedRules, fmt.Errorf("error generate lb rule for ha mod loadbalancer. 
err: %w", err) } - expectedProbes = append(expectedProbes, network.Probe{ - Name: &lbRuleName, - ProbePropertiesFormat: &network.ProbePropertiesFormat{ - RequestPath: to.StringPtr(requestPath), - Protocol: network.ProbeProtocol(probeProtocol), - Port: to.Int32Ptr(podPresencePort), - IntervalInSeconds: probeInterval, - NumberOfProbes: numberOfProbe, - }, - }) - } else if port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP { - // we only add the expected probe if we're doing TCP - if probeProtocol == "" { - probeProtocol = string(*probeProto) - } - var actualPath *string - if !strings.EqualFold(probeProtocol, string(network.ProbeProtocolTCP)) { - if requestPath != "" { - actualPath = to.StringPtr(requestPath) - } else { - actualPath = to.StringPtr("/healthz") + var probe *network.Probe = nodeEndpointHealthprobe + if probe == nil { + if probe, err = az.buildHealthProbeRulesForPort(service.Annotations, port, lbRuleName); err != nil { + klog.V(2).ErrorS(err, "error occurred when buildHealthProbeRulesForPort", "service", service.Name, "namespace", service.Namespace, + "rule-name", lbRuleName, "port", port.Port) + return expectedProbes, expectedRules, err + } else if probe != nil { + expectedProbes = append(expectedProbes, *probe) } } - expectedProbes = append(expectedProbes, network.Probe{ - Name: &lbRuleName, - ProbePropertiesFormat: &network.ProbePropertiesFormat{ - Protocol: network.ProbeProtocol(probeProtocol), - RequestPath: actualPath, - Port: to.Int32Ptr(port.NodePort), - IntervalInSeconds: probeInterval, - NumberOfProbes: numberOfProbe, - }, + if probe != nil { + props.Probe = &network.SubResource{ + ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), *probe.Name)), + } + } + expectedRules = append(expectedRules, network.LoadBalancingRule{ + Name: &lbRuleName, + LoadBalancingRulePropertiesFormat: props, }) - } - loadDistribution := network.LoadDistributionDefault - if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { - loadDistribution = network.LoadDistributionSourceIP } + } - expectedRule := network.LoadBalancingRule{ - Name: &lbRuleName, - LoadBalancingRulePropertiesFormat: &network.LoadBalancingRulePropertiesFormat{ - Protocol: *transportProto, - FrontendIPConfiguration: &network.SubResource{ - ID: to.StringPtr(lbFrontendIPConfigID), - }, - BackendAddressPool: &network.SubResource{ - ID: to.StringPtr(lbBackendPoolID), - }, - LoadDistribution: loadDistribution, - FrontendPort: to.Int32Ptr(port.Port), - BackendPort: to.Int32Ptr(port.Port), - DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()), - EnableTCPReset: enableTCPReset, - EnableFloatingIP: to.BoolPtr(true), - }, - } + return expectedProbes, expectedRules, nil +} - if port.Protocol == v1.ProtocolTCP { - expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout - } +//getDefaultLoadBalancingRulePropertiesFormat returns the loadbalancing rule for one port +func (az *Cloud) getExpectedLoadBalancingRulePropertiesForPort( + service *v1.Service, + lbFrontendIPConfigID string, + lbBackendPoolID string, port *int32, transportProto network.TransportProtocol) (*network.LoadBalancingRulePropertiesFormat, error) { + var err error - if requiresInternalLoadBalancer(service) && - strings.EqualFold(az.LoadBalancerSku, consts.LoadBalancerSkuStandard) && - (strings.EqualFold(service.Annotations[consts.ServiceAnnotationLoadBalancerEnableHighAvailabilityPorts], consts.TrueAnnotationValue) || port.Protocol == v1.ProtocolSCTP) { - 
expectedRule.FrontendPort = to.Int32Ptr(0) - expectedRule.BackendPort = to.Int32Ptr(0) - expectedRule.Protocol = network.TransportProtocolAll - highAvailabilityPortsEnabled = true - } + loadDistribution := network.LoadDistributionDefault + if service.Spec.SessionAffinity == v1.ServiceAffinityClientIP { + loadDistribution = network.LoadDistributionSourceIP + } - // we didn't construct the probe objects for UDP or SCTP because they're not allowed on Azure. - // However, when externalTrafficPolicy is Local, Kubernetes HTTP health check would be used for probing. - if servicehelpers.NeedsHealthCheck(service) || (port.Protocol != v1.ProtocolUDP && port.Protocol != v1.ProtocolSCTP) { - expectedRule.Probe = &network.SubResource{ - ID: to.StringPtr(az.getLoadBalancerProbeID(lbName, az.getLoadBalancerResourceGroup(), lbRuleName)), - } + var lbIdleTimeout *int32 + if lbIdleTimeout, err = consts.Getint32ValueFromK8sSvcAnnotation(service.Annotations, consts.ServiceAnnotationLoadBalancerIdleTimeout, func(val *int32) error { + const ( + min = 4 + max = 30 + ) + if *val < min || *val > max { + return fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d, actual value: %d", min, max, *val) } - - expectedRules = append(expectedRules, expectedRule) + return nil + }); err != nil { + return nil, fmt.Errorf("error parsing idle timeout key: %s, err: %w", consts.ServiceAnnotationLoadBalancerIdleTimeout, err) + } else if lbIdleTimeout == nil { + lbIdleTimeout = to.Int32Ptr(4) + } + + props := &network.LoadBalancingRulePropertiesFormat{ + Protocol: transportProto, + FrontendPort: port, + BackendPort: port, + DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()), + EnableFloatingIP: to.BoolPtr(true), + LoadDistribution: loadDistribution, + FrontendIPConfiguration: &network.SubResource{ + ID: to.StringPtr(lbFrontendIPConfigID), + }, + BackendAddressPool: &network.SubResource{ + ID: to.StringPtr(lbBackendPoolID), + }, + IdleTimeoutInMinutes: lbIdleTimeout, + } + if strings.EqualFold(string(transportProto), string(network.TransportProtocolTCP)) && az.useStandardLoadBalancer() { + props.EnableTCPReset = to.BoolPtr(true) } + return props, nil +} - return expectedProbes, expectedRules, nil +//getExpectedHAModeLoadBalancingRuleProperties build load balancing rule for lb in HA mode +func (az *Cloud) getExpectedHAModeLoadBalancingRuleProperties( + service *v1.Service, + lbFrontendIPConfigID string, + lbBackendPoolID string) (*network.LoadBalancingRulePropertiesFormat, error) { + props, err := az.getExpectedLoadBalancingRulePropertiesForPort(service, lbFrontendIPConfigID, lbBackendPoolID, to.Int32Ptr(0), network.TransportProtocolAll) + if err != nil { + return nil, fmt.Errorf("error generate lb rule for ha mod loadbalancer. err: %w", err) + } + props.EnableTCPReset = to.BoolPtr(true) + return props, nil } // This reconciles the Network Security Group similar to how the LB is reconciled. 
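Editor's note: the hunk above replaces the old getIdleTimeout helper with an annotation-parsing pattern (an int32 parser plus a pluggable business validator, with a fallback to the Azure default of 4 minutes when the annotation is absent). The standalone sketch below mirrors that parse-validate-default flow so it can be read in isolation; the helper name getInt32FromAnnotation and the annotation key literal are illustrative stand-ins and are not the symbols defined in pkg/consts of this patch.

package main

import (
    "fmt"
    "strconv"
)

// int32Validator mirrors the "business validator" callbacks used in the patch above.
type int32Validator func(*int32) error

// getInt32FromAnnotation parses an optional int32 annotation and applies the
// supplied validators; a missing annotation yields nil (caller uses the Azure default).
func getInt32FromAnnotation(annotations map[string]string, key string, validators ...int32Validator) (*int32, error) {
    val, ok := annotations[key]
    if !ok {
        return nil, nil
    }
    parsed, err := strconv.ParseInt(val, 10, 32)
    if err != nil {
        return nil, fmt.Errorf("error parsing %s value: %w", key, err)
    }
    v := int32(parsed)
    for _, validate := range validators {
        if err := validate(&v); err != nil {
            return nil, fmt.Errorf("error parsing %s value: %w", key, err)
        }
    }
    return &v, nil
}

func main() {
    // Illustrative annotation key; the real constant lives in pkg/consts.
    const idleTimeoutKey = "service.beta.kubernetes.io/azure-load-balancer-tcp-idle-timeout"
    annotations := map[string]string{idleTimeoutKey: "15"}

    timeout, err := getInt32FromAnnotation(annotations, idleTimeoutKey, func(val *int32) error {
        // Azure allows idle timeouts between 4 and 30 minutes.
        const (
            min = 4
            max = 30
        )
        if *val < min || *val > max {
            return fmt.Errorf("idle timeout value must be a whole number representing minutes between %d and %d, actual value: %d", min, max, *val)
        }
        return nil
    })
    if err != nil {
        panic(err)
    }
    if timeout == nil {
        // Annotation not set: fall back to 4 minutes, as the patched code does.
        def := int32(4)
        timeout = &def
    }
    fmt.Printf("idle timeout in minutes: %d\n", *timeout)
}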
@@ -2175,7 +2206,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, var sourceAddressPrefixes []string if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 { - if !requiresInternalLoadBalancer(service) { + if !requiresInternalLoadBalancer(service) || len(service.Spec.LoadBalancerSourceRanges) > 0 { sourceAddressPrefixes = []string{"Internet"} } } else { @@ -2874,14 +2905,21 @@ func equalLoadBalancingRulePropertiesFormat(s *network.LoadBalancingRuleProperti return false } - properties := reflect.DeepEqual(s.Protocol, t.Protocol) && - reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) && + properties := reflect.DeepEqual(s.Protocol, t.Protocol) + if !properties { + return false + } + + if reflect.DeepEqual(s.Protocol, network.TransportProtocolTCP) { + properties = properties && reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset)) + } + + properties = properties && reflect.DeepEqual(s.FrontendIPConfiguration, t.FrontendIPConfiguration) && reflect.DeepEqual(s.BackendAddressPool, t.BackendAddressPool) && reflect.DeepEqual(s.LoadDistribution, t.LoadDistribution) && reflect.DeepEqual(s.FrontendPort, t.FrontendPort) && reflect.DeepEqual(s.BackendPort, t.BackendPort) && reflect.DeepEqual(s.EnableFloatingIP, t.EnableFloatingIP) && - reflect.DeepEqual(to.Bool(s.EnableTCPReset), to.Bool(t.EnableTCPReset)) && reflect.DeepEqual(to.Bool(s.DisableOutboundSnat), to.Bool(t.DisableOutboundSnat)) if wantLB && s.IdleTimeoutInMinutes != nil && t.IdleTimeoutInMinutes != nil { diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go index ac1da8a47..858ff340e 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer_backendpool.go @@ -137,7 +137,6 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string, } foundBackendPool := false - wantLb := true changed := false lbName := *lb.Name @@ -146,11 +145,30 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string, lbBackendPoolID := bc.getBackendPoolID(lbName, bc.getLoadBalancerResourceGroup(), lbBackendPoolName) vmSetName := bc.mapLoadBalancerNameToVMSet(lbName, clusterName) - for _, bp := range newBackendPools { + for i := len(newBackendPools) - 1; i >= 0; i-- { + bp := newBackendPools[i] if strings.EqualFold(*bp.Name, lbBackendPoolName) { - klog.V(10).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) + klog.V(10).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found wanted backendpool. not adding anything", serviceName) foundBackendPool = true + // If the LB backend pool type is configured from nodeIP or podIP + // to nodeIPConfiguration, we need to decouple the VM NICs from the LB + // before attaching nodeIPs/podIPs to the LB backend pool. 
+ if bp.BackendAddressPoolPropertiesFormat != nil && + bp.LoadBalancerBackendAddresses != nil && + len(*bp.LoadBalancerBackendAddresses) > 0 { + if removeNodeIPAddressesFromBackendPool(bp, []string{}, true) { + bp.Etag = nil + if err := bc.CreateOrUpdateLBBackendPool(lbName, bp); err != nil { + klog.Errorf("bc.ReconcileBackendPools for service (%s): failed to cleanup IP based backend pool %s: %s", serviceName, lbBackendPoolName, err.Error()) + return false, false, fmt.Errorf("bc.ReconcileBackendPools for service (%s): failed to cleanup IP based backend pool %s: %w", serviceName, lbBackendPoolName, err) + } + newBackendPools[i] = bp + lb.BackendAddressPools = &newBackendPools + lb.Etag = nil + } + } + var backendIPConfigurationsToBeDeleted []network.InterfaceIPConfiguration if bp.BackendAddressPoolPropertiesFormat != nil && bp.BackendIPConfigurations != nil { for _, ipConf := range *bp.BackendIPConfigurations { @@ -170,7 +188,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string, return false, false, err } if shouldExcludeLoadBalancer { - klog.V(2).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB %s", serviceName, wantLb, nodeName, lbName) + klog.V(2).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found unwanted node %s, decouple it from the LB %s", serviceName, nodeName, lbName) // construct a backendPool that only contains the IP config of the node to be deleted backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)}) } @@ -193,7 +211,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools(clusterName string, } break } else { - klog.V(10).Infof("bc.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unmanaged backendpool %s", serviceName, wantLb, *bp.Name) + klog.V(10).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found unmanaged backendpool %s", serviceName, *bp.Name) } } @@ -358,42 +376,61 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(slb *net func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, service *v1.Service, lb *network.LoadBalancer) (bool, bool, error) { var newBackendPools []network.BackendAddressPool - var err error if lb.BackendAddressPools != nil { newBackendPools = *lb.BackendAddressPools } foundBackendPool := false - wantLb := true changed := false lbName := *lb.Name serviceName := getServiceName(service) lbBackendPoolName := getBackendPoolName(clusterName, service) + vmSetName := bi.mapLoadBalancerNameToVMSet(lbName, clusterName) + lbBackendPoolID := bi.getBackendPoolID(to.String(lb.Name), bi.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service)) - for i, bp := range newBackendPools { + for i := len(newBackendPools) - 1; i >= 0; i-- { + bp := newBackendPools[i] if strings.EqualFold(*bp.Name, lbBackendPoolName) { - klog.V(10).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found wanted backendpool. not adding anything", serviceName, wantLb) + klog.V(10).Infof("bi.ReconcileBackendPools for service (%s): found wanted backendpool. not adding anything", serviceName) foundBackendPool = true + // If the LB backend pool type is configured from nodeIPConfiguration + // to nodeIP, we need to decouple the VM NICs from the LB + // before attaching nodeIPs/podIPs to the LB backend pool. 
+ if bp.BackendAddressPoolPropertiesFormat != nil && + bp.BackendIPConfigurations != nil && + len(*bp.BackendIPConfigurations) > 0 { + klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): ensuring the LB is decoupled from the VMSet", serviceName) + if err := bi.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true); err != nil { + klog.Errorf("bi.ReconcileBackendPools for service (%s): failed to EnsureBackendPoolDeleted: %s", serviceName, err.Error()) + return false, false, err + } + newBackendPools[i].BackendAddressPoolPropertiesFormat.LoadBalancerBackendAddresses = &[]network.LoadBalancerBackendAddress{} + newBackendPools[i].BackendAddressPoolPropertiesFormat.BackendIPConfigurations = &[]network.InterfaceIPConfiguration{} + newBackendPools[i].Etag = nil + lb.Etag = nil + break + } + var nodeIPAddressesToBeDeleted []string for nodeName := range bi.excludeLoadBalancerNodes { for ip := range bi.nodePrivateIPs[nodeName] { - klog.V(2).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unwanted node private IP %s, decouple it from the LB %s", serviceName, wantLb, ip, lbName) + klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): found unwanted node private IP %s, decoupling it from the LB %s", serviceName, ip, lbName) nodeIPAddressesToBeDeleted = append(nodeIPAddressesToBeDeleted, ip) } } if len(nodeIPAddressesToBeDeleted) > 0 { - updated := removeNodeIPAddressesFromBackendPool(bp, nodeIPAddressesToBeDeleted) + updated := removeNodeIPAddressesFromBackendPool(bp, nodeIPAddressesToBeDeleted, false) if updated { (*lb.BackendAddressPools)[i] = bp if err := bi.CreateOrUpdateLBBackendPool(lbName, bp); err != nil { - return false, false, fmt.Errorf("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - failed to update backend pool %s for load balancer %s: %w", serviceName, wantLb, lbBackendPoolName, lbName, err) + return false, false, fmt.Errorf("bi.ReconcileBackendPools for service (%s): lb backendpool - failed to update backend pool %s for load balancer %s: %w", serviceName, lbBackendPoolName, lbName, err) } } } break } else { - klog.V(10).Infof("bi.ReconcileBackendPools for service (%s)(%t): lb backendpool - found unmanaged backendpool %s", serviceName, wantLb, *bp.Name) + klog.V(10).Infof("bi.ReconcileBackendPools for service (%s): found unmanaged backendpool %s", serviceName, *bp.Name) } } @@ -403,7 +440,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(clusterName string, servi changed = true } - return isBackendPoolPreConfigured, changed, err + return isBackendPoolPreConfigured, changed, nil } func newBackendPool(lb *network.LoadBalancer, isBackendPoolPreConfigured bool, preConfiguredBackendPoolLoadBalancerTypes, serviceName, lbBackendPoolName string) bool { @@ -425,7 +462,7 @@ func newBackendPool(lb *network.LoadBalancer, isBackendPoolPreConfigured bool, p return isBackendPoolPreConfigured } -func removeNodeIPAddressesFromBackendPool(backendPool network.BackendAddressPool, nodeIPAddresses []string) bool { +func removeNodeIPAddressesFromBackendPool(backendPool network.BackendAddressPool, nodeIPAddresses []string, removeAll bool) bool { changed := false nodeIPsSet := sets.NewString(nodeIPAddresses...) 
if backendPool.BackendAddressPoolPropertiesFormat != nil && @@ -433,7 +470,11 @@ func removeNodeIPAddressesFromBackendPool(backendPool network.BackendAddressPool for i := len(*backendPool.LoadBalancerBackendAddresses) - 1; i >= 0; i-- { if (*backendPool.LoadBalancerBackendAddresses)[i].LoadBalancerBackendAddressPropertiesFormat != nil { ipAddress := to.String((*backendPool.LoadBalancerBackendAddresses)[i].IPAddress) - if nodeIPsSet.Has(ipAddress) { + if ipAddress == "" { + klog.V(4).Infof("removeNodeIPAddressFromBackendPool: LoadBalancerBackendAddress %s is not IP-based, skipping", to.String((*backendPool.LoadBalancerBackendAddresses)[i].Name)) + continue + } + if removeAll || nodeIPsSet.Has(ipAddress) { klog.V(4).Infof("removeNodeIPAddressFromBackendPool: removing %s from the backend pool %s", ipAddress, to.String(backendPool.Name)) *backendPool.LoadBalancerBackendAddresses = append((*backendPool.LoadBalancerBackendAddresses)[:i], (*backendPool.LoadBalancerBackendAddresses)[i+1:]...) changed = true diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go index 4ff8bd5ee..0be69a8e4 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go @@ -80,6 +80,8 @@ type ManagedDiskOptions struct { DiskAccessID *string // BurstingEnabled - Set to true to enable bursting beyond the provisioned performance target of the disk. BurstingEnabled *bool + // SubscrtionID - specify a different SubscrtionID + SubscrtionID string } //CreateManagedDisk : create managed disk @@ -111,7 +113,19 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * diskSizeGB := int32(options.SizeGB) diskSku := options.StorageAccountType - creationData, err := getValidCreationData(c.common.subscriptionID, options.ResourceGroup, options.SourceResourceID, options.SourceType) + rg := c.common.resourceGroup + if options.ResourceGroup != "" { + rg = options.ResourceGroup + } + subsID := c.common.subscriptionID + if options.SubscrtionID != "" { + subsID = options.SubscrtionID + } + if options.SubscrtionID != "" && !strings.EqualFold(options.SubscrtionID, c.common.subscriptionID) && options.ResourceGroup == "" { + return "", fmt.Errorf("resourceGroup must be specified when subscriptionID(%s) is not empty", subsID) + } + + creationData, err := getValidCreationData(subsID, rg, options.SourceResourceID, options.SourceType) if err != nil { return "", err } @@ -206,23 +220,17 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * model.Zones = &createZones } - if options.ResourceGroup == "" { - options.ResourceGroup = c.common.resourceGroup - } - - cloud := c.common.cloud - rerr := cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model) - if rerr != nil { + if rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, subsID, rg, options.DiskName, model); rerr != nil { return "", rerr.Error() } - diskID := fmt.Sprintf(managedDiskPath, cloud.subscriptionID, options.ResourceGroup, options.DiskName) + diskID := fmt.Sprintf(managedDiskPath, subsID, rg, options.DiskName) if options.SkipGetDiskOperation { klog.Warningf("azureDisk - GetDisk(%s, StorageAccountType:%s) is throttled, unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType) } else { err = 
kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { - provisionState, id, err := c.GetDisk(ctx, options.ResourceGroup, options.DiskName) + provisionState, id, err := c.GetDisk(ctx, subsID, rg, options.DiskName) if err == nil { if id != "" { diskID = id @@ -250,7 +258,7 @@ func (c *ManagedDiskController) CreateManagedDisk(ctx context.Context, options * //DeleteManagedDisk : delete managed disk func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI string) error { - resourceGroup, err := getResourceGroupFromDiskURI(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) if err != nil { return err } @@ -260,7 +268,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI s } diskName := path.Base(diskURI) - disk, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName) + disk, rerr := c.common.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) if rerr != nil { if rerr.HTTPStatusCode == http.StatusNotFound { klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI) @@ -275,7 +283,7 @@ func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI s return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy) } - if rerr := c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName); rerr != nil { + if rerr := c.common.cloud.DisksClient.Delete(ctx, subsID, resourceGroup, diskName); rerr != nil { return rerr.Error() } // We don't need poll here, k8s will immediately stop referencing the disk @@ -287,8 +295,8 @@ func (c *ManagedDiskController) DeleteManagedDisk(ctx context.Context, diskURI s } // GetDisk return: disk provisionState, diskID, error -func (c *ManagedDiskController) GetDisk(ctx context.Context, resourceGroup, diskName string) (string, string, error) { - result, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName) +func (c *ManagedDiskController) GetDisk(ctx context.Context, subsID, resourceGroup, diskName string) (string, string, error) { + result, rerr := c.common.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) if rerr != nil { return "", "", rerr.Error() } @@ -302,12 +310,12 @@ func (c *ManagedDiskController) GetDisk(ctx context.Context, resourceGroup, disk // ResizeDisk Expand the disk to new size func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, oldSize resource.Quantity, newSize resource.Quantity, supportOnlineResize bool) (resource.Quantity, error) { diskName := path.Base(diskURI) - resourceGroup, err := getResourceGroupFromDiskURI(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) if err != nil { return oldSize, err } - result, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName) + result, rerr := c.common.cloud.DisksClient.Get(ctx, subsID, resourceGroup, diskName) if rerr != nil { return oldSize, rerr.Error() } @@ -340,7 +348,7 @@ func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, }, } - if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil { + if rerr := c.common.cloud.DisksClient.Update(ctx, subsID, resourceGroup, diskName, diskParameter); rerr != nil { return oldSize, rerr.Error() } @@ -348,15 +356,15 @@ func (c *ManagedDiskController) ResizeDisk(ctx context.Context, diskURI string, return newSizeQuant, nil } -// get resource group name from a managed disk URI, e.g. 
return {group-name} according to +// get resource group name, subs id from a managed disk URI, e.g. return {group-name}, {sub-id} according to // /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id} // according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get -func getResourceGroupFromDiskURI(diskURI string) (string, error) { +func getInfoFromDiskURI(diskURI string) (string, string, error) { fields := strings.Split(diskURI, "/") if len(fields) != 9 || strings.ToLower(fields[3]) != "resourcegroups" { - return "", fmt.Errorf("invalid disk URI: %s", diskURI) + return "", "", fmt.Errorf("invalid disk URI: %s", diskURI) } - return fields[4], nil + return fields[4], fields[2], nil } // GetLabelsForVolume implements PVLabeler.GetLabelsForVolume @@ -378,7 +386,7 @@ func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) func (c *Cloud) GetAzureDiskLabels(ctx context.Context, diskURI string) (map[string]string, error) { // Get disk's resource group. diskName := path.Base(diskURI) - resourceGroup, err := getResourceGroupFromDiskURI(diskURI) + resourceGroup, subsID, err := getInfoFromDiskURI(diskURI) if err != nil { klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err) return nil, err @@ -391,7 +399,7 @@ func (c *Cloud) GetAzureDiskLabels(ctx context.Context, diskURI string) (map[str if c.DisksClient == nil { return labels, nil } - disk, rerr := c.DisksClient.Get(ctx, resourceGroup, diskName) + disk, rerr := c.DisksClient.Get(ctx, subsID, resourceGroup, diskName) if rerr != nil { klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, rerr) return nil, rerr.Error() diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go index 414af7d90..e1dcd2592 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go @@ -295,6 +295,10 @@ func (az *Cloud) getLoadBalancerRuleName(service *v1.Service, protocol v1.Protoc return fmt.Sprintf("%s-%s-%s-%d", prefix, subnetSegment, protocol, port) } +func (az *Cloud) getloadbalancerHAmodeRuleName(service *v1.Service) string { + return az.getLoadBalancerRuleName(service, service.Spec.Ports[0].Protocol, service.Spec.Ports[0].Port) +} + func (az *Cloud) getSecurityRuleName(service *v1.Service, port v1.ServicePort, sourceAddrPrefix string) string { if useSharedSecurityRule(service) { safePrefix := strings.Replace(sourceAddrPrefix, "/", "_", -1) diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go index 49e70d633..184934c3f 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go @@ -80,6 +80,7 @@ func (az *Cloud) getStorageAccounts(ctx context.Context, accountOptions *Account isLocationEqual(acct, accountOptions) && AreVNetRulesEqual(acct, accountOptions) && isLargeFileSharesPropertyEqual(acct, accountOptions) && + isTagsEqual(acct, accountOptions) && isTaggedWithSkip(acct) && isHnsPropertyEqual(acct, accountOptions) && isEnableNfsV3PropertyEqual(acct, accountOptions) && @@ -496,6 +497,26 @@ func isTaggedWithSkip(account storage.Account) bool { return true } +func isTagsEqual(account storage.Account, accountOptions 
*AccountOptions) bool { + // nil and empty map should be regarded as equal + if len(account.Tags) == 0 && len(accountOptions.Tags) == 0 { + return true + } + + for k, v := range account.Tags { + var value string + // nil and empty value should be regarded as equal + if v != nil { + value = *v + } + if accountOptions.Tags[k] != value { + return false + } + } + + return true +} + func isHnsPropertyEqual(account storage.Account, accountOptions *AccountOptions) bool { return to.Bool(account.IsHnsEnabled) == to.Bool(accountOptions.IsHnsEnabled) } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go index 1bde0b561..b1177b188 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go @@ -260,3 +260,19 @@ func getNodePrivateIPAddresses(node *v1.Node) []string { return addresses } + +func isLBBackendPoolTypeIPConfig(service *v1.Service, lb *network.LoadBalancer, clusterName string) bool { + if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.BackendAddressPools == nil { + klog.V(4).Infof("isLBBackendPoolTypeIPConfig: no backend pools in the LB %s", to.String(lb.Name)) + return false + } + lbBackendPoolName := getBackendPoolName(clusterName, service) + for _, bp := range *lb.BackendAddressPools { + if strings.EqualFold(to.String(bp.Name), lbBackendPoolName) { + return bp.BackendAddressPoolPropertiesFormat != nil && + bp.BackendIPConfigurations != nil && + len(*bp.BackendIPConfigurations) != 0 + } + } + return false +} diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go index 99a4b70ed..585bab3c2 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go @@ -38,6 +38,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" + "sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine" ) var ( @@ -152,73 +153,74 @@ func (ss *ScaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*c // getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache. // Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity. 
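The isTagsEqual helper added to azure_storageaccount.go above is the core of the tags-matching fix: a nil tag map and an empty tag map are treated as equal, and each tag already set on the account must match the requested value. The sketch below mirrors that comparison with plain maps; tagsEqual is a hypothetical standalone version, not the function from the provider (which takes a storage.Account and *AccountOptions).

package main

import "fmt"

// tagsEqual mirrors the comparison above: nil and empty maps are treated as
// equal, and every tag already present on the account must match the desired
// value. The loop walks only the account's existing tags.
func tagsEqual(accountTags map[string]*string, wantTags map[string]string) bool {
	if len(accountTags) == 0 && len(wantTags) == 0 {
		return true
	}
	for k, v := range accountTags {
		var value string
		if v != nil {
			value = *v
		}
		if wantTags[k] != value {
			return false
		}
	}
	return true
}

func main() {
	created := "blob-csi"
	fmt.Println(tagsEqual(map[string]*string{"created-by": &created}, map[string]string{"created-by": "blob-csi"})) // true
	fmt.Println(tagsEqual(map[string]*string{"created-by": &created}, nil))                                         // false
	fmt.Println(tagsEqual(nil, map[string]string{}))                                                                // true
}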
-func (ss *ScaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { +func (ss *ScaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, error) { cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName) if err != nil { - return "", "", nil, err + return nil, err } - getter := func(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) { + getter := func(nodeName string, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, bool, error) { var found bool cached, err := cache.Get(cacheKey, crt) if err != nil { - return "", "", nil, found, err + return nil, found, err } virtualMachines := cached.(*sync.Map) if vm, ok := virtualMachines.Load(nodeName); ok { result := vm.(*vmssVirtualMachinesEntry) found = true - return result.vmssName, result.instanceID, result.virtualMachine, found, nil + return virtualmachine.FromVirtualMachineScaleSetVM(result.virtualMachine, virtualmachine.ByVMSS(result.vmssName)), found, nil } - return "", "", nil, found, nil + return nil, found, nil } + // FIXME(ccc): check only if vmss is uniform. _, err = getScaleSetVMInstanceID(node.nodeName) if err != nil { - return "", "", nil, err + return nil, err } - vmssName, instanceID, vm, found, err := getter(node.nodeName, crt) + vm, found, err := getter(node.nodeName, crt) if err != nil { - return "", "", nil, err + return nil, err } if !found { // lock and try find nodeName from cache again, refresh cache if still not found ss.lockMap.LockEntry(cacheKey) defer ss.lockMap.UnlockEntry(cacheKey) - vmssName, instanceID, vm, found, err = getter(node.nodeName, crt) + vm, found, err = getter(node.nodeName, crt) if err == nil && found && vm != nil { klog.V(2).Infof("found VMSS VM with nodeName %s after retry", node.nodeName) - return vmssName, instanceID, vm, nil + return vm, nil } klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache(vmss: %s, rg: %s)", node.nodeName, node.vmssName, node.resourceGroup) - vmssName, instanceID, vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh) + vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh) if err != nil { - return "", "", nil, err + return nil, err } } if found && vm != nil { - return vmssName, instanceID, vm, nil + return vm, nil } if !found || vm == nil { klog.Warningf("Unable to find node %s: %v", node.nodeName, cloudprovider.InstanceNotFound) - return "", "", nil, cloudprovider.InstanceNotFound + return nil, cloudprovider.InstanceNotFound } - return vmssName, instanceID, vm, nil + return vm, nil } // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. // Returns cloudprovider.InstanceNotFound if nodeName does not belong to any scale set. 
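The reworked getVmssVMByNodeIdentity above keeps the original lookup pattern while returning the new *virtualmachine.VirtualMachine wrapper: read the per-VMSS cache, and on a miss take the per-key lock, re-check, then force a cache refresh before reporting cloudprovider.InstanceNotFound. The sketch below illustrates that pattern with simplified types; vmCache, its entries map, and refresh are hypothetical stand-ins for the azcache-backed VMSS VM cache and ss.lockMap.

package main

import (
	"fmt"
	"sync"
)

// vmCache is a hypothetical, simplified cache keyed by node name.
type vmCache struct {
	mu      sync.RWMutex
	entries map[string]string // nodeName -> instanceID (simplified)
	refresh func() map[string]string
}

func (c *vmCache) lookup(nodeName string) (string, error) {
	c.mu.RLock()
	id, ok := c.entries[nodeName]
	c.mu.RUnlock()
	if ok {
		return id, nil // fast path: cache hit
	}

	// Miss: serialize with other writers, re-check, then force a refresh,
	// analogous to retrying with azcache.CacheReadTypeForceRefresh.
	c.mu.Lock()
	defer c.mu.Unlock()
	if id, ok := c.entries[nodeName]; ok {
		return id, nil
	}
	c.entries = c.refresh()
	if id, ok := c.entries[nodeName]; ok {
		return id, nil
	}
	return "", fmt.Errorf("instance not found for node %q", nodeName)
}

func main() {
	c := &vmCache{
		entries: map[string]string{},
		refresh: func() map[string]string { return map[string]string{"node-1": "0"} },
	}
	fmt.Println(c.lookup("node-1")) // 0 <nil>
}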
-func (ss *ScaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { +func (ss *ScaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, error) { node, err := ss.getNodeIdentityByNodeName(nodeName, crt) if err != nil { - return "", "", nil, err + return nil, err } return ss.getVmssVMByNodeIdentity(node, crt) @@ -236,17 +238,20 @@ func (ss *ScaleSet) GetPowerStatusByNodeName(name string) (powerState string, er return ss.availabilitySet.GetPowerStatusByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) if err != nil { return powerState, err } - if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { - statuses := *vm.InstanceView.Statuses - for _, status := range statuses { - state := to.String(status.Code) - if strings.HasPrefix(state, vmPowerStatePrefix) { - return strings.TrimPrefix(state, vmPowerStatePrefix), nil + if vm.IsVirtualMachineScaleSetVM() { + v := vm.AsVirtualMachineScaleSetVM() + if v.InstanceView != nil && v.InstanceView.Statuses != nil { + statuses := *v.InstanceView.Statuses + for _, status := range statuses { + state := to.String(status.Code) + if strings.HasPrefix(state, vmPowerStatePrefix) { + return strings.TrimPrefix(state, vmPowerStatePrefix), nil + } } } } @@ -268,7 +273,7 @@ func (ss *ScaleSet) GetProvisioningStateByNodeName(name string) (provisioningSta return ss.availabilitySet.GetProvisioningStateByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) if err != nil { return provisioningState, err } @@ -353,13 +358,13 @@ func (ss *ScaleSet) GetInstanceIDByNodeName(name string) (string, error) { return ss.availabilitySet.GetInstanceIDByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) + vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("Unable to find node %s: %v", name, err) return "", err } - resourceID := *vm.ID + resourceID := vm.ID convertedResourceID, err := convertResourceGroupNameToLower(resourceID) if err != nil { klog.Errorf("convertResourceGroupNameToLower failed with error: %v", err) @@ -427,13 +432,16 @@ func (ss *ScaleSet) GetInstanceTypeByNodeName(name string) (string, error) { return ss.availabilitySet.GetInstanceTypeByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) + vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) if err != nil { return "", err } - if vm.Sku != nil && vm.Sku.Name != nil { - return *vm.Sku.Name, nil + if vm.IsVirtualMachineScaleSetVM() { + v := vm.AsVirtualMachineScaleSetVM() + if v.Sku != nil && v.Sku.Name != nil { + return *v.Sku.Name, nil + } } return "", nil @@ -452,24 +460,26 @@ func (ss *ScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return ss.availabilitySet.GetZoneByNodeName(name) } - _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) + vm, err := ss.getVmssVM(name, azcache.CacheReadTypeUnsafe) if err != nil { return cloudprovider.Zone{}, err } var failureDomain string - if vm.Zones != nil && len(*vm.Zones) > 0 { + if vm.Zones != nil && len(vm.Zones) > 0 { // Get availability zone for the node. 
- zones := *vm.Zones + zones := vm.Zones zoneID, err := strconv.Atoi(zones[0]) if err != nil { return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone %q: %w", zones, err) } - failureDomain = ss.makeZone(to.String(vm.Location), zoneID) - } else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil { + failureDomain = ss.makeZone(vm.Location, zoneID) + } else if vm.IsVirtualMachineScaleSetVM() && + vm.AsVirtualMachineScaleSetVM().InstanceView != nil && + vm.AsVirtualMachineScaleSetVM().InstanceView.PlatformFaultDomain != nil { // Availability zone is not used for the node, falling back to fault domain. - failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)) + failureDomain = strconv.Itoa(int(*vm.AsVirtualMachineScaleSetVM().InstanceView.PlatformFaultDomain)) } else { err = fmt.Errorf("failed to get zone info") klog.Errorf("GetZoneByNodeName: got unexpected error %v", err) @@ -479,7 +489,7 @@ func (ss *ScaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { return cloudprovider.Zone{ FailureDomain: strings.ToLower(failureDomain), - Region: strings.ToLower(to.String(vm.Location)), + Region: strings.ToLower(vm.Location), }, nil } @@ -574,7 +584,8 @@ func (ss *ScaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) { } // This returns the full identifier of the primary NIC for the given VM. -func (ss *ScaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) { +func (ss *ScaleSet) getPrimaryInterfaceID(vm *virtualmachine.VirtualMachine) (string, error) { + machine := vm.AsVirtualMachineScaleSetVM() if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil { return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name)) } @@ -706,6 +717,7 @@ func (ss *ScaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.Azure return node, nil } + // FIXME(ccc): check only if vmss is uniform. if _, err := getScaleSetVMInstanceID(nodeName); err != nil { return nil, err } @@ -766,17 +778,17 @@ func (ss *ScaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { continue } - ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) if err != nil { return nil, err } - if ssName == "" { + if vm.VMSSName == "" { klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName) continue } - *agentPoolScaleSets = append(*agentPoolScaleSets, ssName) + *agentPoolScaleSets = append(*agentPoolScaleSets, vm.VMSSName) } return agentPoolScaleSets, nil @@ -846,7 +858,7 @@ func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return ss.availabilitySet.GetPrimaryInterface(nodeName) } - ssName, instanceID, vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) if err != nil { // VM is availability set, but not cached yet in availabilitySetNodesCache. 
if errors.Is(err, ErrorNotVmssInstance) { @@ -857,7 +869,7 @@ func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return network.Interface{}, err } - primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm) + primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) if err != nil { klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) return network.Interface{}, err @@ -875,11 +887,13 @@ func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err ctx, cancel := getContextWithCancel() defer cancel() - nic, rerr := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, ssName, instanceID, nicName, "") + nic, rerr := ss.InterfacesClient.GetVirtualMachineScaleSetNetworkInterface(ctx, resourceGroup, vm.VMSSName, + vm.InstanceID, + nicName, "") if rerr != nil { exists, realErr := checkResourceExistsFromError(rerr) if realErr != nil { - klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, ssName, nicName, realErr) + klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.GetVirtualMachineScaleSetNetworkInterface.Get(%s, %s, %s), err=%v", nodeName, resourceGroup, vm.VMSSName, nicName, realErr) return network.Interface{}, realErr.Error() } @@ -891,7 +905,7 @@ func (ss *ScaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err // Fix interface's location, which is required when updating the interface. // TODO: is this a bug of azure SDK? if nic.Location == nil || *nic.Location == "" { - nic.Location = vm.Location + nic.Location = &vm.Location } return nic, nil @@ -968,7 +982,7 @@ func (ss *ScaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin // participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { vmName := mapNodeNameToVMName(nodeName) - ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault) if err != nil { if errors.Is(err, cloudprovider.InstanceNotFound) { klog.Infof("EnsureHostInPool: skipping node %s because it is not found", vmName) @@ -979,7 +993,7 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam return "", "", "", nil, err } - klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, ssName, backendPoolID) + klog.V(2).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vm.VMSSName, backendPoolID) // Check scale set name: // - For basic SKU load balancer, return nil if the node's scale set is mismatched with vmSetNameOfLB. 
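In the GetPrimaryInterface change above, the NIC is now fetched with the wrapper's re-exported fields (resourceGroup, vm.VMSSName, vm.InstanceID, nicName). The NIC name itself comes from the last path segment of the primary interface's ARM resource ID; the helper below is a hypothetical illustration of that extraction, not the provider's own getLastSegment implementation.

package main

import (
	"fmt"
	"strings"
)

// nicNameFromID extracts the network interface name (the last path segment)
// from a full ARM resource ID, as needed for the scale-set NIC lookup above.
func nicNameFromID(nicID string) (string, error) {
	parts := strings.Split(nicID, "/")
	name := parts[len(parts)-1]
	if name == "" {
		return "", fmt.Errorf("resource ID %q has no NIC name segment", nicID)
	}
	return name, nil
}

func main() {
	id := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/" +
		"virtualMachineScaleSets/vmss/virtualMachines/0/networkInterfaces/nic-0"
	fmt.Println(nicNameFromID(id)) // nic-0 <nil>
}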
@@ -996,24 +1010,26 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam // ensure the vm that is supposed to share the primary SLB in the backendpool of the primary SLB if strings.EqualFold(ss.GetPrimaryVMSetName(), vmSetNameOfLB) && - ss.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(ssName)) { - klog.V(4).Infof("EnsureHostInPool: the vm %s in the vmSet %s is supposed to share the primary SLB", nodeName, ssName) + ss.getVMSetNamesSharingPrimarySLB().Has(strings.ToLower(vm.VMSSName)) { + klog.V(4).Infof("EnsureHostInPool: the vm %s in the vmSet %s is supposed to share the primary SLB", + nodeName, vm.VMSSName) needCheck = false } } - if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, ssName) { + if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, vm.VMSSName) { klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", vmName, vmSetNameOfLB) return "", "", "", nil, nil } // Find primary network interface configuration. - if vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, probably because the vm's being deleted", vmName) + if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { + klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, "+ + "probably because the vm's being deleted", vmName) return "", "", "", nil, nil } - networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations + networkInterfaceConfigurations := *vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations primaryNetworkInterfaceConfiguration, err := ss.getPrimaryNetworkInterfaceConfiguration(networkInterfaceConfigurations, vmName) if err != nil { return "", "", "", nil, err @@ -1084,9 +1100,9 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam }) primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools newVM := &compute.VirtualMachineScaleSetVM{ - Location: vm.Location, + Location: &vm.Location, VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ - HardwareProfile: vm.HardwareProfile, + HardwareProfile: vm.VirtualMachineScaleSetVMProperties.HardwareProfile, NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{ NetworkInterfaceConfigurations: &networkInterfaceConfigurations, }, @@ -1099,7 +1115,7 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam return "", "", "", nil, err } - return nodeResourceGroup, ssName, instanceID, newVM, nil + return nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, nil } func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, string, error) { @@ -1371,7 +1387,7 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted // from the specified node, which returns (resourceGroup, vmasName, instanceID, vmssVM, error). 
func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) { - ssName, instanceID, vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) + vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault) if err != nil { if errors.Is(err, cloudprovider.InstanceNotFound) { klog.Infof("ensureBackendPoolDeletedFromNode: skipping node %s because it is not found", nodeName) @@ -1382,11 +1398,12 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str } // Find primary network interface configuration. - if vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, probably because the vm's being deleted", nodeName) + if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { + klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vm %s, "+ + "probably because the vm's being deleted", nodeName) return "", "", "", nil, nil } - networkInterfaceConfigurations := *vm.NetworkProfileConfiguration.NetworkInterfaceConfigurations + networkInterfaceConfigurations := *vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations primaryNetworkInterfaceConfiguration, err := ss.getPrimaryNetworkInterfaceConfiguration(networkInterfaceConfigurations, nodeName) if err != nil { return "", "", "", nil, err @@ -1422,9 +1439,9 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str // Compose a new vmssVM with added backendPoolID. primaryIPConfiguration.LoadBalancerBackendAddressPools = &newBackendPools newVM := &compute.VirtualMachineScaleSetVM{ - Location: vm.Location, + Location: &vm.Location, VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{ - HardwareProfile: vm.HardwareProfile, + HardwareProfile: vm.VirtualMachineScaleSetVMProperties.HardwareProfile, NetworkProfileConfiguration: &compute.VirtualMachineScaleSetVMNetworkProfileConfiguration{ NetworkInterfaceConfigurations: &networkInterfaceConfigurations, }, @@ -1437,7 +1454,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID str return "", "", "", nil, err } - return nodeResourceGroup, ssName, instanceID, newVM, nil + return nodeResourceGroup, vm.VMSSName, vm.InstanceID, newVM, nil } // GetNodeNameByIPConfigurationID gets the node name and the VMSS name by IP configuration ID. @@ -1553,7 +1570,8 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, } if errors.Is(err, cloudprovider.InstanceNotFound) { - klog.Infof("EnsureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not found", getServiceName(service), ipConfigurationID) + klog.Infof("EnsureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not"+ + " found", getServiceName(service), ipConfigurationID) continue } diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go index 26587698b..fb46d8c80 100644 --- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// Package provider is an implementation of CloudProvider Interface, LoadBalancer // and Instances for Azure. -package provider // import "sigs.k8s.io/cloud-provider-azure/pkg/provider +package provider // import "sigs.k8s.io/cloud-provider-azure/pkg/provider" diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go new file mode 100644 index 000000000..fc028ff79 --- /dev/null +++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine/virtualmachine.go @@ -0,0 +1,146 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package virtualmachine + +import ( + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute" + "github.com/Azure/go-autorest/autorest/to" +) + +type Variant string + +const ( + VariantVirtualMachine Variant = "VirtualMachine" + VariantVirtualMachineScaleSetVM Variant = "VirtualMachineScaleSetVM" +) + +type Manage string + +const ( + VMSS Manage = "vmss" + VMAS Manage = "vmas" +) + +type ManageOption = func(*VirtualMachine) + +// ByVMSS specifies that the virtual machine is managed by a virtual machine scale set. +func ByVMSS(vmssName string) ManageOption { + return func(vm *VirtualMachine) { + vm.Manage = VMSS + vm.VMSSName = vmssName + } +} + +type VirtualMachine struct { + Variant Variant + vm *compute.VirtualMachine + vmssVM *compute.VirtualMachineScaleSetVM + + Manage Manage + VMSSName string + + // re-export fields + // common fields + ID string + Name string + Location string + Tags map[string]string + Zones []string + Type string + Plan *compute.Plan + Resources *[]compute.VirtualMachineExtension + + // fields of VirtualMachine + Identity *compute.VirtualMachineIdentity + VirtualMachineProperties *compute.VirtualMachineProperties + + // fields of VirtualMachineScaleSetVM + InstanceID string + SKU *compute.Sku + VirtualMachineScaleSetVMProperties *compute.VirtualMachineScaleSetVMProperties +} + +func FromVirtualMachine(vm *compute.VirtualMachine, opt ...ManageOption) *VirtualMachine { + v := &VirtualMachine{ + vm: vm, + Variant: VariantVirtualMachine, + + ID: to.String(vm.ID), + Name: to.String(vm.Name), + Type: to.String(vm.Type), + Location: to.String(vm.Location), + Tags: to.StringMap(vm.Tags), + Zones: to.StringSlice(vm.Zones), + Plan: vm.Plan, + Resources: vm.Resources, + + Identity: vm.Identity, + VirtualMachineProperties: vm.VirtualMachineProperties, + } + + for _, opt := range opt { + opt(v) + } + + return v +} + +func FromVirtualMachineScaleSetVM(vm *compute.VirtualMachineScaleSetVM, opt ManageOption) *VirtualMachine { + v := &VirtualMachine{ + Variant: VariantVirtualMachineScaleSetVM, + vmssVM: vm, + + ID: to.String(vm.ID), + Name: to.String(vm.Name), + Type: to.String(vm.Type), + Location: to.String(vm.Location), + Tags: to.StringMap(vm.Tags), + Zones: to.StringSlice(vm.Zones), + Plan: vm.Plan, + Resources: vm.Resources, + + SKU: vm.Sku, + InstanceID: 
to.String(vm.InstanceID), + VirtualMachineScaleSetVMProperties: vm.VirtualMachineScaleSetVMProperties, + } + + // TODO: should validate manage option + // VirtualMachineScaleSetVM should always be managed by VMSS + opt(v) + + return v +} + +func (vm *VirtualMachine) IsVirtualMachine() bool { + return vm.Variant == VariantVirtualMachine +} + +func (vm *VirtualMachine) IsVirtualMachineScaleSetVM() bool { + return vm.Variant == VariantVirtualMachineScaleSetVM +} + +func (vm *VirtualMachine) ManagedByVMSS() bool { + return vm.Manage == VMSS +} + +func (vm *VirtualMachine) AsVirtualMachine() *compute.VirtualMachine { + return vm.vm +} + +func (vm *VirtualMachine) AsVirtualMachineScaleSetVM() *compute.VirtualMachineScaleSetVM { + return vm.vmssVM +}
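The new virtualmachine package wraps either a compute.VirtualMachine or a compute.VirtualMachineScaleSetVM and re-exports the commonly used fields as plain values, which is what lets the VMSS code above drop the (vmssName, instanceID, vm) tuples. A minimal usage sketch, using made-up resource IDs, assuming the constructor and accessors defined in the file above:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-07-01/compute"
	"github.com/Azure/go-autorest/autorest/to"

	"sigs.k8s.io/cloud-provider-azure/pkg/provider/virtualmachine"
)

func main() {
	// Wrap a scale-set VM; ByVMSS records which VMSS manages it and the
	// constructor re-exports ID, InstanceID, Location, etc. as plain values.
	ssVM := &compute.VirtualMachineScaleSetVM{
		ID:         to.StringPtr("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss/virtualMachines/0"),
		InstanceID: to.StringPtr("0"),
		Location:   to.StringPtr("eastus"),
	}
	vm := virtualmachine.FromVirtualMachineScaleSetVM(ssVM, virtualmachine.ByVMSS("vmss"))

	fmt.Println(vm.IsVirtualMachineScaleSetVM()) // true
	fmt.Println(vm.ManagedByVMSS())              // true
	fmt.Println(vm.VMSSName, vm.InstanceID)      // vmss 0

	// The underlying SDK object stays reachable when the full API surface is needed.
	_ = vm.AsVirtualMachineScaleSetVM()
}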