config-map.yaml
---
apiVersion: v1
data:
  healthcheck_route.yaml: |
    apiVersion: networking.istio.io/v1alpha3
    kind: VirtualService
    metadata:
      name: default-routes
      namespace: $(namespace)
    spec:
      hosts:
      - "*"
      gateways:
      - kubeflow-gateway
      http:
      - match:
        - uri:
            exact: /healthz
        route:
        - destination:
            port:
              number: 80
            host: whoami-app.kubeflow.svc.cluster.local
      - match:
        - uri:
            exact: /whoami
        route:
        - destination:
            port:
              number: 80
            host: whoami-app.kubeflow.svc.cluster.local
    ---
    apiVersion: networking.istio.io/v1alpha3
    kind: Gateway
    metadata:
      name: kubeflow-gateway
      namespace: $(namespace)
    spec:
      selector:
        istio: ingressgateway
      servers:
      - port:
          number: 80
          name: http
          protocol: HTTP
        hosts:
        - "*"
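  # Illustrative check (assumption, not part of the original manifest): once the
  # gateway above is serving, the /healthz and /whoami routes can be exercised
  # against the ingress gateway with something like:
  #   INGRESS_IP=$(kubectl -n istio-system get svc istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  #   curl -s "http://${INGRESS_IP}/healthz"
  #   curl -s "http://${INGRESS_IP}/whoami"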
  setup_backend.sh: |
    #!/usr/bin/env bash
    #
    # A simple shell script to configure the JWT audience used with ISTIO.
    set -x
    [ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
    [ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
    [ -z ${INGRESS_NAME} ] && echo Error INGRESS_NAME must be set && exit 1
    PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
    if [ -z ${PROJECT} ]; then
      echo Error unable to fetch PROJECT from compute metadata
      exit 1
    fi
    PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id)
    if [ -z ${PROJECT_NUM} ]; then
      echo Error unable to fetch PROJECT_NUM from compute metadata
      exit 1
    fi
    # Activate the service account
    if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
      # As of 0.7.0 we should be using workload identity and never setting GOOGLE_APPLICATION_CREDENTIALS.
      # This path is kept for backwards compatibility and can be removed later.
      gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}
    fi
    # Print out the config for debugging
    gcloud config list
    gcloud auth list
    set_jwt_policy () {
      NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
      echo "node port is ${NODE_PORT}"
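      # Note (illustrative, not from the original script): the GCE ingress controller
      # publishes the backends it created in the ingress.kubernetes.io/backends
      # annotation as a JSON map keyed by backend name, e.g.
      #   {"k8s-be-30910--0123456789abcdef": "HEALTHY"}
      # The loop below greps that map for the backend matching our node port.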
      while [[ -z ${BACKEND_NAME} ]]; do
        BACKENDS=$(kubectl --namespace=${NAMESPACE} get ingress ${INGRESS_NAME} -o jsonpath='{.metadata.annotations.ingress\.kubernetes\.io/backends}')
        echo "fetching backends info with ${INGRESS_NAME}: ${BACKENDS}"
        BACKEND_NAME=$(echo $BACKENDS | grep -o "k8s-be-${NODE_PORT}--[0-9a-z]\+")
        echo "backend name is ${BACKEND_NAME}"
        sleep 2
      done
      while [[ -z ${BACKEND_ID} ]]; do
        BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~${BACKEND_NAME} --format='value(id)')
        echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE} filter=name~${BACKEND_NAME}"
        sleep 2
      done
      echo BACKEND_ID=${BACKEND_ID}
      JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}"
      # Use kubectl patch.
      echo patch JWT audience: ${JWT_AUDIENCE}
      kubectl -n ${NAMESPACE} patch policy ingress-jwt --type json -p '[{"op": "replace", "path": "/spec/origins/0/jwt/audiences/0", "value": "'${JWT_AUDIENCE}'"}]'
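      # For reference (a sketch of the assumed shape, not generated by this script),
      # the patch above targets an authentication Policy that looks roughly like:
      #   spec:
      #     origins:
      #     - jwt:
      #         audiences:
      #         - <JWT_AUDIENCE>
      # The live object can be inspected with:
      #   kubectl -n ${NAMESPACE} get policy ingress-jwt -o yaml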
      echo "Clearing lock on service annotation"
      kubectl patch svc "${SERVICE}" -p "{\"metadata\": { \"annotations\": {\"backendlock\": \"\" }}}"
    }
    while true; do
      set_jwt_policy
      # Every 5 minutes recheck the JWT policy and reset it if the backend has changed for some reason.
      # This follows the Kubernetes level-based design.
      # We have at least one report
      # (https://github.com/kubeflow/kubeflow/issues/4342#issuecomment-544653657)
      # of the backend id changing over time.
      sleep 300
    done
  update_backend.sh: |
    #!/bin/bash
    #
    # A simple shell script to configure the health checks by using gcloud.
    set -x
    [ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
    [ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
    [ -z ${INGRESS_NAME} ] && echo Error INGRESS_NAME must be set && exit 1
    PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
    if [ -z ${PROJECT} ]; then
      echo Error unable to fetch PROJECT from compute metadata
      exit 1
    fi
    if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ]]; then
      # TODO(jlewi): As of 0.7 we should always be using workload identity.
      # We can remove this post 0.7.0 once we have workload identity fully working.
      # Activate the service account, allow 5 retries.
      for i in {1..5}; do gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} && break || sleep 10; done
    fi
    set_health_check () {
      NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
      echo node port is ${NODE_PORT}
      while [[ -z ${BACKEND_NAME} ]]; do
        BACKENDS=$(kubectl --namespace=${NAMESPACE} get ingress ${INGRESS_NAME} -o jsonpath='{.metadata.annotations.ingress\.kubernetes\.io/backends}')
        echo "fetching backends info with ${INGRESS_NAME}: ${BACKENDS}"
        BACKEND_NAME=$(echo $BACKENDS | grep -o "k8s-be-${NODE_PORT}--[0-9a-z]\+")
        echo "backend name is ${BACKEND_NAME}"
        sleep 2
      done
      while [[ -z ${BACKEND_SERVICE} ]]; do
        BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~${BACKEND_NAME} --uri)
        echo "Waiting for the backend-services resource PROJECT=${PROJECT} BACKEND_NAME=${BACKEND_NAME} SERVICE=${SERVICE}..."
        sleep 2
      done
      while [[ -z ${HEALTH_CHECK_URI} ]]; do
        HEALTH_CHECK_URI=$(gcloud compute --project=${PROJECT} health-checks list --filter=name~${BACKEND_NAME} --uri)
        echo "Waiting for the healthcheck resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}..."
        sleep 2
      done
      echo health check URI is ${HEALTH_CHECK_URI}
      # Since we create the envoy-ingress ingress object before creating the envoy
      # deployment object, the healthcheck will not be configured correctly in the GCP
      # load balancer. It will default the healthcheck request path to a value of
      # / instead of the intended /healthz.
      # Manually update the healthcheck request path to /healthz.
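      # Optional sanity check (illustrative, not required by this script): the
      # currently configured request path can be inspected with
      #   gcloud --project=${PROJECT} compute health-checks describe ${HEALTH_CHECK_URI}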
      if [[ ${HEALTHCHECK_PATH} ]]; then
        # This is basic auth
        echo Running health checks update ${HEALTH_CHECK_URI} with ${HEALTHCHECK_PATH}
        gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=${HEALTHCHECK_PATH}
      else
        # /healthz/ready is the health check path for istio-ingressgateway
        echo Running health checks update ${HEALTH_CHECK_URI} with /healthz/ready
        gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=/healthz/ready
        # We need the nodeport for the istio-ingressgateway status-port
        STATUS_NODE_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="status-port")].nodePort}')
        gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --port=${STATUS_NODE_PORT}
      fi
    }
    while true; do
      set_health_check
      echo "Backend updated successfully. Waiting 1 hour before updating again."
      sleep 3600
    done
kind: ConfigMap
metadata:
  name: envoy-config
---
apiVersion: v1
data:
  ingress_bootstrap.sh: |
    #!/usr/bin/env bash
    set -x
    set -e
    # This is a workaround until this is resolved: https://github.com/kubernetes/ingress-gce/pull/388
    # The long-term solution is to use a managed SSL certificate on GKE once the feature is GA.
    # The ingress is initially created without a tls spec.
    # Wait until cert-manager generates the certificate using the http-01 challenge on the GCLB ingress.
    # After the certificate is obtained, patch the ingress with the tls spec to enable SSL on the GCLB.
    # Wait for certificate.
    until kubectl -n ${NAMESPACE} get secret ${TLS_SECRET_NAME} 2>/dev/null; do
      echo "Waiting for certificate..."
      sleep 2
    done
    kubectl -n ${NAMESPACE} patch ingress ${INGRESS_NAME} --type='json' -p '[{"op": "add", "path": "/spec/tls", "value": [{"secretName": "'${TLS_SECRET_NAME}'", "hosts":["'${TLS_HOST_NAME}'"]}]}]'
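    # Optional follow-up check (illustrative, not part of the original script):
    #   kubectl -n ${NAMESPACE} get ingress ${INGRESS_NAME} -o jsonpath='{.spec.tls}'
    # should now print the secretName/hosts entry added by the patch above.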
    echo "Done"
kind: ConfigMap
metadata:
  name: ingress-bootstrap-config
---
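# Usage sketch (assumption, not part of this manifest): the scripts above are
# typically mounted from the envoy-config ConfigMap into a small controller pod
# and run as its command. All names below (iap-enabler, kf-admin, the image)
# are illustrative placeholders, not taken from this file.
#
#   apiVersion: apps/v1
#   kind: Deployment
#   metadata:
#     name: iap-enabler
#     namespace: $(namespace)
#   spec:
#     replicas: 1
#     selector:
#       matchLabels:
#         app: iap-enabler
#     template:
#       metadata:
#         labels:
#           app: iap-enabler
#       spec:
#         serviceAccountName: kf-admin
#         containers:
#         - name: iap
#           image: gcr.io/cloud-builders/kubectl
#           command: ["bash", "/var/envoy-config/setup_backend.sh"]
#           env:
#           - name: NAMESPACE
#             value: $(namespace)
#           - name: SERVICE
#             value: istio-ingressgateway
#           - name: INGRESS_NAME
#             value: envoy-ingress
#           volumeMounts:
#           - name: config-volume
#             mountPath: /var/envoy-config
#         volumes:
#         - name: config-volume
#           configMap:
#             name: envoy-config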