forked from oracle/weblogic-kubernetes-operator
-
Notifications
You must be signed in to change notification settings - Fork 0
/
wercker.yml
327 lines (280 loc) · 12.7 KB
/
wercker.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
# Copyright 2017, 2018, Oracle Corporation and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.
#
# Wercker build file for Oracle WebLogic Server Kubernetes Operator
#
#
# Wercker application is at : https://app.wercker.com/Oracle/weblogic-kubernetes-operator
#
# Wercker workflow looks like this:
#
#   build -> integration-tests (1.7.9)
#         -> integration-tests (1.8.5)
#         -> quality
#
# Base container image for all pipelines: Oracle Server JRE from the Docker
# Store (a login is required to pull it, hence the credentials).
box:
  id: store/oracle/serverjre
  username: $DOCKER_USERNAME
  password: $DOCKER_PASSWORD
  # Quoted so the tag stays a string rather than the YAML integer 8.
  tag: "8"
# This is the main build pipeline that builds the codebase and runs unit tests.
build:
  steps:
    - script:
        name: Hello
        code: |
          echo "Building Oracle WebLogic Server Kubernetes Operator..."
          echo "The branch and commit id are $WERCKER_GIT_BRANCH, $WERCKER_GIT_COMMIT"
          export JAR_VERSION="`grep -m1 "<version>" pom.xml | cut -f2 -d">" | cut -f1 -d "<"`"
          export IMAGE_TAG_OPERATOR="${WERCKER_GIT_BRANCH//[_\/]/-}"
    - script:
        name: Install pre-reqs
        code: |
          yum -y install tar gzip procps
    - wercker/maven:
        goals: clean install
        version: 3.5.2
        cache_repo: true
    # Build the operator image from the repo Dockerfile, authenticating to
    # Docker Hub so the base image can be pulled.
    - internal/docker-build:
        dockerfile: Dockerfile
        image-name: $REPO_REPOSITORY:$IMAGE_TAG_OPERATOR
        no-cache: true
        build-args: VERSION=$JAR_VERSION
        registry-auth-config: |
          {
            "https://index.docker.io/v1/": {
              "username": "${DOCKER_USERNAME}",
              "password": "${DOCKER_PASSWORD}"
            }
          }
    # push the image to Docker using the GIT branch as the tag
    # this image needs to be available to the integration-test pipeline for testing
    - internal/docker-push:
        image-name: $REPO_REPOSITORY:$IMAGE_TAG_OPERATOR
        username: $REPO_USERNAME
        password: $REPO_PASSWORD
        repository: $REPO_REPOSITORY
        registry: $REPO_REGISTRY
        tag: $IMAGE_TAG_OPERATOR
# This pipeline runs the integration tests against a k8s cluster on OCI.
# NOTE(review): 'command-timeout' is a top-level key and is repeated before
# integration-test-java below; duplicate top-level YAML keys resolve to the
# last value (both are 60) — confirm whether per-pipeline settings were intended.
command-timeout: 60
integration-test:
  steps:
    - script:
        name: Run integration tests
        code: |
          #!/bin/bash
          function cleanup_and_store {
            # release lease in case run.sh failed to release it
            # (the following command only releases the lease after confirming this pipeline still owns it)
            # TBD Calling this somehow seems to fail the wercker run
            # $WERCKER_SOURCE_DIR/src/integration-tests/bash/lease.sh -d "$LEASE_ID" > /tmp/junk 2>&1
            # clean up
            yum clean all
            # store the artifacts so we can download them easily
            tar czvf ${WERCKER_REPORT_ARTIFACTS_DIR}/integration-test-data.tar.gz /pipeline/output/*
          }
          # EXIT trap: always store artifacts, record the real result in
          # INTEGRATION_TEST_RESULT.
          # NOTE(review): 'exit 0' here overrides any failure exit code set
          # below, so the step always reports success — confirm that something
          # downstream checks INTEGRATION_TEST_RESULT.
          function finish {
            exit_code=$?
            export INTEGRATION_TEST_RESULT="$exit_code"
            cleanup_and_store
            exit 0
          }
          trap finish EXIT
          # Copy Docker file to OCI host and load into local Docker registry
          # yum install -y openssh-clients
          # echo -e $OCI_K8S_SSHKEY > /tmp/ssh_key
          # chmod 600 /tmp/ssh_key
          # scp -o StrictHostKeyChecking=no -i /tmp/ssh_key $WERCKER_OUTPUT_DIR/build.tar opc@$OCI_K8S_WORKER0_IP:/scratch/build.tar
          # ssh -o StrictHostKeyChecking=no -i /tmp/ssh_key opc@$OCI_K8S_WORKER0_IP "tar -xvf /scratch/operator.tar"
          # ssh -o StrictHostKeyChecking=no -i /tmp/ssh_key opc@$OCI_K8S_WORKER0_IP "sudo docker build -t weblogic-kubernetes-operator:$WERCKER_GIT_BRANCH --no-cache=true /scratch/"
          # ssh -o StrictHostKeyChecking=no -i /tmp/ssh_key opc@$OCI_K8S_WORKER0_IP "sudo docker save weblogic-kubernetes-operator:$WERCKER_GIT_BRANCH > /scratch/operator.tar"
          # ssh -o StrictHostKeyChecking=no -i /tmp/ssh_key opc@$OCI_K8S_WORKER1_IP "sudo docker load < /scratch/operator.tar"
          # Make the k8s worker resolvable by hostname inside this container.
          cp /etc/hosts $WERCKER_PIPELINE_DIR/hosts
          sed -i "$ a ${OCI_K8S_WORKER0_IP} ${OCI_K8S_WORKER0_HOSTNAME}" $WERCKER_PIPELINE_DIR/hosts
          cp $WERCKER_PIPELINE_DIR/hosts /etc/hosts
          # Update KUBECONFIG for K8S cluster
          export K8S_NODEPORT_HOST="${OCI_K8S_WORKER0_HOSTNAME}"
          export K8S_NODEPORT_IP="${OCI_K8S_WORKER0_IP}"
          sed -i -e "s,%ADDRESS%,https://$OCI_K8S_MASTER_IP:443,g" $WERCKER_SOURCE_DIR/build/kube.config
          sed -i -e "s,%CLIENT_CERT_DATA%,$OCI_K8S_CLIENT_CERT_DATA,g" $WERCKER_SOURCE_DIR/build/kube.config
          sed -i -e "s,%CLIENT_KEY_DATA%,$OCI_K8S_CLIENT_KEY_DATA,g" $WERCKER_SOURCE_DIR/build/kube.config
          export KUBECONFIG="$WERCKER_SOURCE_DIR/build/kube.config"
          # running on Wercker
          export WERCKER="true"
          # install kubectl
          # export K8S_CLIENT_VERSION="$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)" #latest version
          export K8S_CLIENT_VERSION="v1.10.5"
          curl -LO https://storage.googleapis.com/kubernetes-release/release/${K8S_CLIENT_VERSION}/bin/linux/amd64/kubectl
          chmod +x ./kubectl
          mv ./kubectl /usr/local/bin/kubectl
          # install maven, includes java as dependency
          curl -LO http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo
          mv epel-apache-maven.repo /etc/yum.repos.d/
          yum install -y apache-maven
          export M2_HOME="/usr/share/apache-maven"
          export PATH=$M2_HOME/bin:$PATH
          # install openssl
          yum install -y openssl
          echo @@ "Calling 'kubectl version'"
          kubectl version
          # obtain an exclusive k8s cluster lease using the 'lease.sh' helper script
          # - first set LEASE_ID to a unique value
          # - then try obtain the lease, block up to 100 minutes (wercker pipeline should timeout before then)
          # - finally, run.sh will periodically try renew the lease as it runs (using $LEASE_ID)
          # - if run.sh fails when it tries to renew the lease (as something else took it, etc), it will exit early
          # - when run.sh exits, it will try release the lease if it's still the owner...
          export LEASE_ID="${WERCKER_STEP_ID}-pid$$"
          echo @@
          echo @@ "Obtaining lease!"
          echo @@
          echo @@ "About to block up to the 100 minutes trying to get exclusive access to the kubernetes cluster."
          echo @@ "If this blocks unexpectedly and you are sure that the kubernetes cluster isn't in use by "
          echo @@ "another Wercker pipeline, you can force the lease to free up via 'kubectl delete cm acceptance-test-lease'."
          echo @@ "See LEASE_ID in run.sh for details about this heuristic."
          echo @@ "LEASE_ID=$LEASE_ID host=$HOST date=`date` user=$USER."
          echo @@
          echo @@ "Current lease owner (if any):"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/lease.sh -s
          echo @@
          echo @@ "About to try obtain lease:"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/lease.sh -o "$LEASE_ID" -t $((100 * 60))
          echo @@
          export HOST_PATH="/scratch"
          export PV_ROOT=$HOST_PATH
          export RESULT_ROOT="$WERCKER_OUTPUT_DIR/k8s_dir"
          mkdir -m 777 -p $RESULT_ROOT
          export PROJECT_ROOT="${WERCKER_SOURCE_DIR}"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/cleanup.sh
          export IMAGE_NAME_OPERATOR="${REPO_REPOSITORY}"
          export IMAGE_TAG_OPERATOR="${WERCKER_GIT_BRANCH//[_\/]/-}"
          export IMAGE_PULL_POLICY_OPERATOR="Always"
          export IMAGE_PULL_SECRET_OPERATOR="ocir-registry"
          export IMAGE_PULL_SECRET_WEBLOGIC="docker-store"
          echo "Integration test suite against the test image which is:"
          echo "$IMAGE_NAME_OPERATOR:$IMAGE_TAG_OPERATOR"
          # integration tests
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/run.sh
          RUN_SH_RC=$?
          if [ "$RUN_SH_RC" = "0" ]; then
            echo "run.sh finished successfully"
          else
            echo "run.sh failed with return code ${RUN_SH_RC}"
            exit $RUN_SH_RC
          fi
          cleanup_and_store
# This pipeline runs java integration tests against a k8s cluster on OCI.
# NOTE(review): duplicate of the top-level 'command-timeout' key above
# (last value wins in most YAML parsers; both are 60).
command-timeout: 60
integration-test-java:
  steps:
    # First script step prepares the container: /etc/hosts entry for the
    # worker node, kubeconfig, kubectl/maven/openssl installs, and the
    # exclusive cluster lease.
    - script:
        name: obtain lease on k8s cluster
        code: |
          #!/bin/bash
          cp /etc/hosts $WERCKER_PIPELINE_DIR/hosts
          sed -i "$ a ${OCI_K8S_WORKER0_IP} ${OCI_K8S_WORKER0_HOSTNAME}" $WERCKER_PIPELINE_DIR/hosts
          cp $WERCKER_PIPELINE_DIR/hosts /etc/hosts
          # Update KUBECONFIG for K8S cluster
          export K8S_NODEPORT_HOST="${OCI_K8S_WORKER0_HOSTNAME}"
          sed -i -e "s,%ADDRESS%,https://$OCI_K8S_MASTER_IP:443,g" $WERCKER_SOURCE_DIR/build/kube.config
          sed -i -e "s,%CLIENT_CERT_DATA%,$OCI_K8S_CLIENT_CERT_DATA,g" $WERCKER_SOURCE_DIR/build/kube.config
          sed -i -e "s,%CLIENT_KEY_DATA%,$OCI_K8S_CLIENT_KEY_DATA,g" $WERCKER_SOURCE_DIR/build/kube.config
          export KUBECONFIG="$WERCKER_SOURCE_DIR/build/kube.config"
          # running on Wercker
          export WERCKER="true"
          # install kubectl
          # export K8S_CLIENT_VERSION="$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)" #latest version
          export K8S_CLIENT_VERSION="v1.10.5"
          curl -LO https://storage.googleapis.com/kubernetes-release/release/${K8S_CLIENT_VERSION}/bin/linux/amd64/kubectl
          chmod +x ./kubectl
          mv ./kubectl /usr/local/bin/kubectl
          # install maven, includes java as dependency
          curl -LO http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo
          mv epel-apache-maven.repo /etc/yum.repos.d/
          yum install -y apache-maven
          export M2_HOME="/usr/share/apache-maven"
          export PATH=$M2_HOME/bin:$PATH
          # install openssl
          yum install -y openssl
          echo @@ "Calling 'kubectl version'"
          kubectl version
          # obtain an exclusive k8s cluster lease using the 'lease.sh' helper script
          # - first set LEASE_ID to a unique value
          # - then try obtain the lease, block up to 100 minutes (wercker pipeline should timeout before then)
          # - finally, run.sh will periodically try renew the lease as it runs (using $LEASE_ID)
          # - if run.sh fails when it tries to renew the lease (as something else took it, etc), it will exit early
          # - when run.sh exits, it will try release the lease if it's still the owner...
          export LEASE_ID="${WERCKER_STEP_ID}-pid$$"
          echo @@
          echo @@ "Obtaining lease!"
          echo @@
          echo @@ "About to block up to the 100 minutes trying to get exclusive access to the kubernetes cluster."
          echo @@ "If this blocks unexpectedly and you are sure that the kubernetes cluster isn't in use by "
          echo @@ "another Wercker pipeline, you can force the lease to free up via 'kubectl delete cm acceptance-test-lease'."
          echo @@ "See LEASE_ID in run.sh for details about this heuristic."
          echo @@ "LEASE_ID=$LEASE_ID host=$HOST date=`date` user=$USER."
          echo @@
          echo @@ "Current lease owner (if any):"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/lease.sh -s
          echo @@
          echo @@ "About to try obtain lease:"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/lease.sh -o "$LEASE_ID" -t $((100 * 60))
          echo @@
          export HOST_PATH="/scratch"
          export PV_ROOT=$HOST_PATH
          export RESULT_ROOT="$WERCKER_OUTPUT_DIR/k8s_dir"
          mkdir -m 777 -p $RESULT_ROOT
          export PROJECT_ROOT="${WERCKER_SOURCE_DIR}"
          $WERCKER_SOURCE_DIR/src/integration-tests/bash/cleanup.sh
          export IMAGE_NAME_OPERATOR="${REPO_REPOSITORY}"
          export IMAGE_TAG_OPERATOR="${WERCKER_GIT_BRANCH//[_\/]/-}"
          export IMAGE_PULL_POLICY_OPERATOR="Always"
          export IMAGE_PULL_SECRET_OPERATOR="ocir-registry"
          export IMAGE_PULL_SECRET_WEBLOGIC="docker-store"
          echo "Integration test suite against the test image which is:"
          echo "$IMAGE_NAME_OPERATOR:$IMAGE_TAG_OPERATOR"
    - script:
        name: Install pre-reqs
        code: |
          yum -y install tar gzip procps
    # Run the java integration tests via the maven profile.
    - wercker/maven:
        goals: clean verify
        version: 3.5.2
        profiles: java-integration-tests
    - script:
        name: cleanup and store
        code: |
          #!/bin/bash
          function cleanup_and_store {
            $WERCKER_SOURCE_DIR/integration-tests/src/test/resources/statedump.sh
            # clean up
            yum clean all
            # store the artifacts so we can download them easily
            tar czvf ${WERCKER_REPORT_ARTIFACTS_DIR}/integration-test-data.tar.gz /pipeline/output/*
          }
          cleanup_and_store
# This pipeline runs quality checks (SonarQube analysis via the build-sonar
# maven profile).
quality:
  steps:
    - script:
        name: Install pre-reqs
        code: |
          yum -y install tar gzip procps
    - wercker/maven:
        profiles: build-sonar
        maven_opts: -Dsonar.login=${SONAR_LOGIN} -Dsonar.password=${SONAR_PASSWORD} -Dsonar.host.url=${SONAR_HOST}
        goals: clean install sonar:sonar
        cache_repo: true
        version: 3.5.2
# Promote (re-push) the branch-tagged operator image to the public
# oracle/weblogic-kubernetes-operator repository on Docker Hub.
promote-image:
  steps:
    - internal/docker-push:
        image-name: $REPO_REPOSITORY:$IMAGE_TAG_OPERATOR
        username: $DOCKER_USERNAME
        password: $DOCKER_PASSWORD
        repository: oracle/weblogic-kubernetes-operator
        tag: $IMAGE_TAG_OPERATOR
        # NOTE(review): 'build-args' is a docker-BUILD option; it looks
        # out of place on a push step — confirm it is honored here.
        build-args: VERSION=$VERSION
# Interactive debugging pipeline: drops into a shell inside the build box.
dev:
  steps:
    - internal/shell