From 373f59461159dd14eabcde82fcdaf305a2ca5b6f Mon Sep 17 00:00:00 2001
From: Viren Baraiya
Date: Sat, 9 Sep 2023 00:57:05 -0700
Subject: [PATCH] docker changes

---
 docker/docker-compose-mysql.yaml              |  3 +-
 docker/docker-compose-prometheus.yaml         | 20 -------
 docker/docker-compose.yaml                    |  2 +-
 docker/server/DockerfileLocal                 |  8 +--
 docker/server/bin/startup.sh                  |  2 +-
 .../config/config-docker-internal.properties  | 36 ------------
 .../config/config-local-postgres.properties   | 26 +++++----
 docker/server/config/config-local.properties  | 37 ------------
 docker/server/config/config-redis.properties  | 22 +++++++
 docker/server/config/config.properties        | 57 ++++++++++---------
 10 files changed, 73 insertions(+), 140 deletions(-)
 delete mode 100644 docker/docker-compose-prometheus.yaml
 delete mode 100755 docker/server/config/config-docker-internal.properties
 delete mode 100755 docker/server/config/config-local.properties
 create mode 100755 docker/server/config/config-redis.properties

diff --git a/docker/docker-compose-mysql.yaml b/docker/docker-compose-mysql.yaml
index c7d05943d2..0db7d31d65 100644
--- a/docker/docker-compose-mysql.yaml
+++ b/docker/docker-compose-mysql.yaml
@@ -4,7 +4,8 @@ services:
   conductor-server:
     environment:
-      - CONFIG_PROP=config-mysql-grpc.properties
+      - conductor.db.type=redis_standalone
+      - conductor.redis.hosts=rs:6379:us-east-1c
     image: conductor:server
     build:
       context: ..
diff --git a/docker/docker-compose-prometheus.yaml b/docker/docker-compose-prometheus.yaml
deleted file mode 100644
index 10f8d80e40..0000000000
--- a/docker/docker-compose-prometheus.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-version: '3'
-
-services:
-
-  prometheus:
-    image: prom/prometheus
-    volumes:
-      - ./prometheus/:/etc/prometheus/
-    command:
-      - '--config.file=/etc/prometheus/prometheus.yml'
-    ports:
-      - 9090:9090
-    external_links:
-      - conductor-server:conductor-server
-    networks:
-      - internal
-    restart: always
-
-networks:
-  internal:
\ No newline at end of file
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 91cb3d4610..e68ed82102 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   conductor-server:
     environment:
-      - CONFIG_PROP=config-local.properties
+      -
     image: conductor:server
     container_name: conductor-server
     build:
diff --git a/docker/server/DockerfileLocal b/docker/server/DockerfileLocal
index b34ae2a86b..8b6116bf74 100644
--- a/docker/server/DockerfileLocal
+++ b/docker/server/DockerfileLocal
@@ -19,7 +19,8 @@
 RUN apk add --update nodejs npm yarn
 
 COPY . /conductor
 WORKDIR /conductor/ui
-RUN yarn install && yarn build
+#RUN yarn install && yarn build
+RUN mkdir build
 RUN ls -ltr
 RUN echo "Done building UI"
@@ -52,7 +53,6 @@ LABEL maintainer="Netflix OSS "
 RUN apk add openjdk11
 RUN apk add curl
 RUN apk add nginx
-RUN apk add redis
 RUN apk add postgresql14
 
 # Make app folders
@@ -62,9 +62,7 @@ RUN mkdir -p /app/elasticsearch
 # Copy the compiled output to new image
 COPY docker/server/bin /app
 COPY docker/server/config /app/config
-COPY docker/server/config/redis.conf /app/config/redis.conf
-COPY docker/server/bin/start_all.sh /app/start_all.sh
-COPY docker/server/bin/health.sh /app/health.sh
+COPY docker/server/bin/start_all_local.sh /app/start_all_local.sh
 COPY --from=builder /server-build/conductor-community/community-server/build/libs/*boot*.jar /app/libs/conductor-server.jar
 
 # Copy compiled UI assets to nginx www directory
diff --git a/docker/server/bin/startup.sh b/docker/server/bin/startup.sh
index 21289baab6..e5bd7f501d 100755
--- a/docker/server/bin/startup.sh
+++ b/docker/server/bin/startup.sh
@@ -28,7 +28,7 @@ export config_file=
 
 if [ -z "$CONFIG_PROP" ];
   then
-    echo "Using an in-memory instance of conductor";
+    echo "Using default configuration file";
     export config_file=/app/config/config.properties
   else
     echo "Using '$CONFIG_PROP'";
diff --git a/docker/server/config/config-docker-internal.properties b/docker/server/config/config-docker-internal.properties
deleted file mode 100755
index 85d94eebbb..0000000000
--- a/docker/server/config/config-docker-internal.properties
+++ /dev/null
@@ -1,36 +0,0 @@
-# Servers.
-conductor.grpc-server.enabled=false
-
-# Database persistence model.
-conductor.db.type=redis_standalone
-conductor.queue.type=redis_standalone
-# Dynomite Cluster details.
-# format is host:port:rack separated by semicolon
-conductor.redis.hosts=kubernetes.docker.internal:6379:us-east-1c
-
-# Namespace for the keys stored in Dynomite/Redis
-conductor.redis.workflowNamespacePrefix=conductor
-
-# Namespace prefix for the dyno queues
-conductor.redis.queueNamespacePrefix=conductor_queues
-
-# No. of threads allocated to dyno-queues (optional)
-queues.dynomite.threads=10
-
-# By default with dynomite, we want the repairservice enabled
-conductor.app.workflowRepairServiceEnabled=true
-
-# Non-quorum port used to connect to local redis. Used by dyno-queues.
-# When using redis directly, set this to the same port as redis server
-# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
-conductor.redis.queuesNonQuorumPort=22122
-
-# Elastic search instance indexing is disabled.
-conductor.indexing.enabled=true
-conductor.elasticsearch.url=http://kubernetes.docker.internal:9200
-conductor.elasticsearch.indexReplicasCount=0
-
-# Load sample kitchen sink workflow
-loadSample=true
-
-conductor.elasticsearch.clusterHealthColor=yellow
diff --git a/docker/server/config/config-local-postgres.properties b/docker/server/config/config-local-postgres.properties
index 76d563d742..1a8a8a4e46 100755
--- a/docker/server/config/config-local-postgres.properties
+++ b/docker/server/config/config-local-postgres.properties
@@ -1,18 +1,20 @@
+conductor.grpc-server.enabled=false
 # Database persistence type.
 conductor.db.type=postgres
-
-# postgres
-spring.datasource.url=jdbc:postgresql://localhost:5432/postgres
+conductor.db.schema=conductor
+conductor.postgres.schema=conductor
+conductor.db.queue.type=postgres
+spring.datasource.url=jdbc:postgresql://localhost:5432/postgres?schema=conductor&ApplicationName=conductor
 spring.datasource.username=postgres
 spring.datasource.password=postgres
-
-# Elastic search instance indexing is enabled.
-conductor.indexing.enabled=true
 conductor.indexing.type=postgres
+conductor.indexing.enabled=true
 
-# Additional modules for metrics collection exposed to Prometheus (optional)
-conductor.metrics-prometheus.enabled=true
-management.endpoints.web.exposure.include=prometheus
-
-# Load sample kitchen sink workflow
-loadSample=true
+# The following is to force the Elastic Search IndexDAO not to run. If it is just missing, the server will still try to start v6
+conductor.elasticsearch.version=postgres
+flyway.schema=clo-workflow
+# Hikari pool sizes are -1 by default and prevent startup
+spring.datasource.hikari.maximum-pool-size=10
+spring.datasource.hikari.minimum-idle=2
+# Elastic search instance indexing is enabled.
+#conductor.indexing.enabled=false
\ No newline at end of file
diff --git a/docker/server/config/config-local.properties b/docker/server/config/config-local.properties
deleted file mode 100755
index 09b5cd251a..0000000000
--- a/docker/server/config/config-local.properties
+++ /dev/null
@@ -1,37 +0,0 @@
-# Servers.
-conductor.grpc-server.enabled=false
-
-# Database persistence model.
-conductor.db.type=redis_standalone
-conductor.queue.type=redis_standalone
-# Dynomite Cluster details.
-# format is host:port:rack separated by semicolon
-conductor.redis.hosts=localhost:6379:us-east-1c
-
-# Namespace for the keys stored in Dynomite/Redis
-conductor.redis.workflowNamespacePrefix=conductor
-
-# Namespace prefix for the dyno queues
-conductor.redis.queueNamespacePrefix=conductor_queues
-
-# No. of threads allocated to dyno-queues (optional)
-queues.dynomite.threads=10
-
-# By default with dynomite, we want the repairservice enabled
-conductor.app.workflowRepairServiceEnabled=true
-
-# Non-quorum port used to connect to local redis. Used by dyno-queues.
-# When using redis directly, set this to the same port as redis server
-# For Dynomite, this is 22122 by default or the local redis-server port used by Dynomite.
-conductor.redis.queuesNonQuorumPort=22122
-
-# Elastic search instance indexing is disabled.
-conductor.indexing.enabled=true
-conductor.elasticsearch.url=http://localhost:9200
-
-conductor.elasticsearch.indexReplicasCount=0
-
-# Load sample kitchen sink workflow
-loadSample=true
-
-conductor.elasticsearch.clusterHealthColor=yellow
diff --git a/docker/server/config/config-redis.properties b/docker/server/config/config-redis.properties
new file mode 100755
index 0000000000..12410110f5
--- /dev/null
+++ b/docker/server/config/config-redis.properties
@@ -0,0 +1,22 @@
+# Database persistence type.
+# Below are the properties for redis
+conductor.db.type=redis_standalone
+conductor.redis.hosts=rs:6379:us-east-1c
+conductor.redis-lock.serverAddress=redis://rs:6379
+conductor.redis.taskDefCacheRefreshInterval=1
+conductor.redis.workflowNamespacePrefix=conductor
+conductor.redis.queueNamespacePrefix=conductor_queues
+
+# Elastic search instance indexing is enabled.
+conductor.indexing.enabled=true
+
+# Transport address to elasticsearch
+conductor.elasticsearch.url=http://es:9200
+conductor.elasticsearch.indexName=conductor
+
+# Additional modules for metrics collection exposed to Prometheus (optional)
+conductor.metrics-prometheus.enabled=true
+management.endpoints.web.exposure.include=prometheus
+
+# Load sample kitchen sink workflow
+loadSample=true
diff --git a/docker/server/config/config.properties b/docker/server/config/config.properties
index b1463a6e5e..c966e873bd 100755
--- a/docker/server/config/config.properties
+++ b/docker/server/config/config.properties
@@ -1,37 +1,40 @@
-# Database persistence type.
-conductor.db.type=redis_standalone
-conductor.redis.hosts=localhost:6379:us-east-1c
-conductor.redis-lock.serverAddress=redis://localhost:6379
-conductor.redis.taskDefCacheRefreshInterval=1
-conductor.redis.workflowNamespacePrefix=conductor
-conductor.redis.queueNamespacePrefix=conductor_queues
+# See the README in the docker folder for a configuration guide
+conductor.db.type=SET_THIS
 
-# By default with dynomite, we want the repairservice enabled
-conductor.app.workflowRepairServiceEnabled=true
+# =====================================================#
+# Redis Configuration Properties
+# =====================================================#
+#conductor.db.type=redis_standalone
+# The last part MUST be us-east-1c; it is not used and is kept for backwards compatibility
+# conductor.redis.hosts=rs:6379:us-east-1c
+#
 
-# Elastic search instance indexing is enabled.
-conductor.indexing.enabled=true
+# conductor.redis-lock.serverAddress=redis://rs:6379
+# conductor.redis.taskDefCacheRefreshInterval=1
+# conductor.redis.workflowNamespacePrefix=conductor
+# conductor.redis.queueNamespacePrefix=conductor_queues
 
-# Transport address to elasticsearch
-conductor.elasticsearch.url=http://localhost:9200
-# Name of the elasticsearch cluster
-conductor.elasticsearch.indexName=conductor
-#conductor.event-queues.amqp.queueType=classic
-#conductor.event-queues.amqp.sequentialMsgProcessing=true
+# =====================================================#
+# Postgres Configuration Properties
+# =====================================================#
 
-# Additional modules for metrics collection exposed via logger (optional)
-# conductor.metrics-logger.enabled=true
-# conductor.metrics-logger.reportPeriodSeconds=15
+# conductor.db.type=postgres
+# spring.datasource.url=jdbc:postgresql://localhost:5432/postgres
+# spring.datasource.username=postgres
+# spring.datasource.password=postgres
+# Additionally, you can set the spring.datasource.XXX properties for connection pool size, etc.
 
-# Additional modules for metrics collection exposed to Prometheus (optional)
-# conductor.metrics-prometheus.enabled=true
-# management.endpoints.web.exposure.include=prometheus
+# If you want to use Postgres as the indexing store, set the following
+# conductor.indexing.enabled=true
+# conductor.indexing.type=postgres
 
-# To enable Workflow/Task Summary Input/Output JSON Serialization, use the following:
-# conductor.app.summary-input-output-json-serialization.enabled=true
+# When using Elasticsearch 7 for indexing, set the following
+
+# conductor.indexing.enabled=true
+# conductor.elasticsearch.url=http://es:9200
+# conductor.elasticsearch.version=7
+# conductor.elasticsearch.indexName=conductor
 
-# Load sample kitchen sink workflow
-loadSample=true
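
Usage note: with the startup.sh change above, the server falls back to /app/config/config.properties when CONFIG_PROP is unset, so a compose service selects a backend by exporting CONFIG_PROP with the name of one of the bundled files. A minimal sketch of such a service, assuming the conductor:server image built from DockerfileLocal; the choice of config-redis.properties here is only an example, not something this patch mandates:

services:
  conductor-server:
    image: conductor:server
    environment:
      # startup.sh resolves this name under /app/config
      - CONFIG_PROP=config-redis.properties

Alternatively, individual conductor.* properties can be passed straight through the environment block, which is the approach docker-compose-mysql.yaml takes above for the Redis host.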