forked from hashicorp/terraform-google-vault
-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.tf
111 lines (87 loc) · 4.82 KB
/
main.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY A VAULT CLUSTER IN GOOGLE CLOUD
# This is an example of how to use the vault-cluster module to deploy a public Vault cluster in GCP. A public Vault
# cluster is NOT recommended for production usage, but it's the easiest way to try things out. For production usage,
# see the vault-cluster-private example, or if necessary, the vault-cluster-public example. Note that this Vault cluster
# uses Consul, running in a separate cluster, as its High Availability backend.
# ---------------------------------------------------------------------------------------------------------------------
# Configure the Google Cloud provider. All resources in this example are
# created in the project and region supplied via input variables.
provider "google" {
  project = var.gcp_project_id
  region  = var.gcp_region
}
terraform {
  # The modules referenced by this example use Terraform 0.12 (HCL2) syntax,
  # so pin the minimum CLI version accordingly; older versions cannot parse it.
  required_version = ">= 0.12"
}
# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE VAULT SERVER CLUSTER
# ---------------------------------------------------------------------------------------------------------------------
# Deploys the Vault server cluster itself via the local vault-cluster module.
module "vault_cluster" {
# When using these modules in your own templates, you will need to use a Git URL with a ref attribute that pins you
# to a specific version of the modules, such as the following example:
# source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-cluster?ref=v0.0.1"
source = "./modules/vault-cluster"
gcp_project_id = var.gcp_project_id
gcp_region = var.gcp_region
cluster_name = var.vault_cluster_name
cluster_size = var.vault_cluster_size
# The cluster name doubles as the network tag used by firewall rules below.
cluster_tag_name = var.vault_cluster_name
machine_type = var.vault_cluster_machine_type
image_project_id = var.image_project_id
source_image = var.vault_source_image
startup_script = data.template_file.startup_script_vault.rendered
# The GCS bucket (used as Vault's storage backend) is named after the cluster.
gcs_bucket_name = var.vault_cluster_name
gcs_bucket_location = var.gcs_bucket_location
gcs_bucket_storage_class = var.gcs_bucket_class
gcs_bucket_force_destroy = var.gcs_bucket_force_destroy
root_volume_disk_size_gb = var.root_volume_disk_size_gb
root_volume_disk_type = var.root_volume_disk_type
# NOTE(review): this example assigns PUBLIC IP addresses to the Vault nodes so
# it is easy to try out. For production, set this to false to keep the nodes
# private (the only way to reach private nodes via SSH is then to first SSH
# into another node that is not private) — see the vault-cluster-private example.
assign_public_ip_addresses = true
# To enable external access to the Vault Cluster, enter the approved CIDR Blocks or tags below.
# We enable health checks from the Consul Server cluster to Vault.
# SECURITY: 0.0.0.0/0 opens the Vault API to the entire internet. This is for
# demo convenience only; restrict to trusted CIDR blocks in real deployments.
allowed_inbound_cidr_blocks_api = ["0.0.0.0/0"]
allowed_inbound_tags_api = [var.consul_server_cluster_name]
}
# Render the Startup Script that will run on each Vault Instance on boot. This script will configure and start Vault.
# Render the boot-time Startup Script for every Vault instance; it configures
# and starts Vault, pointing it at the Consul cluster by tag name.
# NOTE: the template_file data source is superseded by the built-in
# templatefile() function in Terraform 0.12+, but it is kept here because
# module.vault_cluster references this data source's address.
data "template_file" "startup_script_vault" {
  template = file("${path.module}/examples/root-example/startup-script-vault.sh")

  vars = {
    consul_cluster_tag_name = var.consul_server_cluster_name
    vault_cluster_tag_name  = var.vault_cluster_name
    enable_vault_ui         = var.enable_vault_ui ? "--enable-ui" : ""
  }
}
# ---------------------------------------------------------------------------------------------------------------------
# DEPLOY THE CONSUL SERVER CLUSTER
# ---------------------------------------------------------------------------------------------------------------------
# Deploys the Consul server cluster that serves as Vault's HA backend.
module "consul_cluster" {
# Pinned to a released version of the upstream consul-cluster module via the ref attribute.
source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.4.0"
gcp_project_id = var.gcp_project_id
gcp_region = var.gcp_region
cluster_name = var.consul_server_cluster_name
# The cluster name doubles as the network tag that firewall rules key off.
cluster_tag_name = var.consul_server_cluster_name
cluster_size = var.consul_server_cluster_size
source_image = var.consul_server_source_image
machine_type = var.consul_server_machine_type
startup_script = data.template_file.startup_script_consul.rendered
# In a production setting, we strongly recommend only launching a Consul Server cluster as private nodes.
# Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private.
# This example deliberately assigns public IPs for ease of experimentation.
assign_public_ip_addresses = true
# Allow the Vault nodes (identified by their network tag) to reach Consul's DNS and HTTP API.
allowed_inbound_tags_dns = [var.vault_cluster_name]
allowed_inbound_tags_http_api = [var.vault_cluster_name]
}
# This Startup Script will run at boot to configure and start Consul on the Consul Server cluster nodes
# Render the boot-time Startup Script for the Consul server nodes; it
# configures and starts Consul, using the cluster tag for auto-join.
# NOTE: the template_file data source is superseded by the built-in
# templatefile() function in Terraform 0.12+, but it is kept here because
# module.consul_cluster references this data source's address.
data "template_file" "startup_script_consul" {
  template = file("${path.module}/examples/root-example/startup-script-consul.sh")

  vars = {
    cluster_tag_name = var.consul_server_cluster_name
  }
}