diff --git a/aws/infrastructure.tf b/aws/infrastructure.tf
index 6ea83ada..78132ebf 100644
--- a/aws/infrastructure.tf
+++ b/aws/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -189,17 +188,6 @@ resource "aws_volume_attachment" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [ for key, volume in module.design.volumes:
-        "/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, "-", "")}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
       public_ip = contains(values.tags, "public") ? aws_eip.public_ip[x].public_ip : ""
@@ -212,6 +200,16 @@ locals {
         gpus = try(one(data.aws_ec2_instance_type.instance_type[values.prefix].gpus).count, 0)
         mig = lookup(values, "mig", null)
       }
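+      # An EBS volume's serial under /dev/disk/by-id is its volume id without the
+      # hyphen, hence the glob below; the volume's specs map is merged in with it.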
+      volumes = contains(keys(module.design.volume_per_instance), x) ? {
+        for pv_key, pv_values in var.volumes:
+        pv_key => {
+          for name, specs in pv_values:
+          name => merge(
+            { glob = "/dev/disk/by-id/*${replace(aws_ebs_volume.volumes["${x}-${pv_key}-${name}"].id, "-", "")}" },
+            specs,
+          )
+        } if contains(values.tags, pv_key)
+      } : {}
     }
   }
 }
diff --git a/azure/infrastructure.tf b/azure/infrastructure.tf
index 94261b35..f7dbcfa8 100644
--- a/azure/infrastructure.tf
+++ b/azure/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -149,17 +148,6 @@ resource "azurerm_virtual_machine_data_disk_attachment" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[volume.instance], replace(key, "${volume.instance}-", ""))}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   resource_group_name = var.azure_resource_group == "" ? azurerm_resource_group.group[0].name : var.azure_resource_group
 
   vmsizes = jsondecode(file("${path.module}/vmsizes.json"))
@@ -175,6 +163,16 @@ locals {
         gpus = local.vmsizes[values.type].gpus
         mig = lookup(values, "mig", null)
       }
+      volumes = contains(keys(module.design.volume_per_instance), x) ? {
+        for pv_key, pv_values in var.volumes:
+        pv_key => {
+          for name, specs in pv_values:
+          name => merge(
+            { glob = "/dev/disk/azure/scsi1/lun${index(module.design.volume_per_instance[x], "${pv_key}-${name}")}" },
+            specs,
+          )
+        } if contains(values.tags, pv_key)
+      } : {}
     }
   }
 }
diff --git a/common/configuration/main.tf b/common/configuration/main.tf
index 19f65cfc..6fea5e9f 100644
--- a/common/configuration/main.tf
+++ b/common/configuration/main.tf
@@ -10,7 +10,6 @@ variable "cloud_provider" { }
 variable "cloud_region" { }
 variable "domain_name" { }
 variable "cluster_name" { }
-variable "volume_devices" { }
 variable "guest_passwd" { }
 
 variable "generate_ssh_key" { }
@@ -73,7 +72,6 @@ locals {
   terraform = {
     instances = local.inventory
     tag_ip = local.tag_ip
-    volumes = var.volume_devices
     data = {
       sudoer_username = var.sudoer_username
       public_keys = local.ssh_authorized_keys
diff --git a/docs/README.md b/docs/README.md
index f5b33c71..f36d1ce7 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1816,33 +1816,10 @@ Puppet data hierarchy.
 
 ### 10.14 Expand a volume
 
-Volumes defined in the `volumes` map can be expanded at will. After their creation, you can
-increase their size in the `main.tf` then call `terraform apply` and the associated block
-device will be expanded.
-
-To benefit from the new storage, the following commands need to be ran as root
-on the instance to which the expanded volume is attached.
-
-1. Identify the physical volume path
-   ```
-   pvscan
-   ```
-2. Expand the physical volume
-   ```
-   pvresize /dev/vdxyz # replace vdxyz by the volume identify at step 1
-   ```
-3. Identify the volume group path
-   ```
-   lvdisplay
-   ```
-4. Expand the volume group using step volume group path identified
-   ```
-   lvextend -l '+100%FREE' -r /dev/project_vg/project
-   ```
-5. Resize the XFS filesystem:
-   ```
-   xfs_growfs /dev/project_vg/project
-   ```
+Volumes defined in the `volumes` map can be expanded at will. To enable online extension
+of a volume, add `enable_resize = true` to its specs map. You can then increase its size
+in `main.tf` and run `terraform apply`: the volume will be expanded by the cloud provider
+and the filesystem will be extended by Puppet.
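+
+For example, with the default `main.tf` layout in which the `nfs`-tagged instance carries
+a `home` volume (adjust the names to your cluster), the volume becomes resizable with:
+
+```hcl
+volumes = {
+  nfs = {
+    home = {
+      size          = 100
+      enable_resize = true
+    }
+  }
+}
+```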
 
 ## 11. Customize Magic Castle Terraform Files
diff --git a/docs/design.md b/docs/design.md
index 60ef7313..1f430dcb 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -201,26 +201,18 @@ the `module.design.instances` map.
 7. **Create the volumes**. In `infrastructure.tf`, define the `volumes` resource using
 `module.design.volumes`.
 
-8. **Consolidate the volume device information**. In `infrastructure.tf`, define a local
-variable named `volume_devices` implementing the following logic in HCL. Replace
-the line starting by `/dev/disk/by-id` with the proper logic that would match the volume
-resource to its device path from within the instance to which it is attached.
+8. **Consolidate the instances' information**. In `infrastructure.tf`, define a local
+variable named `inventory`: a map containing the following keys for each instance:
+`public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram, volumes). For
+the volumes, provide the paths under which each volume will be found on the instance to
+which it is attached; these paths are typically derived from the volume id. Here is an
+example:
    ```hcl
-    volume_devices = {
-      for ki, vi in var.volumes :
-      ki => {
-        for kj, vj in vi :
-        kj => [for key, volume in module.design.volumes :
-          "/dev/disk/by-id/*${substr(provider_volume.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
-          if key == "${volume["instance"]}-${ki}-${kj}"
-        ]
-      }
-    }
+    volumes = contains(keys(module.design.volume_per_instance), x) ? {
+      for pv_key, pv_values in var.volumes:
+      pv_key => {
+        for name, specs in pv_values:
+        name => ["/dev/disk/by-id/*${substr(provider.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}"]
+      } if contains(values.tags, pv_key)
+    } : {}
    ```
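+
+Note that the provider implementations in this repository go one step further: instead of
+a list with a single path, each volume name maps to its specs map merged with the computed
+path under a `glob` key, i.e. `name => merge({ glob = "..." }, specs)`.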
 
-9. **Consolidate the instances' information**. In `infrastructure.tf`, define a local variable named `inventory` that will be a map containing the following keys for each instance: `public_ip`, `local_ip`, `prefix`, `tags`, and `specs` (#cpu, #gpus, ram).
-
-10. **Create the instance configurations**. In `infrastructure.tf`, include the
+9. **Create the instance configurations**. In `infrastructure.tf`, include the
 `common/configuration` module like this:
    ```hcl
    module "configuration" {
@@ -231,7 +223,6 @@ resource to its device path from within the instance to which it is attached.
      sudoer_username = var.sudoer_username
      generate_ssh_key = var.generate_ssh_key
      public_keys = var.public_keys
-     volume_devices = local.volume_devices
      domain_name = module.design.domain_name
      cluster_name = var.cluster_name
      guest_passwd = var.guest_passwd
@@ -241,15 +232,15 @@ resource to its device path from within the instance to which it is attached.
      cloud_region = local.cloud_region
    }
    ```
-11. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
+10. **Create the instances**. In `infrastructure.tf`, define the `instances` resource using
 `module.design.instances_to_build` for the instance attributes and
 `module.configuration.user_data` for the initial configuration.
 
-12. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
+11. **Attach the volumes**. In `infrastructure.tf`, define the `attachments` resource using
 `module.design.volumes` and refer to the attribute `each.value.instance` to retrieve the
 instance's id to which the volume needs to be attached.
 
-13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
+12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
 that contains the attributes of instances that are publicly accessible from Internet and their ids.
    ```hcl
    locals {
@@ -260,7 +251,7 @@ that contains the attributes of instances that are publicly accessible from Inte
    }
    ```
 
-14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
+13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
 `common/provision` module like this
    ```hcl
    module "provision" {
@@ -360,21 +351,7 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
    }
    ```
 
-8. **Consolidate the volume devices' information**. Add the following snippet to `infrastructure.tf`:
-   ```hcl
-   volume_devices = {
-     for ki, vi in var.volumes :
-     ki => {
-       for kj, vj in vi :
-       kj => [for key, volume in module.design.volumes :
-         "/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${volume["instance"]}-${ki}-${kj}"].id, "d-", "")}"
-         if key == "${volume["instance"]}-${ki}-${kj}"
-       ]
-     }
-   }
-   ```
-
-9. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
+8. **Consolidate the instances' information**. Add the following snippet to `infrastructure.tf`:
    ```hcl
    locals {
      inventory = { for x, values in module.design.instances :
@@ -387,13 +364,20 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
          cpus = ...
          gpus = ...
          ram = ...
+         volumes = contains(keys(module.design.volume_per_instance), x) ? {
+           for pv_key, pv_values in var.volumes:
+           pv_key => {
+             for name, specs in pv_values:
+             name => ["/dev/disk/by-id/virtio-${replace(alicloud_disk.volumes["${x}-${pv_key}-${name}"].id, "d-", "")}"]
+           } if contains(values.tags, pv_key)
+         } : {}
        }
      }
    }
    ```
 
-10. **Create the instance configurations**. In `infrastructure.tf`, include the
+9. **Create the instance configurations**. In `infrastructure.tf`, include the
 `common/configuration` module like this:
    ```hcl
    module "configuration" {
@@ -404,7 +388,6 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
      sudoer_username = var.sudoer_username
      generate_ssh_key = var.generate_ssh_key
      public_keys = var.public_keys
-     volume_devices = local.volume_devices
      domain_name = module.design.domain_name
      cluster_name = var.cluster_name
      guest_passwd = var.guest_passwd
@@ -415,21 +398,21 @@ Alibaba cloud has an answer for each resource, so we will use this provider in t
    }
    ```
 
-11. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
+10. **Create the instances**. Add and complete the following snippet to `infrastructure.tf`:
    ```hcl
    resource "alicloud_instance" "instances" {
      for_each = module.design.instances
    }
    ```
 
-12. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
+11. **Attach the volumes**. Add and complete the following snippet to `infrastructure.tf`:
    ```hcl
    resource "alicloud_disk_attachment" "attachments" {
      for_each = module.design.volumes
    }
    ```
 
-13. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
+12. **Identify the public instances**. In `infrastructure.tf`, define a local variable named `public_instances`
 that contains the attributes of instances that are publicly accessible from Internet and their ids.
    ```hcl
    locals {
@@ -440,7 +423,7 @@ that contains the attributes of instances that are publicly accessible from Inte
    }
    ```
 
-14. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
+13. **Include the provision module to transmit Terraform data to the Puppet server**. In `infrastructure.tf`, include the
 `common/provision` module like this
    ```hcl
    module "provision" {
diff --git a/gcp/infrastructure.tf b/gcp/infrastructure.tf
index 7a6959a3..1583897d 100644
--- a/gcp/infrastructure.tf
+++ b/gcp/infrastructure.tf
@@ -21,7 +21,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -162,17 +161,6 @@ resource "google_compute_attached_disk" "attachments" {
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/by-id/google-${var.cluster_name}-${volume["instance"]}-${ki}-${kj}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
      public_ip = contains(values.tags, "public") ? google_compute_address.public_ip[x].address : ""
@@ -185,6 +173,16 @@ locals {
        gpus = try(data.external.machine_type[values["prefix"]].result["gpus"], lookup(values, "gpu_count", 0))
        mig = lookup(values, "mig", null)
      }
+      volumes = contains(keys(module.design.volume_per_instance), x) ? {
+        for pv_key, pv_values in var.volumes:
+        pv_key => {
+          for name, specs in pv_values:
+          name => merge(
+            { glob = "/dev/disk/by-id/google-${var.cluster_name}-${x}-${pv_key}-${name}" },
+            specs,
+          )
+        } if contains(values.tags, pv_key)
+      } : {}
     }
   }
 }
diff --git a/openstack/infrastructure.tf b/openstack/infrastructure.tf
index 306933e8..ffb4225c 100644
--- a/openstack/infrastructure.tf
+++ b/openstack/infrastructure.tf
@@ -16,7 +16,6 @@ module "configuration" {
   sudoer_username = var.sudoer_username
   generate_ssh_key = var.generate_ssh_key
   public_keys = var.public_keys
-  volume_devices = local.volume_devices
   domain_name = module.design.domain_name
   bastion_tag = module.design.bastion_tag
   cluster_name = var.cluster_name
@@ -107,6 +106,7 @@ resource "openstack_blockstorage_volume_v3" "volumes" {
   size = each.value.size
   volume_type = lookup(each.value, "type", null)
   snapshot_id = lookup(each.value, "snapshot", null)
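+  # Lets Cinder extend the volume while it is attached; exposed to users as
+  # `enable_resize` in a volume's specs map (see docs section 10.14).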
+  enable_online_resize = lookup(each.value, "enable_resize", false)
 }
 
 resource "openstack_compute_volume_attach_v2" "attachments" {
@@ -116,17 +116,6 @@
 }
 
 locals {
-  volume_devices = {
-    for ki, vi in var.volumes :
-    ki => {
-      for kj, vj in vi :
-      kj => [for key, volume in module.design.volumes :
-        "/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${volume["instance"]}-${ki}-${kj}"].id, 0, 20)}"
-        if key == "${volume["instance"]}-${ki}-${kj}"
-      ]
-    }
-  }
-
   inventory = { for x, values in module.design.instances :
     x => {
       public_ip = contains(values.tags, "public") ? local.public_ip[x] : ""
@@ -142,6 +131,16 @@ locals {
        ])
        mig = lookup(values, "mig", null)
      }
+      volumes = contains(keys(module.design.volume_per_instance), x) ? {
+        for pv_key, pv_values in var.volumes:
+        pv_key => {
+          for name, specs in pv_values:
+          name => merge(
+            { glob = "/dev/disk/by-id/*${substr(openstack_blockstorage_volume_v3.volumes["${x}-${pv_key}-${name}"].id, 0, 20)}" },
+            specs,
+          )
+        } if contains(values.tags, pv_key)
+      } : {}
     }
   }
 }