Remove 'scp' dependency #4

Merged · 4 commits · Nov 16, 2019

15 changes: 15 additions & 0 deletions examples/hcloud-k3s/README.md
@@ -0,0 +1,15 @@
# K3S example for Hetzner-Cloud

The configuration in this directory creates the resources for a k3s cluster, including a network, a subnet, and instances.

## Usage

To run this example, execute:

```bash
$ terraform init
$ terraform plan
$ terraform apply
```

Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
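
The example's two inputs, `ssh_key` and `minions_num` (see `variables.tf` below), can also be passed non-interactively. A minimal sketch, assuming your public key lives at `~/.ssh/id_rsa.pub` and the `hcloud` provider reads its token from the environment:

```bash
# Illustrative only — adjust the key path, token, and node count to your setup.
export HCLOUD_TOKEN="<your Hetzner Cloud API token>"
terraform init
terraform apply \
  -var "ssh_key=$(cat ~/.ssh/id_rsa.pub)" \
  -var "minions_num=3"
```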
73 changes: 73 additions & 0 deletions examples/hcloud-k3s/instances.tf
@@ -0,0 +1,73 @@
resource "hcloud_ssh_key" "default" {
name = "K3S terraform module - Provisionning SSH key"
public_key = var.ssh_key
}

resource "hcloud_network" "k3s" {
name = "k3s-network"
ip_range = "10.0.0.0/8"
}

resource "hcloud_network_subnet" "k3s_nodes" {
type = "server"
network_id = hcloud_network.k3s.id
network_zone = "eu-central"
ip_range = "10.254.1.0/24"
}

resource "hcloud_network_subnet" "k3s_internal" {
type = "server"
network_id = hcloud_network.k3s.id
network_zone = "eu-central"
ip_range = "10.0.0.0/15"
}

data "hcloud_image" "ubuntu" {
name = "ubuntu-18.04"
}

resource "hcloud_server" "master" {
name = "k3s-master"

image = data.hcloud_image.ubuntu.name
server_type = "cx11-ceph"

ssh_keys = [
hcloud_ssh_key.default.id
]
labels = {
provisioner = "terraform",
engine = "k3s",
node_type = "master"
}
}

resource "hcloud_server_network" "master_network" {
server_id = hcloud_server.master.id
network_id = hcloud_network.k3s.id
ip = cidrhost(hcloud_network_subnet.k3s_nodes.ip_range, 1)
}

resource "hcloud_server" "minions" {
count = var.minions_num
name = "k3s-minion-${count.index}"

image = data.hcloud_image.ubuntu.name
server_type = "cx11-ceph"

ssh_keys = [
hcloud_ssh_key.default.id
]
labels = {
provisioner = "terraform",
engine = "k3s",
node_type = "minion"
}
}

resource "hcloud_server_network" "minions_network" {
count = length(hcloud_server.minions)
server_id = hcloud_server.minions[count.index].id
network_id = hcloud_network.k3s.id
ip = cidrhost(hcloud_network_subnet.k3s_nodes.ip_range, count.index + 2)
}
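
The private addresses are computed with `cidrhost()`: the master takes host 1 of the `k3s_nodes` subnet and minion *n* takes host *n + 2*, so they never collide. A quick check in `terraform console` (output shown for illustration only):

```bash
$ terraform console
> cidrhost("10.254.1.0/24", 1)
10.254.1.1
> cidrhost("10.254.1.0/24", 0 + 2)
10.254.1.2
```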
31 changes: 31 additions & 0 deletions examples/hcloud-k3s/main.tf
@@ -0,0 +1,31 @@
terraform {
required_version = "~> 0.12.0"
}

provider "hcloud" {}

module "k3s" {
source = "xunleii/k3s/module"

k3s_version = "latest"
cluster_cidr = "10.0.0.0/16"
cluster_service_cidr = "10.1.0.0/16"
drain_timeout = "30s"

master_node = {
ip = hcloud_server_network.master_network.ip
connection = {
host = hcloud_server.master.ipv4_address
}
}

minion_nodes = {
for i in range(length(hcloud_server.minions)) :
hcloud_server.minions[i].name => {
ip = hcloud_server_network.minions_network[i].ip
connection = {
host = hcloud_server.minions[i].ipv4_address
}
}
}
}
Empty file added examples/hcloud-k3s/outputs.tf
9 changes: 9 additions & 0 deletions examples/hcloud-k3s/variables.tf
@@ -0,0 +1,9 @@
variable "ssh_key" {
description = "SSH public Key content needed to provision the instances."
type = "string"
}

variable "minions_num" {
description = "Number of minion nodes."
default = 3
}
7 changes: 6 additions & 1 deletion main.tf
@@ -1,3 +1,8 @@
terraform {
required_version = ">= 0.12"
required_version = "~> 0.12"
}

resource "random_string" "k3s_cluster_secret" {
length = 48
special = false
}
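
This `random_string` is the heart of the PR: rather than `scp`-ing the master's node-token onto every minion, the same `K3S_CLUSTER_SECRET` is injected into both the server and agent installers below. The commands the provisioners end up running look roughly like this (all values illustrative, not taken from the diff):

```bash
# Master (sketch — version, secret, IPs and CIDRs come from the module inputs):
curl -sfL https://get.k3s.io | \
  INSTALL_K3S_VERSION=v1.0.0 K3S_CLUSTER_SECRET=<48-char-secret> \
  sh -s - --node-ip 10.254.1.1 --cluster-domain cluster.local \
          --cluster-cidr 10.0.0.0/16 --service-cidr 10.1.0.0/16 --tls-san 10.254.1.1

# Minion (same secret, pointed at the master):
curl -sfL https://get.k3s.io | \
  INSTALL_K3S_VERSION=v1.0.0 K3S_URL=https://10.254.1.1:6443 K3S_CLUSTER_SECRET=<48-char-secret> \
  sh -s - --node-ip 10.254.1.2
```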
47 changes: 34 additions & 13 deletions master.tf
@@ -1,28 +1,38 @@
locals {
# Generates the master public IP address
master_host = lookup(var.master_node.connection, "host", var.master_node.ip)

default_tls_san = [
var.master_node.ip,
lookup(var.master_node.connection, "host", var.master_node.ip)
]
tls_san_values = concat(local.default_tls_san, var.additional_tls_san)
tls_san_opt = "--tls-san ${join(" --tls-san ", local.tls_san_values)}"

install_opts = [
# Generates custom TLS Subject Alternative Name for the cluster
tls_san_values = distinct(
concat(
[var.master_node.ip, local.master_host],
var.additional_tls_san
)
)
tls_san_opts = "--tls-san ${join(" --tls-san ", local.tls_san_values)}"

# Generates the master installation arguments
master_install_arg_list = [
"--node-ip ${var.master_node.ip}",
"--cluster-domain ${var.cluster_name}",
"--cluster-cidr ${var.cluster_cidr}",
"--service-cidr ${var.cluster_service_cidr}",
local.tls_san_opt,
local.tls_san_opts,
]
install_opt = join(" ", local.install_opts)
master_install_args = join(" ", local.master_install_arg_list)

# Generates the master installation env vars
master_install_env_list = [
"INSTALL_K3S_VERSION=${local.k3s_version}",
"K3S_CLUSTER_SECRET=${random_string.k3s_cluster_secret.result}"
]
master_install_envs = join(" ", local.master_install_env_list)
}

resource "null_resource" "k3s_master" {
triggers = {
master_ip = sha1(var.master_node.ip)
master_input = sha1(local.master_host)
install_opt = sha1(local.install_opt)
install_args = sha1(local.master_install_args)
}

connection {
@@ -56,11 +66,21 @@ resource "null_resource" "k3s_master" {
bastion_certificate = lookup(var.master_node.connection, "bastion_certificate", null)
}

# Check if curl is installed
provisioner "remote-exec" {
inline = [
"if ! command -V curl > /dev/null; then echo >&2 '[ERROR] curl must be installed to continue...'; exit 127; fi",
]
}

# Remove old k3s installation
provisioner "remote-exec" {
inline = [
"if ! command -V k3s-uninstall.sh > /dev/null; then exit; fi",
"echo >&2 [WARN] K3S seems already installed on this node and will be uninstalled.",
"k3s-uninstall.sh",
]
}
}

resource "null_resource" "k3s_master_installer" {
@@ -101,9 +121,10 @@ resource "null_resource" "k3s_master_installer" {
bastion_certificate = lookup(var.master_node.connection, "bastion_certificate", null)
}

# Install K3S server
provisioner "remote-exec" {
inline = [
"curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${local.k3s_version} sh -s - ${local.install_opt}",
"curl -sfL https://get.k3s.io | ${local.master_install_envs} sh -s - ${local.master_install_args}",
"until kubectl get nodes | grep -v '[WARN] No resources found'; do sleep 1; done"
]
}
34 changes: 19 additions & 15 deletions minions.tf
@@ -1,10 +1,10 @@
locals {
minion_install_opts = [
minion_install_env_list = [
"INSTALL_K3S_VERSION=${local.k3s_version}",
"K3S_URL=https://${var.master_node.ip}:6443",
"K3S_TOKEN=$(cat /etc/rancher/k3s/server/node-token)"
"K3S_CLUSTER_SECRET=${random_string.k3s_cluster_secret.result}"
]
minion_install_opt = join(" ", local.minion_install_opts)
minion_install_envs = join(" ", local.minion_install_env_list)
}

resource "null_resource" "k3s_minions" {
@@ -47,16 +47,19 @@ resource "null_resource" "k3s_minions" {
bastion_certificate = lookup(each.value.connection, "bastion_certificate", null)
}

# Check if curl is installed
provisioner "remote-exec" {
inline = [
"if ! command -V curl > /dev/null; then echo >&2 '[ERROR] curl must be installed to continue...'; exit 127; fi",
"if command -V k3s-agent-uninstall.sh > /dev/null; then k3s-agent-uninstall.sh; fi",
"echo >&2 [NOTE] Importing node-token is mandatory and require some SSH configuration.",
"echo >&2 [NOTE] If the next command fails, feel free to open an issue on the module repository.",
"echo >&2 [NOTE] This behaviour will change only when we are able to download a file from the remote.",
"rm -rf /etc/rancher/k3s/server",
"mkdir -p /etc/rancher/k3s/server",
"scp -P ${lookup(var.master_node.connection, "port", "22")} -o 'StrictHostKeyChecking no' ${lookup(var.master_node.connection, "user", "root")}@${var.master_node.ip}:/var/lib/rancher/k3s/server/node-token /etc/rancher/k3s/server",
]
}

# Remove old k3s installation
provisioner "remote-exec" {
inline = [
"if ! command -V k3s-agent-uninstall.sh > /dev/null; then exit; fi",
"echo >&2 [WARN] K3S seems already installed on this node and will be uninstalled.",
"k3s-agent-uninstall.sh",
]
}
}
@@ -100,9 +103,10 @@ resource "null_resource" "k3s_minions_installer" {
bastion_certificate = lookup(each.value.connection, "bastion_certificate", null)
}

# Install K3S agent
provisioner "remote-exec" {
inline = [
"curl -sfL https://get.k3s.io | ${local.minion_install_opt} sh -s - --node-ip ${each.value.ip}"
"curl -sfL https://get.k3s.io | ${local.minion_install_envs} sh -s - --node-ip ${each.value.ip}"
]
}
}
@@ -111,8 +115,7 @@ resource "null_resource" "k3s_minions_uninstaller" {
for_each = var.minion_nodes

triggers = {
minion = null_resource.k3s_minions[each.key].id
minion_ip = each.value.ip
minion = null_resource.k3s_minions[each.key].id
}

connection {
@@ -146,11 +149,12 @@ resource "null_resource" "k3s_minions_uninstaller" {
bastion_certificate = lookup(var.master_node.connection, "bastion_certificate", null)
}

# Drain and delete the removed node
provisioner "remote-exec" {
when = "destroy"
inline = [
"NODE=$(kubectl get node -l 'k3s.io/internal-ip = ${self.triggers.minion_ip}' | tail -n 1 | awk '{printf $1}')",
"kubectl drain $${NODE} --force --delete-local-data --ignore-daemonsets",
"NODE=$(kubectl get node -l 'k3s.io/internal-ip = ${null_resource.k3s_minions[each.key].triggers.minion_ip}' | tail -n 1 | awk '{printf $1}')",
"kubectl drain $${NODE} --force --delete-local-data --ignore-daemonsets --timeout ${var.drain_timeout}",
"kubectl delete node $${NODE}",
"sed -i \"/$${NODE}$/d\" /var/lib/rancher/k3s/server/cred/node-passwd",
]
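
On destroy, the minion is now located by its internal-IP label, drained with the configurable timeout, then deleted. Roughly what runs on the master (values illustrative, assuming `drain_timeout = "30s"`):

```bash
# Destroy-time cleanup executed on the master (illustrative values):
NODE=$(kubectl get node -l 'k3s.io/internal-ip = 10.254.1.2' | tail -n 1 | awk '{printf $1}')
kubectl drain "$NODE" --force --delete-local-data --ignore-daemonsets --timeout 30s
kubectl delete node "$NODE"
```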
6 changes: 6 additions & 0 deletions variables.tf
@@ -36,6 +36,12 @@ variable "master_node" {
})
}

variable "drain_timeout" {
description = "The length of time to wait before giving up the node draining. Infinite by default."
type = string
default = "0s"
}

variable "minion_nodes" {
description = "List of minion configuration nodes."
type = map(object({
2 changes: 1 addition & 1 deletion version.tf
@@ -3,5 +3,5 @@ data "http" "k3s_version" {
}

locals {
k3s_version = "${var.k3s_version == "latest" ? "${jsondecode(data.http.k3s_version.body).tag_name}" : var.k3s_version}"
k3s_version = var.k3s_version == "latest" ? jsondecode(data.http.k3s_version.body).tag_name : var.k3s_version
}
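
For reference, when `k3s_version = "latest"` the response of the `http` data source above is decoded and its `tag_name` field is used. Assuming that (elided) URL points at the k3s GitHub releases API, the lookup is equivalent to something like:

```bash
# Assumed endpoint — the real URL lives in the part of version.tf not shown in this diff.
curl -s https://api.github.com/repos/rancher/k3s/releases/latest | jq -r '.tag_name'
```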