Commit

Merge pull request #12 from xunleii/refact-clean-module
Refact clean module
xunleii authored Dec 1, 2019
2 parents e20d879 + 8c85639 commit 7a3f5a9
Showing 10 changed files with 151 additions and 173 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -7,3 +7,6 @@

# .tfvars files
*.tfvars

# Intellij folders (used as TF IDE)
.idea/
46 changes: 30 additions & 16 deletions README.md
@@ -2,32 +2,39 @@

[![MIT Licensed](https://img.shields.io/badge/license-MIT-green.svg)](https://tldrlegal.com/license/mit-license)

Terraform module that creates a [k3s](https://k3s.io/) cluster with the given nodes. Currently, it only applies the k3s installation script, without any complex settings or HA clustering (maybe in future releases).
Terraform module that creates a [k3s](https://k3s.io/) cluster with the given nodes. Currently, it only applies the k3s installation script. HA clustering is not managed.
> :warning: **This module only works with k3s versions v1.0.0 or greater**
## Usage

``` terraform
``` hcl-terraform
module "k3s" {
source = "xunleii/k3s/module"
k3s_version = "v0.10.2"
k3s_version = "v1.0.0"
cluster_name = "my.k3s.local"
cluster_cidr = "10.0.0.0/16"
cluster_service_cidr = "10.1.0.0/16"
additional_tls_san = ["k3s.my.domain.com"]
cluster_cidr = {
pods = "10.0.0.0/16"
services = "10.1.0.0/16"
}
additional_flags = {
master = []
minion = [
"--node-label node-role.kubernetes.io/minion='true'",
master = [
"--flannel-backend=none",
"--tls-san k3s.my.domain.com"
]
common = [
"--no-flannel"
minion = [
"--flannel-backend=none",
]
}
master_node = {
# The node name will be automatically provided by
# the module using this value... any usage of --node-name
# in additional_flags will be ignored
name = "master"
# This IP will be used as k3s master node IP.... if you want to use a public
# address for the connection, use connection.host instead
ip = "10.123.45.67"
@@ -39,6 +46,9 @@ module "k3s" {
}
}
minion_nodes = {
# The node name will be automatically provided by
# the module using the key... any usage of --node-name
# in additional_flags will be ignored
k3s-node-01 = {
ip = "10.123.45.68"
connection = {
@@ -63,16 +73,20 @@

### Connection

The `connection` object can use all SSH [Terraform connection arguments](https://www.terraform.io/docs/provisioners/connection.html#argument-reference).

> Currently, only the SSH connection type is allowed for this module (it relies on the SSH command to copy the `node-token` file from the master to a minion).
> If you encounter problems during this step (you will see a `[NOTE]` log prefix just before), feel free to open an issue.
> This will be resolved when a provider that can fetch files remotely becomes available.
The `connection` object can use all [Terraform connection arguments](https://www.terraform.io/docs/provisioners/connection.html#argument-reference).
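For illustration, a minimal SSH `connection` block might look like the following sketch; the host, user and key path are placeholder values, and any of the standard Terraform SSH connection arguments may be used in their place.

``` hcl-terraform
master_node = {
  name = "master"
  ip   = "10.123.45.67"
  connection = {
    host        = "203.0.113.10"          # placeholder public address used to reach the node over SSH
    user        = "ubuntu"                # placeholder SSH user
    private_key = file("~/.ssh/id_rsa")   # any standard Terraform SSH connection argument works here
  }
}
```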

### Kubeconfig

Because Terraform doesn't allow us to fetch files remotely, you need to retrieve the kubeconfig manually (with an `external` data source, for example):

``` hcl-terraform
resource null_resource kubeconfig {
provisioner "local-exec" {
command = "scp ubuntu@203.123.45.67:/etc/rancher/k3s/k3s.yaml kubeconfig"
}
}
```
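The same retrieval can be sketched with an `external` data source, assuming `ssh` and `jq` are available on the machine running Terraform; the address and user below are placeholders.

``` hcl-terraform
data "external" "kubeconfig" {
  # The program must print a JSON object on stdout; jq wraps the raw
  # kubeconfig content into {"content": "..."}.
  program = [
    "bash", "-c",
    "ssh ubuntu@203.123.45.67 'cat /etc/rancher/k3s/k3s.yaml' | jq -Rs '{content: .}'"
  ]
}

# The kubeconfig is then available as data.external.kubeconfig.result.content
```

Note that an `external` data source runs on every plan and refresh, so the `null_resource` approach above may be preferable when the kubeconfig only needs to be fetched once.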

## License

terraform-module-k3s is released under the MIT License. See the bundled LICENSE file for details.
23 changes: 8 additions & 15 deletions examples/hcloud-k3s/instances.tf
@@ -1,32 +1,25 @@
resource "hcloud_ssh_key" "default" {
resource hcloud_ssh_key default {
name = "K3S terraform module - Provisionning SSH key"
public_key = var.ssh_key
}

resource "hcloud_network" "k3s" {
resource hcloud_network k3s {
name = "k3s-network"
ip_range = "10.0.0.0/8"
}

resource "hcloud_network_subnet" "k3s_nodes" {
resource hcloud_network_subnet k3s_nodes {
type = "server"
network_id = hcloud_network.k3s.id
network_zone = "eu-central"
ip_range = "10.254.1.0/24"
}

resource "hcloud_network_subnet" "k3s_internal" {
type = "server"
network_id = hcloud_network.k3s.id
network_zone = "eu-central"
ip_range = "10.0.0.0/15"
}

data "hcloud_image" "ubuntu" {
data hcloud_image ubuntu {
name = "ubuntu-18.04"
}

resource "hcloud_server" "master" {
resource hcloud_server master {
name = "k3s-master"

image = data.hcloud_image.ubuntu.name
@@ -42,13 +35,13 @@ resource "hcloud_server" "master" {
}
}

resource "hcloud_server_network" "master_network" {
resource hcloud_server_network master_network {
server_id = hcloud_server.master.id
network_id = hcloud_network.k3s.id
ip = cidrhost(hcloud_network_subnet.k3s_nodes.ip_range, 1)
}

resource "hcloud_server" "minions" {
resource hcloud_server minions {
count = var.minions_num
name = "k3s-minion-${count.index}"

@@ -65,7 +58,7 @@ resource "hcloud_server" "minions" {
}
}

resource "hcloud_server_network" "minions_network" {
resource hcloud_server_network minions_network {
count = length(hcloud_server.minions)
server_id = hcloud_server.minions[count.index].id
network_id = hcloud_network.k3s.id
33 changes: 17 additions & 16 deletions examples/hcloud-k3s/main.tf
@@ -1,29 +1,30 @@
terraform {
required_version = "~> 0.12.0"
}
provider hcloud {}

provider "hcloud" {}

module "k3s" {
module k3s {
source = "./../.."

k3s_version = "latest"
cluster_cidr = "10.0.0.0/16"
cluster_service_cidr = "10.1.0.0/16"
drain_timeout = "30s"
k3s_version = "latest"
cluster_cidr = {
pods = "10.42.0.0/16"
services = "10.43.0.0/16"
}
drain_timeout = "30s"

additional_flags = {
master = []
minion = [
"--node-label node-role.kubernetes.io/minion='true'",
master = [
"--disable-cloud-controller",
"--flannel-iface ens10",
"--kubelet-arg cloud-provider=external" # required to use https://github.com/hetznercloud/hcloud-cloud-controller-manager
]
common = [
"--no-flannel"
minion = [
"--flannel-iface ens10",
"--kubelet-arg cloud-provider=external" # required to use https://github.com/hetznercloud/hcloud-cloud-controller-manager
]
}

master_node = {
ip = hcloud_server_network.master_network.ip
name = "master"
ip = hcloud_server_network.master_network.ip
connection = {
host = hcloud_server.master.ipv4_address
}
6 changes: 3 additions & 3 deletions examples/hcloud-k3s/variables.tf
@@ -1,9 +1,9 @@
variable "ssh_key" {
variable ssh_key {
description = "SSH public Key content needed to provision the instances."
type = "string"
type = string
}

variable "minions_num" {
variable minions_num {
description = "Number of minion nodes."
default = 3
}
File renamed without changes.
9 changes: 9 additions & 0 deletions main.tf
@@ -1,5 +1,14 @@
terraform {
required_version = "~> 0.12"
required_providers {
cloudflare = "~> 2.1"
hcloud = "~> 1.15"
http = "~> 1.1"
local = "~> 1.4"
null = "~> 2.1"
random = "~> 2.2"
template = "~> 2.1"
}
}

resource "random_password" "k3s_cluster_secret" {
70 changes: 22 additions & 48 deletions master.tf
@@ -1,42 +1,18 @@
locals {
# Generates the master public IP address
master_host = lookup(var.master_node.connection, "host", var.master_node.ip)

# Generates custom TLS Subject Alternative Name for the cluster
tls_san_values = distinct(
concat(
[var.master_node.ip, local.master_host],
var.additional_tls_san
)
)
tls_san_opts = "--tls-san ${join(" --tls-san ", local.tls_san_values)}"

# Generates the master installation arguments
master_install_arg_list = concat(
[
"--node-ip ${var.master_node.ip}",
"--cluster-domain ${var.cluster_name}",
"--cluster-cidr ${var.cluster_cidr}",
"--service-cidr ${var.cluster_service_cidr}",
local.tls_san_opts,
],
var.additional_flags.master,
var.additional_flags.common,
)
master_install_args = join(" ", local.master_install_arg_list)

# Generates the master installation env vars
master_install_env_list = [
"INSTALL_K3S_VERSION=${local.k3s_version}",
"K3S_CLUSTER_SECRET=${random_password.k3s_cluster_secret.result}"
master_default_flags = [
"--node-ip ${var.master_node.ip}",
"--node-name ${var.master_node.name}",
"--cluster-domain ${var.cluster_name}",
"--cluster-cidr ${var.cluster_cidr.pods}",
"--service-cidr ${var.cluster_cidr.services}",
"--token ${random_password.k3s_cluster_secret.result}",
]
master_install_envs = join(" ", local.master_install_env_list)
master_install_flags = join(" ", concat(var.additional_flags.master, local.master_default_flags))
}

resource "null_resource" "k3s_master" {
resource null_resource k3s_master {
triggers = {
master_ip = sha1(var.master_node.ip)
install_args = sha1(local.master_install_args)
install_args = sha1(local.master_install_flags)
}

connection {
@@ -55,11 +31,10 @@ resource "null_resource" "k3s_master" {
agent_identity = lookup(var.master_node.connection, "agent_identity", null)
host_key = lookup(var.master_node.connection, "host_key", null)

# NOTE: Currently not working on Windows machines
# https = lookup(var.master_node.connection, "https", null)
# insecure = lookup(var.master_node.connection, "insecure", null)
# use_ntlm = lookup(var.master_node.connection, "use_ntlm", null)
# cacert = lookup(var.master_node.connection, "cacert", null)
https = lookup(var.master_node.connection, "https", null)
insecure = lookup(var.master_node.connection, "insecure", null)
use_ntlm = lookup(var.master_node.connection, "use_ntlm", null)
cacert = lookup(var.master_node.connection, "cacert", null)

bastion_host = lookup(var.master_node.connection, "bastion_host", null)
bastion_host_key = lookup(var.master_node.connection, "bastion_host_key", null)
@@ -71,14 +46,14 @@ resource "null_resource" "k3s_master" {
}

# Check if curl is installed
provisioner "remote-exec" {
provisioner remote-exec {
inline = [
"if ! command -V curl > /dev/null; then echo >&2 '[ERROR] curl must be installed to continue...'; exit 127; fi",
]
}

# Remove old k3s installation
provisioner "remote-exec" {
provisioner remote-exec {
inline = [
"if ! command -V k3s-uninstall.sh > /dev/null; then exit; fi",
"echo >&2 [WARN] K3S seems already installed on this node and will be uninstalled.",
@@ -87,7 +62,7 @@ resource "null_resource" "k3s_master" {
}
}

resource "null_resource" "k3s_master_installer" {
resource null_resource k3s_master_installer {
triggers = {
master_init = null_resource.k3s_master.id
version = local.k3s_version
@@ -110,11 +85,10 @@ resource "null_resource" "k3s_master_installer" {
agent_identity = lookup(var.master_node.connection, "agent_identity", null)
host_key = lookup(var.master_node.connection, "host_key", null)

# NOTE: Currently not working on Windows machines
# https = lookup(var.master_node.connection, "https", null)
# insecure = lookup(var.master_node.connection, "insecure", null)
# use_ntlm = lookup(var.master_node.connection, "use_ntlm", null)
# cacert = lookup(var.master_node.connection, "cacert", null)
https = lookup(var.master_node.connection, "https", null)
insecure = lookup(var.master_node.connection, "insecure", null)
use_ntlm = lookup(var.master_node.connection, "use_ntlm", null)
cacert = lookup(var.master_node.connection, "cacert", null)

bastion_host = lookup(var.master_node.connection, "bastion_host", null)
bastion_host_key = lookup(var.master_node.connection, "bastion_host_key", null)
@@ -128,7 +102,7 @@ resource "null_resource" "k3s_master_installer" {
# Install K3S server
provisioner "remote-exec" {
inline = [
"curl -sfL https://get.k3s.io | ${local.master_install_envs} sh -s - ${local.master_install_args}",
"curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${local.k3s_version} sh -s - ${local.master_install_flags}",
"until kubectl get nodes | grep -v '[WARN] No resources found'; do sleep 1; done"
]
}