
fix: correct some installation issues (#44 & #45) #46

Merged — 2 commits into master from fix/node-install on Feb 10, 2021

Conversation

@xunleii (Owner) commented Feb 10, 2021

Tries to fix #44 and #45, both of which occur during node installation:

  • Update the README.md examples
  • Perform node labeling after installation (see the sketch below)
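
A minimal sketch of the second change, assuming the labeling step is modeled as a null_resource that now depends on the install resource (the resource names mirror the plan below, but the variable, triggers, and kubectl wiring are assumptions, not the module's actual code):

```hcl
# Hypothetical sketch: apply node labels only after k3s installation.
# var.agent_labels is an assumed input, e.g.
#   { "k3s-agent-0" = "node.kubernetes.io/pool=gpu" }
resource "null_resource" "agents_label" {
  for_each = var.agent_labels

  # Labeling now waits for installation, so the Node object already
  # exists in the cluster when kubectl runs.
  depends_on = [null_resource.agents_install]

  triggers = {
    label = each.value
  }

  provisioner "local-exec" {
    command = "kubectl label node ${each.key} '${each.value}' --overwrite"
  }
}
```

Labeling a node before it has registered appears to be what the linked issues report (`metadata.name: Invalid value`); ordering the label step after installation avoids that window.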

@github-actions (Contributor) commented:

Terraform Init: success
Terraform Validate: success
Terraform Plan: success

Show Plan
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  # hcloud_network.k3s will be created
  + resource "hcloud_network" "k3s" {
      + id       = (known after apply)
      + ip_range = "10.0.0.0/8"
      + name     = "k3s-network"
    }

  # hcloud_network_subnet.k3s_nodes will be created
  + resource "hcloud_network_subnet" "k3s_nodes" {
      + gateway      = (known after apply)
      + id           = (known after apply)
      + ip_range     = "10.254.1.0/24"
      + network_id   = (known after apply)
      + network_zone = "eu-central"
      + type         = "server"
    }

  # hcloud_server.agents[0] will be created
  + resource "hcloud_server" "agents" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "agent"
          + "nodepool"    = "gpu"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-agent-0"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server.agents[1] will be created
  + resource "hcloud_server" "agents" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "agent"
          + "nodepool"    = "general"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-agent-1"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server.agents[2] will be created
  + resource "hcloud_server" "agents" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "agent"
          + "nodepool"    = "general"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-agent-2"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server.control_planes[0] will be created
  + resource "hcloud_server" "control_planes" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "control-plane"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-control-plane-0"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server.control_planes[1] will be created
  + resource "hcloud_server" "control_planes" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "control-plane"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-control-plane-1"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server.control_planes[2] will be created
  + resource "hcloud_server" "control_planes" {
      + backup_window = (known after apply)
      + backups       = false
      + datacenter    = (known after apply)
      + id            = (known after apply)
      + image         = "ubuntu-20.04"
      + ipv4_address  = (known after apply)
      + ipv6_address  = (known after apply)
      + ipv6_network  = (known after apply)
      + keep_disk     = false
      + labels        = {
          + "engine"      = "k3s"
          + "node_type"   = "control-plane"
          + "provisioner" = "terraform"
        }
      + location      = (known after apply)
      + name          = "k3s-control-plane-2"
      + server_type   = "cx11-ceph"
      + ssh_keys      = (known after apply)
      + status        = (known after apply)
    }

  # hcloud_server_network.agents_network[0] will be created
  + resource "hcloud_server_network" "agents_network" {
      + id          = (known after apply)
      + ip          = "10.254.1.4"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_server_network.agents_network[1] will be created
  + resource "hcloud_server_network" "agents_network" {
      + id          = (known after apply)
      + ip          = "10.254.1.5"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_server_network.agents_network[2] will be created
  + resource "hcloud_server_network" "agents_network" {
      + id          = (known after apply)
      + ip          = "10.254.1.6"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_server_network.control_planes[0] will be created
  + resource "hcloud_server_network" "control_planes" {
      + id          = (known after apply)
      + ip          = "10.254.1.1"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_server_network.control_planes[1] will be created
  + resource "hcloud_server_network" "control_planes" {
      + id          = (known after apply)
      + ip          = "10.254.1.2"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_server_network.control_planes[2] will be created
  + resource "hcloud_server_network" "control_planes" {
      + id          = (known after apply)
      + ip          = "10.254.1.3"
      + mac_address = (known after apply)
      + server_id   = (known after apply)
      + subnet_id   = (known after apply)
    }

  # hcloud_ssh_key.default will be created
  + resource "hcloud_ssh_key" "default" {
      + fingerprint = (known after apply)
      + id          = (known after apply)
      + name        = "K3S terraform module - Provisionning SSH key"
    }

  # module.k3s.null_resource.agents_drain["k3s-agent-0_node"] will be created
  + resource "null_resource" "agents_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_drain["k3s-agent-1_node"] will be created
  + resource "null_resource" "agents_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_drain["k3s-agent-2_node"] will be created
  + resource "null_resource" "agents_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_install["k3s-agent-0_node"] will be created
  + resource "null_resource" "agents_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.null_resource.agents_install["k3s-agent-1_node"] will be created
  + resource "null_resource" "agents_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.null_resource.agents_install["k3s-agent-2_node"] will be created
  + resource "null_resource" "agents_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "231c2099d0850d7cc82da54dad821067a7f24c4f"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.null_resource.agents_label["k3s-agent-0_node|node.kubernetes.io/pool"] will be created
  + resource "null_resource" "agents_label" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_label["k3s-agent-1_node|node.kubernetes.io/pool"] will be created
  + resource "null_resource" "agents_label" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_label["k3s-agent-2_node|node.kubernetes.io/pool"] will be created
  + resource "null_resource" "agents_label" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.agents_taint["k3s-agent-0_node|dedicated"] will be created
  + resource "null_resource" "agents_taint" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[0] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[1] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[2] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[3] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[4] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.k8s_ca_certificates_install[5] will be created
  + resource "null_resource" "k8s_ca_certificates_install" {
      + id = (known after apply)
    }

  # module.k3s.null_resource.servers_drain["k3s-control-plane-0"] will be created
  + resource "null_resource" "servers_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.servers_drain["k3s-control-plane-1"] will be created
  + resource "null_resource" "servers_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.servers_drain["k3s-control-plane-2"] will be created
  + resource "null_resource" "servers_drain" {
      + id       = (known after apply)
      + triggers = (known after apply)
    }

  # module.k3s.null_resource.servers_install["k3s-control-plane-0"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.null_resource.servers_install["k3s-control-plane-1"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.null_resource.servers_install["k3s-control-plane-2"] will be created
  + resource "null_resource" "servers_install" {
      + id       = (known after apply)
      + triggers = {
          + "on_immutable_changes" = "5bbc5b363504fa478032f0d97c877f884a076d94"
          + "on_new_version"       = "v1.20.2+k3s1"
        }
    }

  # module.k3s.random_password.k3s_cluster_secret will be created
  + resource "random_password" "k3s_cluster_secret" {
      + id          = (known after apply)
      + length      = 48
      + lower       = true
      + min_lower   = 0
      + min_numeric = 0
      + min_special = 0
      + min_upper   = 0
      + number      = true
      + result      = (sensitive value)
      + special     = false
      + upper       = true
    }

  # module.k3s.tls_cert_request.master_user[0] will be created
  + resource "tls_cert_request" "master_user" {
      + cert_request_pem = (known after apply)
      + id               = (known after apply)
      + key_algorithm    = "ECDSA"
      + private_key_pem  = (sensitive value)

      + subject {
          + common_name  = "master-user"
          + organization = "system:masters"
        }
    }

  # module.k3s.tls_locally_signed_cert.master_user[0] will be created
  + resource "tls_locally_signed_cert" "master_user" {
      + allowed_uses          = [
          + "key_encipherment",
          + "digital_signature",
          + "client_auth",
        ]
      + ca_cert_pem           = (known after apply)
      + ca_key_algorithm      = "ECDSA"
      + ca_private_key_pem    = (sensitive value)
      + cert_pem              = (known after apply)
      + cert_request_pem      = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)
    }

  # module.k3s.tls_private_key.kubernetes_ca[0] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.kubernetes_ca[1] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.kubernetes_ca[2] will be created
  + resource "tls_private_key" "kubernetes_ca" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_private_key.master_user[0] will be created
  + resource "tls_private_key" "master_user" {
      + algorithm                  = "ECDSA"
      + ecdsa_curve                = "P384"
      + id                         = (known after apply)
      + private_key_pem            = (sensitive value)
      + public_key_fingerprint_md5 = (known after apply)
      + public_key_openssh         = (known after apply)
      + public_key_pem             = (known after apply)
      + rsa_bits                   = 2048
    }

  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["0"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-client-ca"
        }
    }

  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["1"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-server-ca"
        }
    }

  # module.k3s.tls_self_signed_cert.kubernetes_ca_certs["2"] will be created
  + resource "tls_self_signed_cert" "kubernetes_ca_certs" {
      + allowed_uses          = [
          + "critical",
          + "digitalSignature",
          + "keyEncipherment",
          + "keyCertSign",
        ]
      + cert_pem              = (known after apply)
      + early_renewal_hours   = 0
      + id                    = (known after apply)
      + is_ca_certificate     = true
      + key_algorithm         = "ECDSA"
      + private_key_pem       = (sensitive value)
      + ready_for_renewal     = true
      + validity_end_time     = (known after apply)
      + validity_period_hours = 876600
      + validity_start_time   = (known after apply)

      + subject {
          + common_name = "kubernetes-request-header-key-ca"
        }
    }

Plan: 47 to add, 0 to change, 0 to destroy.

Changes to Outputs:
  + summary = {
      + agents  = [
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "gpu"
                }
              + name        = "k3s-agent-0"
              + taints      = {
                  + dedicated = "gpu:NoSchedule"
                }
            },
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "general"
                }
              + name        = "k3s-agent-1"
              + taints      = {
                  + dedicated = null
                }
            },
          + {
              + annotations = []
              + labels      = {
                  + node.kubernetes.io/pool = "general"
                }
              + name        = "k3s-agent-2"
              + taints      = {
                  + dedicated = null
                }
            },
        ]
      + servers = [
          + {
              + annotations = {
                  + server_id = 0
                }
              + labels      = []
              + name        = "k3s-control-plane-0"
              + taints      = []
            },
          + {
              + annotations = {
                  + server_id = 1
                }
              + labels      = []
              + name        = "k3s-control-plane-1"
              + taints      = []
            },
          + {
              + annotations = {
                  + server_id = 2
                }
              + labels      = []
              + name        = "k3s-control-plane-2"
              + taints      = []
            },
        ]
      + version = "v1.20.2+k3s1"
    }

------------------------------------------------------------------------

Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.

Pusher: @xunleii, Action: pull_request, Workdir: examples/hcloud-k3s, Workflow: Terraform GitHub Actions

@xunleii merged commit b69151d into master on Feb 10, 2021
@xunleii deleted the fix/node-install branch on February 10, 2021 at 13:44
@xunleii linked an issue on Feb 10, 2021 that may be closed by this pull request
Successfully merging this pull request may close these issues.

failed to start k3s node with label node-role.kubernetes.io/*** register: metadata.name: Invalid value