Verified Commit 54650547 authored by Hugo's avatar Hugo
Browse files

config plus souple + addons

parent b0316945
......@@ -4,4 +4,5 @@ terraform.tfstate.backup
terraform.tfstate.d
values.auto.tfvars
k0sctl.yaml
kubeconfig
\ No newline at end of file
kubeconfig
*.log
\ No newline at end of file
# Cluster description, read both by terraform (yamldecode in main.tf) and by
# the Ansible playbooks. Nesting reconstructed: the scrape flattened it.
name: ada-lovelace
# Hetzner Cloud SSH key IDs — quoted so they stay strings, not integers.
sshKeys: ["3245559"]
location: "fsn1"
network:
  id: 1116209
  zone: eu-central
  # Private subnet the nodes live in; also used by k0s for IP autodetection.
  cloudCidr: 10.0.1.0/24
# Load balancer in front of the Kubernetes API.
api:
  lbType: lb11
  privateIp: 10.0.1.5
# Load balancer in front of the ingress workers.
ingress:
  lbType: lb11
  privateIp: 10.0.1.6
  # Quoted so the ports are strings, never sexagesimal integers.
  ports: ["80", "443"]
# Node inventory: three controllers (no volumes) and four workers, each with
# two 10 GiB volumes — vol1 joins the LVM volume group (vg: true), vol2 is a
# plain disk (vg: false). Consumed by terraform and by the Ansible
# "register hosts" task (node_pvs / node_disks filters on the vg flag).
nodes:
  c1:
    type: vm
    role: controller
    serverType: cx21
    privateIp: 10.0.1.11
    ingress: false
    volumes: {}
  c2:
    type: vm
    role: controller
    serverType: cx21
    privateIp: 10.0.1.12
    ingress: false
    volumes: {}
  c3:
    type: vm
    role: controller
    serverType: cx21
    privateIp: 10.0.1.13
    ingress: false
    volumes: {}
  w1:
    type: vm
    role: worker
    serverType: cx21
    privateIp: 10.0.1.21
    ingress: false
    volumes:
      vol1:
        size: 10
        vg: true
      vol2:
        size: 10
        vg: false
  w2:
    type: vm
    role: worker
    serverType: cx21
    privateIp: 10.0.1.22
    ingress: false
    volumes:
      vol1:
        size: 10
        vg: true
      vol2:
        size: 10
        vg: false
  w3:
    type: vm
    role: worker
    serverType: cx21
    privateIp: 10.0.1.23
    # Only w3/w4 receive ingress traffic.
    ingress: true
    volumes:
      vol1:
        size: 10
        vg: true
      vol2:
        size: 10
        vg: false
  w4:
    type: vm
    role: worker
    serverType: cx21
    privateIp: 10.0.1.24
    ingress: true
    volumes:
      vol1:
        size: 10
        vg: true
      vol2:
        size: 10
        vg: false
......@@ -7,4 +7,3 @@
state: absent
variables:
hcloud_token: "{{ lookup('env', 'HCLOUD_TOKEN') }}"
env: "{{ lookup('env', 'ENV') }}"
---
# k0s helm-extension Chart: dropped into /var/lib/k0s/manifests, the k0s
# manifest controller installs the chart automatically.
apiVersion: helm.k0sproject.io/v1beta1
kind: Chart
metadata:
  name: cert-manager
  namespace: "kube-system"
spec:
  chartName: jetstack/cert-manager
  namespace: cert-manager
  # Quoted so the version can never be read as a number.
  version: "1.3.1"
  values: ""
---
# k0s helm-extension Chart for the ingress controller; deployed by the k0s
# manifest controller from /var/lib/k0s/manifests.
apiVersion: helm.k0sproject.io/v1beta1
kind: Chart
metadata:
  name: ingress-nginx
  namespace: "kube-system"
spec:
  chartName: ingress-nginx/ingress-nginx
  namespace: ingress-nginx
  # Quoted so the version can never be read as a number.
  version: "3.30.0"
  values: ""
---
# k0s helm-extension Chart for monitoring; deployed by the k0s manifest
# controller from /var/lib/k0s/manifests.
apiVersion: helm.k0sproject.io/v1beta1
kind: Chart
metadata:
  name: prometheus-stack
  namespace: "kube-system"
spec:
  chartName: prometheus-community/kube-prometheus-stack
  namespace: monitoring
  # Quoted so the version can never be read as a number.
  version: "15.4.5"
  values: ""
......@@ -7,6 +7,7 @@
hosts: all
remote_user: root
gather_facts: false
any_errors_fatal: true
handlers:
- name: reboot
reboot: {}
......@@ -21,4 +22,8 @@
tasks:
- import_tasks: "tasks/deploy.yaml"
# Push the addon manifests from the controllers so k0s picks them up.
- name: deploy addons
  hosts: controller
  remote_user: root
  tasks:
    - import_tasks: "tasks/addons.yaml"
# Mirror each addon directory into the k0s manifest folder; k0s applies
# everything under /var/lib/k0s/manifests automatically.
- name: synchronization addons
  ansible.posix.synchronize:
    src: "addons/{{item}}/"
    dest: "/var/lib/k0s/manifests/{{item}}"
    # Drop remote manifests that were removed from the source tree.
    delete: true
  loop:
    - helm
    - openebs
......@@ -2,6 +2,12 @@
template:
src: k0sctl.yaml.j2
dest: k0sctl.yaml
register: k0sctlConfig
# Both steps run only when the rendered k0sctl.yaml actually changed
# (k0sctlConfig is registered by the preceding template task).
- name: run k0sctl
  shell: k0sctl apply
  when: k0sctlConfig.changed
- name: fetch kubeconfig
  shell: k0sctl kubeconfig > kubeconfig
  when: k0sctlConfig.changed
......@@ -4,22 +4,19 @@
state: present
variables:
hcloud_token: "{{ lookup('env', 'HCLOUD_TOKEN') }}"
env: "{{ lookup('env', 'ENV') }}"
register: tf
# Register every node from the terraform "cluster" output as an in-memory
# Ansible host, grouped by its role (controller / worker). The stale
# pre-refactor tasks reading controller_ipv4 / worker_ipv4 are removed.
- name: register hosts
  add_host:
    name: "{{ item.value.publicIp }}"
    groups: "{{ item.value.role }}"
    # Full node description, available as hostvar on each host.
    node: "{{ item.value }}"
    # Devices that join the LVM volume group (vg: true) ...
    node_pvs: '{{ item.value.volumes | dict2items | map(attribute="value") | selectattr("vg", "true") | map(attribute="device") }}'
    # ... and plain data disks (vg: false).
    node_disks: '{{ item.value.volumes | dict2items | map(attribute="value") | selectattr("vg", "false") | map(attribute="device") }}'
  loop: "{{ tf.outputs.cluster.value.nodes | dict2items }}"
  loop_control:
    # Keep the log terse: show only the node key (c1, w1, ...).
    label: "{{ item.key }}"
# Expose the whole terraform cluster output as a fact for later plays
# (used by the k0sctl.yaml.j2 template, among others). The stale
# control_plane_ipv4 / ingress_ipv4 facts are removed.
- name: register cluster config
  set_fact:
    cluster: "{{ tf.outputs.cluster.value }}"
# wireguard backs the calico encryption; parted is needed by the
# disk-inspection task below. The duplicate "name:" left by the diff
# (scalar "wireguard" vs the list) is resolved to the list form.
- name: install packages
  ansible.builtin.package:
    name:
      - wireguard
      - parted
# Kernel modules to load at boot; a reboot is required for them to apply,
# hence the handler notification.
- name: copy modules.conf
  ansible.builtin.copy:
    src: modules.conf
    dest: /etc/modules-load.d/modules.conf
  notify: reboot
# Prepare the k0s data layout and point the default kubelet path at it.
- name: create k0s folder
  file:
    path: /var/lib/k0s
    state: directory
- name: create kubelet folder
  file:
    path: /var/lib/k0s/kubelet
    state: directory
# /var/lib/kubelet -> /var/lib/k0s/kubelet so tools expecting the standard
# kubelet path keep working.
- name: create kubelet symbolic link
  file:
    src: "/var/lib/k0s/kubelet"
    dest: "/var/lib/kubelet"
    state: link
# Inspect the plain data disks (node_disks fact set in register hosts).
- name: read device information
  community.general.parted:
    device: "{{ item }}"
    unit: GiB
  loop: "{{ node_disks }}"
  register: diskinfo
- debug:
    var: diskinfo
# Build/extend the shared volume group from the vg-flagged devices;
# skipped entirely on nodes without such volumes (e.g. controllers).
- name: create a volume group
  community.general.lvg:
    vg: vg-k8s
    pvs: "{{ node_pvs }}"
    pvresize: true
  when: node_pvs | length > 0
# k0sctl cluster definition, rendered by Ansible from the "cluster" fact
# (the terraform module output). The diff garbling left duplicate keys
# (name, version, api, address) and the stale per-group host loops — this
# keeps the new single loop over cluster.nodes.
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
metadata:
  name: {{cluster.name}}
spec:
  hosts:
{% for n, node in cluster.nodes.items() %}
  - ssh:
      address: {{node.publicIp}}
      port: 22
    role: {{node.role}}
    privateAddress: {{node.privateIp}}
{% endfor %}
  k0s:
    version: v1.20.6+k0s.0
    config:
      apiVersion: k0s.k0sproject.io/v1beta1
      kind: Cluster
      metadata:
        name: {{cluster.name}}
      spec:
        api:
          # API is reached through the control-plane load balancer.
          externalAddress: {{cluster.api.publicIp}}
          sans:
            - {{cluster.api.publicIp}}
        network:
          provider: calico
          calico:
            mtu: 1440
            wireguard: true
            # NOTE(review): nesting of ipAutodetectionMethod under calico is
            # inferred from the source order — confirm against the k0s schema.
            ipAutodetectionMethod: cidr={{cluster.network.cloudCidr}}
        extensions:
          helm:
            repositories:
              - name: stable
                url: https://charts.helm.sh/stable
              - name: prometheus-community
                url: https://prometheus-community.github.io/helm-charts
              - name: openebs
                url: https://openebs.github.io/charts
              - name: ingress-nginx
                url: https://kubernetes.github.io/ingress-nginx
              - name: jetstack
                url: https://charts.jetstack.io
              - name: minio
                url: https://operator.min.io/
# Controller nodes.
resource "hcloud_server" "controller" {
  count       = local.controller_count
  name        = "${local.cluster_name}-controller-${count.index}"
  image       = "debian-10"
  server_type = local.controller_type
  ssh_keys    = local.ssh_keys
  location    = local.location

  labels = {
    cluster = local.cluster_name
    role    = "controller"
  }
}

# Load balancer in front of the controllers (Kubernetes control plane).
resource "hcloud_load_balancer" "control_plane_load_balancer" {
  name               = "${local.cluster_name}-control-plane"
  load_balancer_type = local.control_plane_lb_type
  location           = local.location

  labels = {
    cluster = local.cluster_name
  }

  # One target per controller server.
  dynamic "target" {
    for_each = hcloud_server.controller
    content {
      type      = "server"
      server_id = target.value["id"]
    }
  }
}

# TCP forwarding for every control-plane port.
resource "hcloud_load_balancer_service" "control_plane_load_balancer_service" {
  for_each         = toset(local.control_plane_ports)
  load_balancer_id = hcloud_load_balancer.control_plane_load_balancer.id
  protocol         = "tcp"
  listen_port      = each.value
  destination_port = each.value
}
variable "hcloud_token" {
  description = "Hetzner cloud auth token"
  type        = string
  sensitive   = true
}

# Cluster description shared with Ansible, parsed from the YAML config.
locals {
  config = yamldecode(file("../config.yml"))
}

module "cluster" {
  # HCL forbids duplicate arguments: the stale local-path source
  # ("../../terraform-hetzner-cluster") is dropped in favor of the git source.
  source = "git@git.indie.host:indiehost/tech/infrastructure/terraform-hetzner-cluster.git"

  hcloud_token = var.hcloud_token
  name         = local.config.name
  sshKeys      = local.config.sshKeys
  location     = local.config.location
  api          = local.config.api
  ingress      = local.config.ingress
  network      = local.config.network
  nodes        = local.config.nodes
}

# Re-export the whole module so Ansible can read it from the state outputs.
output "cluster" {
  value = module.cluster
}
\ No newline at end of file
# Public IPv4 of every controller node.
output "controller_ipv4" {
  value = hcloud_server.controller.*.ipv4_address
}

# Public IPv4 of every worker, plus any externally-managed extra workers.
output "worker_ipv4" {
  value = concat(hcloud_server.worker.*.ipv4_address, local.extra_workers)
}

# IPv4 of the control-plane load balancer (Kubernetes API endpoint).
output "control_plane_ipv4" {
  value = hcloud_load_balancer.control_plane_load_balancer.ipv4
}

# IPv4 of the ingress load balancer.
output "ingress_ipv4" {
  value = hcloud_load_balancer.ingress_load_balancer.ipv4
}
provider "hcloud" {
  token = var.hcloud_token
}

variable "hcloud_token" {
  description = "Hetzner cloud auth token"
  type        = string
  sensitive   = true
}

variable "env" {
  description = "Environment"
  type        = string
}

# Environment-dependent sizing: production gets the bare cluster name and
# dedicated controllers; every other env gets a name suffix and small VMs.
locals {
  cluster_name          = var.env == "production" ? "ada-lovelace" : "ada-lovelace-${var.env}"
  ssh_keys              = ["3245559"]
  location              = "fsn1"
  controller_type       = var.env == "production" ? "ccx12" : "cx11"
  controller_count      = 3
  worker_type           = "cx11"
  worker_count          = var.env == "production" ? 0 : 4
  volume_count          = 2
  volume_size           = 10
  control_plane_ports   = ["6443", "9443", "8132", "8133"]
  ingress_ports         = ["80", "443"]
  control_plane_lb_type = "lb11"
  ingress_lb_type       = var.env == "production" ? "lb21" : "lb11"
  # NOTE(review): both branches are empty — presumably a placeholder for
  # externally-managed worker IPs; confirm intent before simplifying.
  extra_workers         = var.env == "production" ? [] : []
}

terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
    }
    template = {
      source = "hashicorp/template"
    }
  }
}
# Worker nodes.
resource "hcloud_server" "worker" {
  count       = local.worker_count
  name        = "${local.cluster_name}-worker-${count.index}"
  image       = "debian-10"
  server_type = local.worker_type
  ssh_keys    = local.ssh_keys
  location    = local.location

  labels = {
    cluster = local.cluster_name
    role    = "worker"
  }
}

# volume_count volumes per worker, attached round-robin via the index modulo.
resource "hcloud_volume" "volume" {
  count      = local.volume_count * local.worker_count
  name       = "${local.cluster_name}-${count.index}"
  size       = local.volume_size
  server_id  = hcloud_server.worker[count.index % local.worker_count].id
  depends_on = [hcloud_server.worker]
}

# Load balancer for HTTP/HTTPS ingress, targeting every worker server.
resource "hcloud_load_balancer" "ingress_load_balancer" {
  name               = "${local.cluster_name}-ingress"
  load_balancer_type = local.ingress_lb_type
  location           = local.location

  labels = {
    cluster = local.cluster_name
  }

  dynamic "target" {
    for_each = hcloud_server.worker
    content {
      type      = "server"
      server_id = target.value["id"]
    }
  }
}

# TCP forwarding for every ingress port.
resource "hcloud_load_balancer_service" "ingress_load_balancer_service" {
  for_each         = toset(local.ingress_ports)
  load_balancer_id = hcloud_load_balancer.ingress_load_balancer.id
  protocol         = "tcp"
  listen_port      = each.value
  destination_port = each.value
}

# Extra (non-terraform-managed) workers are attached to the LB by IP.
resource "hcloud_load_balancer_target" "ingress_load_balancer_target" {
  for_each         = toset(local.extra_workers)
  type             = "ip"
  load_balancer_id = hcloud_load_balancer.ingress_load_balancer.id
  ip               = each.value
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment