Compare commits

...

14 Commits

62 changed files with 3748 additions and 254 deletions

157
README.md
View File

@@ -1,21 +1,162 @@
# schleppe High Availability project
The goal is better webapp uptime than AWS.
Defines infrastructure code for a HA & cached, scalable way of serving web applications.
## Architecture
```
+-----------------------------------------------------------+
| Domain: schleppe.cloud |
| |
| +-----DNS (Cloudflare)-----+ |
| | round-robin A records | |
| +--------------------------+ |
| │ |
| ┌─────────────────┴─────────────────┐ |
| │ │ |
| A: 193.72.45.133 B: 45.23.78.120 |
| (SITE A) (SITE B..N) |
+------------+-----------------------------------+----------+
│ └────────────────┐
v v
+----------------------------------------------------+ +--------------------+
| Site A (REGION: EU) | | Site B..N |
| | | (Copy of site A) |
| +----------- Floating IP (keepalived/etcd) ---+ | +--------------------+
| | | |
| | +-------------+ +-------------+ | |
| | | HAProxy-1 | | HAProxy-2 | | |
| | | (ACTIVE) | | (STANDBY) | | |
| | +------+------+ +-------+-----+ | |
| | └─── active / standby ──┘ | |
| | | |
| +----------------------+----------------------+ |
| │ |
| (SSL termination + readiness checks) |
| │ |
| v |
| +-------+---------+ |
| | haproxy (LB) | |
| +-----+----+--+---+ |
| │ │ A |
| direct │ │ │ via cache |
| │ v │ |
| │ +-+--+---------+ |
| │ | varnish (n) | |
| │ +------+-------+ |
| │ │ HIT / MISS |
| │ │ |
| └─────────┤ |
| │ |
| v |
| +---------+--------+ |
| | web servers (n) | |
| +------------------+ |
| |
+----------------------------------------------------+
```
Varnish & the web servers run a minimum of 2 instances each. There are currently three regions: EU, US & schleppe on-prem.
Each site has a single active haproxy (with a standby fallback) routing traffic, but multiple varnish instances & webservers, all connected together w/ shared routing tables.
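Once everything is up, a quick end-to-end smoke test can be run from any client (a sketch; relies on the `?debug=1` headers wired into the haproxy & varnish configs):
```bash
# X-HA-* headers come from haproxy, X-Cache (HIT/MISS) from varnish
curl -sI "https://whoami.schleppe.cloud/?debug=1" | grep -iE "x-ha-|x-cache"
```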
## Configure environment
Ensure that the following environment variables exist. Disable history in your terminal before pasting any API keys (`unset HISTFILE` for bash, or `fish --private` for fish).
- `CLOUDFLARE_API_TOKEN`: update DNS for given zones
- `HCLOUD_TOKEN`: permissions to create cloud resources
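For example (placeholder values):
```bash
# avoid leaking tokens into shell history
unset HISTFILE
export CLOUDFLARE_API_TOKEN="<token with DNS edit on the zones>"
export HCLOUD_TOKEN="<token with project read/write>"
```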
## infrastructure
Cloud resources are configured in Hetzner with Pulumi.
Hetzner has two regions:
- us
- eu
Each region has:
- haproxy x2
- varnish x2
- webservers
```bash
cd hetzner-pulumi
# first time, init pulumi stack (name optional)
pulumi stack init kevinmidboe/hetzner
# required configuration values
pulumi config set sshPublicKey "$(cat ~/.ssh/id_ed25519.pub)"
# up infrastructure
pulumi up
# (optional) private IPs sometimes fail to assign on the first run, run again to assign them correctly
pulumi up
```
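The stack's `inventory` output feeds the ansible config scripts below; it can also be inspected directly, e.g.:
```bash
# list provisioned VMs with their public IP (falls back to private IP)
pulumi stack output --json | jq -r '.inventory.vms[] | "\(.name) \(.publicIpv4 // .privateIp)"'
```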
## provision
Ansible is used to provision software, environments & services on the hosts.
Get ansible configuration values from pulumi output:
```bash
cd ansible
# generate inventory (manually update inventory file)
./scripts/generate-inventory.sh | pbcopy
# the following scripts update config files in place
./scripts/update-config_certbot-domains.sh
./scripts/update-config_webserver-ips.sh
```
Run playbooks:
```bash
# install, configure & start haproxy
ansible-playbook plays/haproxy.yml -i hetzner.ini -l haproxy
# install, configure & start varnish
ansible-playbook plays/varnish.yml -i hetzner.ini -l varnish
# install web resources & dependencies, pull & starts docker containers
ansible-playbook plays/docker.yml -i hetzner.ini -l web
ansible-playbook plays/web.yml -i hetzner.ini -l web
```
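A quick connectivity check per inventory group before running plays (groups as in `hetzner.ini`):
```bash
ansible -i hetzner.ini haproxy -m ping
ansible -i hetzner.ini varnish -m ping
ansible -i hetzner.ini web -m ping
```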
### ansible play: haproxy
roles:
- haproxy
- certbot
The vars `haproxy_varnish_ip` & `haproxy_traefik_ip` define the IPs iterated over when copying the template to hosts. They point to the available varnish cache servers & webservers respectively.
> `certbot_cloudflare_domains` runs certbot to make sure valid certs exist for the DNS-attached instances serving traffic.
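A sanity check after the play, on a haproxy node:
```bash
# list the lineages certbot currently manages
certbot certificates
```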
### ansible play: varnish
roles:
- varnish
Installs and configures varnish. Iterates over all `haproxy_traefik_ip` entries when copying the varnish.vcl template. Make sure these IPs match the current webservers varnish should point at; they should be the same webservers haproxy points to directly when not proxying through varnish.
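To verify a cache node after the play, hit the health endpoint haproxy probes (`/varnishcheck` is answered synthetically in `default.vcl`); the IP is one of `haproxy_varnish_ip`:
```bash
# expect 200, from a host that can reach the varnish private network
curl -s -o /dev/null -w "%{http_code}\n" "http://10.24.2.1/varnishcheck"
```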
### ansible play: docker + web
`docker.yml` installs docker & dependencies on the web hosts and pulls & starts containers; `web.yml` copies the `docker-compose/` directory to `/opt/docker` and brings every stack up via `compose-all.sh`. The script can also be run manually, as sketched below.
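A manual run on a web host (a sketch; `/opt/docker` is the role's `remote_compose_dir`):
```bash
# cycle all compose stacks in place
cd /opt/docker
bash compose-all.sh down && bash compose-all.sh up
```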
## manual steps / TODO
Still issuing certs manually:
```bash
cd /root/.secrets/certbot
touch cloudflare_k9e-no.ini; touch cloudflare_planetposen-no.ini; touch cloudflare_schleppe-cloud.ini
certbot certonly --dns-cloudflare --dns-cloudflare-credentials /root/.secrets/certbot/cloudflare_schleppe-cloud.ini -d whoami.schleppe.cloud --agree-tos && \
certbot certonly --dns-cloudflare --dns-cloudflare-credentials /root/.secrets/certbot/cloudflare_k9e-no.ini -d k9e.no --agree-tos && \
certbot certonly --dns-cloudflare --dns-cloudflare-credentials /root/.secrets/certbot/cloudflare_planetposen-no.ini -d planetposen.no --agree-tos
cat /etc/letsencrypt/live/k9e.no/fullchain.pem /etc/letsencrypt/live/k9e.no/privkey.pem > /etc/haproxy/certs/ssl-k9e.no.pem && \
cat /etc/letsencrypt/live/planetposen.no/fullchain.pem /etc/letsencrypt/live/planetposen.no/privkey.pem > /etc/haproxy/certs/ssl-planetposen.no.pem && \
cat /etc/letsencrypt/live/whoami.schleppe.cloud/fullchain.pem /etc/letsencrypt/live/whoami.schleppe.cloud/privkey.pem > /etc/haproxy/certs/ssl-whoami.schleppe.cloud.pem
systemctl restart haproxy.service
```
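Renewals could later be automated with a deploy hook that rebuilds the combined PEMs (a sketch, not wired into any role yet; `$RENEWED_LINEAGE` is set by certbot for deploy hooks):
```bash
certbot renew --deploy-hook '
  cat "$RENEWED_LINEAGE/fullchain.pem" "$RENEWED_LINEAGE/privkey.pem" \
    > "/etc/haproxy/certs/ssl-$(basename "$RENEWED_LINEAGE").pem"
  systemctl reload haproxy.service
'
```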
Shared storage/state between all the instances is still needed, e.g. `etcd`.

View File

@@ -1,6 +0,0 @@
---
# CI specific vars
users:
- root
ssh_keys_users: ['drone']

View File

@@ -1,5 +0,0 @@
---
# Consul server specific
consul_is_server: true
consul_is_ui: true
consul_bootstrap_expect: 1

View File

@@ -6,12 +6,3 @@ dns_nameservers:
- "2606:4700:4700::1001"
default_user: "kevin"
# Consul cluster
consul_datacenter: "schleppe"
consul_servers:
- "10.0.0.140"
- "10.0.0.141"
- "10.0.0.142"
consul_install_dnsmasq: false

View File

@@ -1,12 +1,21 @@
haproxy_traefik_ip:
- "10.24.1.1"
- "10.25.0.4"
haproxy_traefik_port: 80
haproxy_varnish_port: 80
haproxy_cookie_value: "{{ inventory_hostname | default('server-1') }}"
haproxy_dynamic_cookie_key: "mysecretphrase"
haproxy_stats_auth: "admin:strongpassword"
haproxy_certs_dir: "/etc/haproxy/certs"
certbot_cloudflare_secrets_dir: "/root/.secrets/certbot"
certbot_cloudflare_ini_path: "/root/.secrets/certbot/cloudflare.ini"
certbot_cloudflare_api_token: "REPLACE_WITH_REAL_TOKEN"
haproxy_varnish_ip:
- 10.24.2.1
- 10.24.2.2
- 10.25.2.1
- 10.25.2.2
haproxy_traefik_ip:
- 10.24.3.6
- 10.24.3.3
- 10.25.3.4
certbot_cloudflare_domains:
- k9e.no
- planetposen.no
- whoami.schleppe.cloud

View File

@@ -1,6 +0,0 @@
---
# python path
ansible_python_interpreter: /usr/local/bin/python3
users:
- kevin

View File

@@ -1,6 +0,0 @@
---
apt_packages:
- git
- build-essential
- openjdk-21-jdk
minecraft_version: 1.20.6

View File

@@ -1,2 +0,0 @@
---
proxmox_install_qemu_guest_agent: true

View File

@@ -0,0 +1,7 @@
varnish_major: 60lts
varnish_cfg_path: /etc/varnish
haproxy_traefik_port: 80
haproxy_traefik_ip:
- 10.24.3.6
- 10.24.3.3
- 10.25.3.4

View File

@@ -1,2 +0,0 @@
---
ssh_keys_users: ['kevin', 'kasper']

View File

@@ -1,16 +0,0 @@
---
- name: Check if vault is reachable for dynamic config
hosts: all
connection: local
gather_facts: false
pre_tasks:
- name: Check for vault env variables
set_fact:
has_vault: "{{ lookup('env', 'VAULT_ADDR') and lookup('env', 'VAULT_TOKEN') and lookup('env', 'HAS_VAULT') != 'FALSE' }}"
roles:
- { role: roles/vault-config, when: has_vault }
- name: Install all bind9 service and transfer zone files
hosts: all
roles:
- role: roles/bind9

View File

@@ -1,5 +0,0 @@
---
- name: Consul
hosts: all
roles:
- role: roles/consul

View File

@@ -1,5 +0,0 @@
---
- name: Provision git server with gitea
hosts: all
roles:
- role: roles/gitea

View File

@@ -3,5 +3,5 @@
hosts: haproxy
roles:
# - role: roles/certbot
- role: roles/certbot
- role: roles/haproxy

View File

@@ -1,7 +0,0 @@
---
- name: Install and setup immich backup service
hosts: all
roles:
# - role: roles/docker
- role: roles/immich

View File

@@ -1,7 +0,0 @@
---
- name: Setup minecraft requirements w/ latest server jar
hosts: all
roles:
- role: roles/apt
- role: roles/minecraft

View File

@@ -1,19 +0,0 @@
---
- name: Check if vault is reachable for dynamic config
hosts: all
connection: local
gather_facts: false
pre_tasks:
- name: Check for vault env variables
set_fact:
has_vault: "{{ lookup('env', 'VAULT_ADDR') and lookup('env', 'VAULT_TOKEN') }}"
TELEGRAF_TOKEN: "{{ lookup('env', 'TELEGRAF_TOKEN') }}"
roles:
- { role: roles/vault-config, when: has_vault }
- name: Basic setup for proxmox vm clients
hosts: proxmox_nodes
roles:
# - role: roles/prox-telegraf-metrics
- role: roles/prox-templates

View File

@@ -1,5 +0,0 @@
---
- name: Install & configure syncthing
hosts: all
roles:
- role: roles/syncthing

View File

@@ -1,6 +0,0 @@
---
- name: Install traefik binary & config
hosts: all
roles:
- role: roles/traefik

View File

@@ -2,8 +2,8 @@
- name: Install and configure systemd for varnish
hosts: varnish
roles:
- role: roles/firewall
enable_80_ufw_port: true
enable_443_ufw_port: true
- role: roles/varnish
# - role: roles/firewall
# enable_80_ufw_port: true
# enable_443_ufw_port: true
#
- role: roles/varnish

View File

@@ -1,7 +0,0 @@
---
- name: Install all required packages, built and start service for vault
hosts: all
roles:
- role: roles/vault
- role: roles/firewall
enable_vault_ufw_port: true

View File

@@ -1,5 +0,0 @@
---
- name: Install all required packages, built and start service for vinlottis
hosts: all
roles:
- role: roles/vinlottis

6
ansible/plays/web.yml Normal file
View File

@@ -0,0 +1,6 @@
---
- name: copies docker-compose files to all web hosts
hosts: web
roles:
- role: roles/web

View File

@@ -1,6 +0,0 @@
- name: Setup wireguard
hosts: all
roles:
- role: roles/docker
- role: roles/firewall
- role: roles/wireguard

View File

@@ -1,21 +0,0 @@
---
- name: Check if vault is reachable for dynamic config
hosts: all
connection: local
gather_facts: false
pre_tasks:
- name: Check for vault env variables
set_fact:
has_vault: "{{ lookup('env', 'VAULT_ADDR') and lookup('env', 'VAULT_TOKEN') }}"
XWIKI_DB_USER: "{{ lookup('env', 'XWIKI_DB_USER') }}"
XWIKI_DB_PASSWORD: "{{ lookup('env', 'XWIKI_DB_PASSWORD') }}"
XWIKI_DB_ROOT_PASSWORD: "{{ lookup('env', 'XWIKI_DB_ROOT_PASSWORD') }}"
roles:
- { role: roles/vault-config, when: has_vault }
- name: Setup xwiki working directory and move docker-compose file
hosts: all
roles:
- role: roles/docker
- role: roles/firewall
- role: roles/xwiki

View File

@@ -1,3 +1,13 @@
certbot_email: kevin.midboe+ha.project@gmail.com
certbot_secrets_dir: /root/.secrets/certbot
combined_certs_dir: /etc/haproxy/certs
combined_cert_prefix: "ssl-"
# Set true while testing to avoid LE rate limits
certbot_use_staging: false
le_renewal_window_seconds: 2592000
certbot_throttle: 1
certbot_packages:
- certbot
- python3-certbot-dns-cloudflare

View File

@@ -0,0 +1,81 @@
---
- name: Read Cloudflare API key from environment (invalid placeholder by default)
ansible.builtin.set_fact:
cloudflare_api_key: >-
{{ lookup('ansible.builtin.env', 'CLOUDFLARE_API_KEY')
| default('__CLOUDFLARE_API_KEY_NOT_SET__', true) }}
no_log: true
- name: Fail if CLOUDFLARE_API_KEY is not set
ansible.builtin.assert:
that:
- cloudflare_api_key != '__CLOUDFLARE_API_KEY_NOT_SET__'
fail_msg: >
CLOUDFLARE_API_KEY environment variable is required
- name: Validate cloudflare_api_key looks sane
ansible.builtin.assert:
that:
- cloudflare_api_key is regex('^[A-Za-z0-9_-]+$')
fail_msg: >
CLOUDFLARE_API_KEY must contain only letters, digits, dashes or underscores
no_log: false
- name: Ensure certbot secrets directory exists
ansible.builtin.file:
path: "{{ certbot_secrets_dir }}"
state: directory
owner: root
group: root
mode: "0700"
- name: Write Cloudflare credential file
ansible.builtin.template:
src: cloudflare.ini.j2
dest: "{{ certbot_secrets_dir }}/certbot-cloudflare.ini"
owner: root
group: root
mode: "0600"
no_log: true
- name: Ensure combined cert output directory exists
ansible.builtin.file:
path: "{{ combined_certs_dir }}"
state: directory
owner: root
group: root
mode: "0755"
# Request/renew: certbot is already idempotent-ish. We guard with `creates` to avoid
# re-issuing on first provision runs; renewals happen via cron/systemd timer (recommended).
- name: Obtain certificate via certbot dns-cloudflare (first issuance)
ansible.builtin.command: >
certbot certonly
--agree-tos
--non-interactive
--email {{ certbot_email }}
--dns-cloudflare
--dns-cloudflare-credentials {{ certbot_secrets_dir }}/certbot-cloudflare.ini
-d {{ item }}
{% if certbot_use_staging %}--staging{% endif %}
args:
creates: "/etc/letsencrypt/live/{{ item }}/fullchain.pem"
loop: "{{ certbot_cloudflare_domains | default([]) }}"
register: certbot_issue
changed_when: certbot_issue.rc == 0
failed_when: certbot_issue.rc != 0
async: 0
# Combine cert+key for Traefik/HAProxy-style PEM bundle
- name: Combine fullchain + privkey into single PEM bundle
ansible.builtin.shell: |
set -euo pipefail
cat \
/etc/letsencrypt/live/{{ item }}/fullchain.pem \
/etc/letsencrypt/live/{{ item }}/privkey.pem \
> {{ combined_certs_dir }}/{{ combined_cert_prefix }}{{ item }}.pem
chmod 0600 {{ combined_certs_dir }}/{{ combined_cert_prefix }}{{ item }}.pem
args:
executable: /bin/bash
loop: "{{ certbot_cloudflare_domains | default([]) }}"

View File

@@ -1,3 +1,4 @@
---
- import_tasks: install.yml
- import_tasks: secrets.yml
# - import_tasks: issue_certs.yml

View File

@@ -1 +1,2 @@
dns_cloudflare_api_token = {{ certbot_cloudflare_api_token }}
# Managed by ansible
dns_cloudflare_api_token = {{ lookup('ansible.builtin.env', 'CLOUDFLARE_API_KEY') }}

View File

@@ -35,13 +35,37 @@ defaults
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
# Front door: public HTTP
frontend fe_http
# Front door: main frontend dedicated to end users
frontend ft_web
bind :80
http-request set-header X-Forwarded-Proto https
option forwardfor
# Cache routing acl definitions
acl static_content path_end .jpg .jpeg .gif .png .css .js .htm .html
acl pseudo_static path_end .php ! path_beg /dynamic/
acl image_php path_beg /images.php
acl varnish_available nbsrv(bk_varnish_uri) ge 1
# Caches health detection + routing decision
use_backend bk_varnish_uri if varnish_available static_content
use_backend bk_varnish_uri if varnish_available pseudo_static
use_backend bk_varnish_url_param if varnish_available image_php
# Read debug query parameter
http-request set-var(txn.debug) urlp(debug)
# Define what "debug enabled" means
acl debug_enabled var(txn.debug) -m str -i 1 true yes on
# Debug headers
http-request set-var(txn.http_ver) req.ver
http-response add-header X-HA-HTTP-Version %[var(txn.http_ver)] if debug_enabled
http-response add-header X-HA-TLS-Version %[ssl_fc_protocol] if debug_enabled
http-response add-header X-HA-Frontend %[fe_name] if debug_enabled
http-response add-header X-HA-Backend %[be_name] if debug_enabled
http-response add-header X-HA-Server %[srv_name] if debug_enabled
http-response add-header X-HA-Hostname %[hostname] if debug_enabled
http-response add-header X-Debug-Client-IP %[src] if debug_enabled
http-response add-header Cache-Control no-store if debug_enabled
# dynamic content or all caches are unavailable
default_backend be_traefik_http
# Front door: public HTTPS
@@ -58,47 +82,45 @@ frontend fe_https
# acl is_h2 ssl_fc_alpn -i h2
# http-response set-header Alt-Svc "h3=\":443\"; ma=900" if is_h2
# =========================================================
# Debug response headers (enabled via ?debug=1)
# Cache routing acl definitions
acl static_content path_end .jpg .jpeg .gif .png .css .js .htm .html
acl pseudo_static path_end .php ! path_beg /dynamic/
acl image_php path_beg /images.php
acl varnish_available nbsrv(bk_varnish_uri) ge 1
# Caches health detection + routing decision
use_backend bk_varnish_uri if varnish_available static_content
use_backend bk_varnish_uri if varnish_available pseudo_static
use_backend bk_varnish_url_param if varnish_available image_php
# Read debug query parameter
http-request set-var(txn.debug) urlp(debug)
# Define what "debug enabled" means
acl debug_enabled var(txn.debug) -m str -i 1 true yes on
# Debug headers
http-request set-var(txn.http_ver) req.ver
http-response add-header X-Debug-HTTP-Version %[var(txn.http_ver)] if debug_enabled
http-response add-header X-Debug-Served-By haproxy-https if debug_enabled
http-response add-header X-Debug-Frontend %[fe_name] if debug_enabled
http-response add-header X-Debug-Backend %[be_name] if debug_enabled
http-response add-header X-Debug-Server %[srv_name] if debug_enabled
# Client & network
http-response add-header X-Debug-Client-IP %[src] if debug_enabled
# http-response add-header X-Debug-Client-Port %[sp] if debug_enabled
# http-response add-header X-Debug-XFF %[req.hdr(X-Forwarded-For)] if debug_enabled
# TLS / HTTPS details
http-response add-header X-Debug-TLS %[ssl_fc] if debug_enabled
http-response add-header X-Debug-TLS-Version %[ssl_fc_protocol] if debug_enabled
http-response add-header X-Debug-TLS-Cipher %[ssl_fc_cipher] if debug_enabled
# Request identity & correlation
http-response add-header X-Debug-Request-ID %[unique-id] if debug_enabled
http-response add-header X-Debug-Method %[method] if debug_enabled
# Safety: prevent caching of debug responses
http-response add-header Cache-Control no-store if debug_enabled
http-response add-header X-HA-HTTP-Version %[var(txn.http_ver)] if debug_enabled
http-response add-header X-HA-TLS-Version %[ssl_fc_protocol] if debug_enabled
http-response add-header X-HA-Frontend %[fe_name] if debug_enabled
http-response add-header X-HA-Backend %[be_name] if debug_enabled
http-response add-header X-HA-Server %[srv_name] if debug_enabled
http-response add-header X-HA-Hostname %[hostname] if debug_enabled
http-response add-header X-Debug-Client-IP %[src] if debug_enabled
http-response add-header Cache-Control no-store if debug_enabled
# dynamic content or all caches are unavailable
default_backend be_traefik_http
# Backend: Traefik VM
backend be_traefik_http
mode http
balance roundrobin
cookie LB_SERVER insert indirect nocache dynamic
# app servers must say if everything is fine on their side
# and they can process requests
option httpchk
option httpchk GET /appcheck
http-check expect rstring [oO][kK]
cookie LB_SERVER insert indirect nocache
dynamic-cookie-key {{ haproxy_dynamic_cookie_key }}
# Health check: Traefik should respond with 404 for unknown host; that's still "alive".
@@ -109,6 +131,39 @@ backend be_traefik_http
server traefik{{ loop.index }} {{ ip }}:{{ haproxy_traefik_port }} check cookie {{ haproxy_cookie_value }}
{% endfor %}
# VARNISH
# static backend with balance based on the uri, including the query string
# to avoid caching an object on several caches
backend bk_varnish_uri
balance uri # in latest HAProxy version, one can add 'whole' keyword
# Varnish must tell it's ready to accept traffic
option httpchk HEAD /varnishcheck
http-check expect status 200
# client IP information
option forwardfor
# avoid request redistribution when the number of caches changes (crash or start up)
hash-type consistent
{% for ip in haproxy_varnish_ip %}
server varnish{{ loop.index }} {{ ip }}:{{ haproxy_varnish_port }} check
{% endfor %}
# cache backend with balance based on the value of the URL parameter called "id"
# to avoid caching an object on several caches
backend bk_varnish_url_param
balance url_param id
# client IP information
option forwardfor
# avoid request redistribution when the number of caches changes (crash or start up)
hash-type consistent
{% for ip in haproxy_varnish_ip %}
server varnish{{ loop.index }} {{ ip }}:{{ haproxy_varnish_port }} track bk_varnish_uri/varnish{{ loop.index }}
{% endfor %}
# Frontend: HAProxy prometheus exporter metrics
frontend fe_metrics
bind :8405

View File

@@ -0,0 +1,6 @@
---
- name: reload varnish
service:
name: varnish
state: reloaded

View File

@@ -0,0 +1,46 @@
---
- name: Ensure varnish config directory exists
file:
path: "/etc/varnish"
state: directory
owner: root
group: root
mode: "0755"
- name: Copy default.vcl template
template:
src: default.vcl.j2
dest: "{{ varnish_cfg_path }}/default.vcl"
owner: root
group: root
mode: "0644"
# validate: "varnishd -C -f %s"
notify: reload varnish
- name: Copy vcl_deliver.vcl template
template:
src: vcl_deliver.vcl.j2
dest: "{{ varnish_cfg_path }}/vcl_deliver.vcl"
owner: root
group: root
mode: "0644"
# validate: "varnishd -C -f %s"
notify: reload varnish
- name: Ensure includes directory exists
file:
path: "/etc/varnish/includes"
state: directory
owner: root
group: root
mode: "0755"
- name: Copy x-cache-header include
template:
src: includes/x-cache-header.vcl.j2
dest: "{{ varnish_cfg_path }}/includes/x-cache-header.vcl"
owner: root
group: root
mode: "0644"
# validate: "varnishd -C -f %s"
notify: reload varnish
- name: Restart varnish
service:
name: varnish
state: restarted

View File

@@ -0,0 +1,113 @@
---
- name: Ensure apt cache is up to date (pre)
ansible.builtin.apt:
update_cache: true
cache_valid_time: 3600
- name: Debian only - ensure debian-archive-keyring is installed
ansible.builtin.apt:
name: debian-archive-keyring
state: present
when: ansible_facts.distribution == "Debian"
- name: Ensure required tools are installed (curl, gnupg, apt-transport-https)
ansible.builtin.apt:
name:
- curl
- gnupg
- apt-transport-https
state: present
# Packagecloud repo parameters:
# os = "debian" or "ubuntu"
# dist = codename (e.g. bookworm, bullseye, focal, jammy, noble)
- name: Set packagecloud repo parameters
ansible.builtin.set_fact:
varnish_pkgcloud_os: "{{ 'ubuntu' if ansible_facts.distribution == 'Ubuntu' else 'debian' }}"
varnish_pkgcloud_dist: "bookworm"
# varnish_pkgcloud_dist: "{{ ansible_facts.distribution_release }}"
# ---- apt >= 1.1 path (keyrings + signed-by) ----
- name: Ensure /etc/apt/keyrings exists
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: "0755"
- name: Download packagecloud GPG key (ascii)
ansible.builtin.get_url:
url: https://packagecloud.io/varnishcache/varnish{{ varnish_major }}/gpgkey
dest: /tmp/varnishcache_varnish{{ varnish_major }}.gpgkey
mode: "0644"
- name: Dearmor packagecloud key into /etc/apt/keyrings
ansible.builtin.command: >
gpg --dearmor -o /etc/apt/keyrings/varnishcache_varnish{{ varnish_major }}-archive-keyring.gpg
/tmp/varnishcache_varnish{{ varnish_major }}.gpgkey
args:
creates: /etc/apt/keyrings/varnishcache_varnish{{ varnish_major }}-archive-keyring.gpg
- name: Ensure Sequoia crypto-policy directory exists
ansible.builtin.file:
path: /etc/crypto-policies/back-ends
state: directory
owner: root
group: root
mode: "0755"
- name: Allow SHA1 signatures for sequoia (packagecloud compatibility)
ansible.builtin.copy:
dest: /etc/crypto-policies/back-ends/sequoia.config
owner: root
group: root
mode: "0644"
backup: true
content: |
[hash_algorithms]
sha1 = "always"
- name: Add Varnish 6.0 LTS repo
ansible.builtin.apt_repository:
repo: "deb [signed-by=/etc/apt/keyrings/varnishcache_varnish{{ varnish_major }}-archive-keyring.gpg] https://packagecloud.io/varnishcache/varnish60lts/{{ varnish_pkgcloud_os }}/ {{ varnish_pkgcloud_dist }} main"
filename: varnishcache_varnish{{ varnish_major }}
state: present
- name: Add Varnish 6.0 LTS source repo (optional)
ansible.builtin.apt_repository:
repo: "deb-src [signed-by=/etc/apt/keyrings/varnishcache_varnish{{ varnish_major }}-archive-keyring.gpg] https://packagecloud.io/varnishcache/varnish60lts/{{ varnish_pkgcloud_os }}/ {{ varnish_pkgcloud_dist }} main"
filename: varnishcache_varnish{{ varnish_major }}
state: present
when:
- varnish_enable_deb_src | default(false)
- name: Update apt cache (after adding repo)
ansible.builtin.apt:
update_cache: true
- name: Install Varnish Cache 6.0 LTS
ansible.builtin.apt:
name: "{{ varnish_packages | default(['varnish']) }}"
state: present
- name: Copy systemd template
become: true
ansible.builtin.template:
src: varnish-systemd.j2
dest: /lib/systemd/system/varnish.service
owner: root
mode: "0644"
- name: Restart systemd daemon
become: true
ansible.builtin.systemd:
daemon_reload: yes
- name: Reload varnish service
become: true
ansible.builtin.systemd:
name: varnish.service
state: reloaded

View File

@@ -1,57 +1,2 @@
---
- name: update apt
become: true
apt:
update_cache: yes
cache_valid_time: 86400
- name: install required packages
package:
name:
- debian-archive-keyring
- curl
- gnupg
- apt-transport-https
- name: add varnish apt key & repo
block:
- name: add varnish key
apt_key:
url: https://packagecloud.io/varnishcache/varnish60lts/gpgkey
state: present
- name: add varnish repo
apt_repository:
repo: 'deb https://packagecloud.io/varnishcache/varnish60lts/{{ varnish_release }} {{ varnish_release_codename }} main'
state: present
- name: add varnish repo src
apt_repository:
repo: 'deb-src https://packagecloud.io/varnishcache/varnish60lts/{{ varnish_release }} {{ varnish_release_codename }} main'
state: present
- name: update apt
become: true
apt:
update_cache: yes
cache_valid_time: 86400
- name: install varnish package
package:
name: varnish
- name: copy systemd template
template:
src: varnish-systemd.j2
dest: /lib/systemd/system/varnish.service
owner: root
mode: 644
- name: restart systemd daemon
systemd:
daemon_reload: yes
- name: restart varnish service
systemd:
name: varnish.service
state: reloaded
- import_tasks: install.yml
- import_tasks: copy-source.yml

View File

@@ -0,0 +1,206 @@
vcl 4.1;
import std;
import directors;
include "vcl_deliver.vcl";
include "includes/x-cache-header.vcl";
{% for ip in haproxy_traefik_ip %}
backend bk_appsrv_static-{{ loop.index }} {
.host = "{{ ip }}";
.port = "{{ haproxy_traefik_port }}";
.connect_timeout = 3s;
.first_byte_timeout = 10s;
.between_bytes_timeout = 5s;
.probe = {
.url = "/ping";
.expected_response = 404;
.timeout = 1s;
.interval = 3s;
.window = 2;
.threshold = 2;
.initial = 2;
}
}
{% endfor %}
/*
* Who is allowed to PURGE
*/
acl purge {
"127.0.0.1";
"localhost";
# add your admin / app hosts here
}
sub vcl_init {
new vdir = directors.round_robin();
{% for ip in haproxy_traefik_ip %}
vdir.add_backend(bk_appsrv_static-{{ loop.index }});
{% endfor %}
}
sub vcl_recv {
### Default options
# Health Checking
if (req.url == "/varnishcheck") {
return (synth(200, "health check OK!"));
}
# Set default backend
set req.backend_hint = vdir.backend();
# grace period (stale content delivery while revalidating)
set req.grace = 30s;
# Purge request
if (req.method == "PURGE") {
if (client.ip !~ purge) {
return (synth(405, "Not allowed."));
}
return (purge);
}
# Accept-Encoding header clean-up
if (req.http.Accept-Encoding) {
# use gzip when possible, otherwise use deflate
if (req.http.Accept-Encoding ~ "gzip") {
set req.http.Accept-Encoding = "gzip";
} elsif (req.http.Accept-Encoding ~ "deflate") {
set req.http.Accept-Encoding = "deflate";
} else {
# unknown algorithm, remove accept-encoding header
unset req.http.Accept-Encoding;
}
# Microsoft Internet Explorer 6 is well known to be buggy with compression and css / js
if (req.url ~ "\.(css|js)(\?.*)?$" && req.http.User-Agent ~ "MSIE 6") {
unset req.http.Accept-Encoding;
}
}
# Enable debug headers through query param
if (req.url ~ "(?i)debug=(true|yes|1)") {
set req.http.X-debug = true;
}
### Per host/application configuration
# bk_appsrv_static
# Stale content delivery
if (std.healthy(req.backend_hint)) {
set req.grace = 30s;
} else {
set req.grace = 1d;
}
# Cookie ignored in these static pages
unset req.http.Cookie;
### Common options
# Static objects are first looked up in the cache
if (req.url ~ "\.(png|gif|jpg|swf|css|js)(\?.*)?$") {
return (hash);
}
# Default: look for the object in cache
return (hash);
}
sub vcl_hash {
hash_data(req.url);
if (req.http.host) {
hash_data(req.http.host);
} else {
hash_data(server.ip);
}
}
/*
* Called after a successful PURGE
*/
sub vcl_purge {
return (synth(200, "Purged."));
}
sub vcl_backend_response {
# Stale content delivery
set beresp.grace = 1d;
# Hide Server information
unset beresp.http.Server;
# Store compressed objects in memory (gzip at fetch time)
# Varnish can deliver gunzipped/gzipped depending on client support
if (beresp.http.Content-Type ~ "(?i)(text|application)") {
set beresp.do_gzip = true;
}
###################
# cache rules #
###################
# HTML pages → short cache or no cache
if (bereq.url ~ "\.html$") {
set beresp.ttl = 30s; # cache briefly
# set beresp.uncacheable = true; # or: disable caching entirely (overrides the ttl above)
}
# JavaScript & CSS → long cache
if (bereq.url ~ "\.(js|css)$") {
set beresp.ttl = 1d;
}
# Images under /image/ → long cache
if (bereq.url ~ "^/images/.*\.(svg|png|jpe?g)$") {
set beresp.ttl = 1y;
}
# Favicons → long cache
if (bereq.url ~ "^/favicons/") {
set beresp.ttl = 1y;
}
# Fallback: ensure some cache
if (beresp.ttl <= 0s) {
set beresp.ttl = 22s;
}
set beresp.http.X-TTL = beresp.ttl;
# remove any cookie on static or pseudo-static objects
unset beresp.http.Set-Cookie;
return (deliver);
}
sub vcl_deliver {
# unset resp.http.Via;
unset resp.http.X-Varnish;
# Handle conditional request with ETag
if (
req.http.If-None-Match &&
req.http.If-None-Match == resp.http.ETag
) {
return (synth(304));
}
return (deliver);
}
sub vcl_synth {
if (resp.status == 304) {
set resp.http.ETag = req.http.If-None-Match;
# set resp.http.Content-Length = "0";
return (deliver);
}
# Keep defaults; this replaces the old vcl_error.
# (Your old "obj.status == 751" special case isn't referenced anywhere
# in the provided VCL, so it was dropped.)
return (deliver);
}

View File

@@ -0,0 +1,43 @@
sub vcl_recv {
unset req.http.X-Cache;
}
sub vcl_hit {
set req.http.X-Cache = "HIT";
}
sub vcl_miss {
set req.http.X-Cache = "MISS";
}
sub vcl_pass {
set req.http.X-Cache = "PASS";
}
sub vcl_pipe {
set req.http.X-Cache = "PIPE uncacheable";
}
sub vcl_synth {
set resp.http.X-Cache = "SYNTH";
}
sub vcl_deliver {
if (obj.uncacheable) {
set req.http.X-Cache = req.http.X-Cache + " uncacheable" ;
} else {
set req.http.X-Cache = req.http.X-Cache + " cached" + " (real age: " + resp.http.Age + ", hits: " + obj.hits + ", ttl: " + regsub(resp.http.x-ttl, "\..*", "") + ")";
}
# if we are gracing, make sure the browser doesn't cache things, and set our maxage to 1
# also log grace delivery
if (req.http.graceineffect) {
set resp.http.Cache-Control = regsub(resp.http.Cache-Control, "max-age=[0-9]*", "max-age=1");
set resp.http.Cache-Control = regsub(resp.http.Cache-Control, "channel-maxage=[0-9]*", "channel-maxage=1");
set req.http.X-Cache = req.http.X-Cache + " [grace: " + req.http.graceineffect + " " + req.http.grace + ", remaining: " + req.http.graceduration + "]";
}
# uncomment the following line to show the information in the response
set resp.http.X-Cache = req.http.X-Cache;
}

View File

@@ -0,0 +1,40 @@
sub vcl_deliver {
# Happens when we have all the pieces we need, and are about to send the
# response to the client.
if (resp.status == 503) {
set resp.http.failing-backend = "true";
}
# Give some debug
if (req.http.X-debug && req.esi_level == 0) {
set resp.http.X-Backend = req.backend_hint;
set resp.http.X-Backend-Url = req.url;
set resp.http.X-Varnish-Server = server.hostname;
} else {
# not debug, strip some headers
unset resp.http.X-Cache;
unset resp.http.X-Backend;
unset resp.http.x-upstream;
unset resp.http.x-request-uri;
unset resp.http.Via;
unset resp.http.xkey;
unset resp.http.x-goog-hash;
unset resp.http.x-goog-generation;
unset resp.http.X-GUploader-UploadID;
unset resp.http.x-goog-storage-class;
unset resp.http.x-goog-metageneration;
unset resp.http.x-goog-stored-content-length;
unset resp.http.x-goog-stored-content-encoding;
unset resp.http.x-goog-meta-goog-reserved-file-mtime;
unset resp.http.Server;
unset resp.http.X-Apache-Host;
unset resp.http.X-Varnish-Backend;
unset resp.http.X-Varnish-Host;
unset resp.http.X-Nginx-Host;
unset resp.http.X-Upstream-Age;
unset resp.http.X-Retries;
unset resp.http.X-Varnish;
}
}

View File

@@ -0,0 +1,25 @@
- name: Ensure remote docker directory exists
ansible.builtin.file:
path: "{{ remote_compose_dir }}"
state: directory
owner: root
group: root
mode: '0755'
- name: Copy local docker-compose directory to remote
ansible.builtin.copy:
src: "{{ local_compose_dir }}/"
dest: "{{ remote_compose_dir }}/"
owner: root
group: root
mode: '0644'
- name: Ensure compose-all.sh is executable
ansible.builtin.file:
path: "{{ remote_compose_dir }}/compose-all.sh"
mode: '0755'
- name: Run compose-all.sh up
ansible.builtin.command: bash compose-all.sh up
args:
chdir: "{{ remote_compose_dir }}"

View File

@@ -0,0 +1,3 @@
local_compose_dir: "{{ playbook_dir }}/../../docker-compose"
remote_compose_dir: /opt/docker

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
#
# Usage: ./scripts/generate-inventory.sh | pbcopy
cd ../hetzner-pulumi
pulumi stack output --json | jq -r '
# extract dc (nbg / va) positionally from hostname
def dc:
(.name | capture("-(?<dc>nbg|hel|ash|va)[0-9]*-").dc);
def region:
if dc == "nbg" then "eu" else "us" end;
def pad($n):
tostring as $s
| ($n - ($s|length)) as $k
| if $k > 0 then ($s + (" " * $k)) else $s end;
.inventory.vms
| map({
region: region,
role: (.name | split("-")[0]),
idx: (.name | capture("-(?<n>[0-9]+)$").n),
ip: .publicIpv4,
dc: dc
})
| group_by(.region)
| .[]
| .[0].region as $r
| "[\($r)]",
(
sort_by(.role, (.idx | tonumber))
| .[]
| (
("\(.role)-\(.dc)-\(.idx)" | pad(15)) +
("ansible_host=\(.ip)" | pad(30)) +
("ansible_port=22" | pad(18)) +
"ansible_user=root"
)
),
""
'

View File

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
#
# Usage: ./scripts/update-config_certbot-domains.sh | pbcopy
CERTBOT_EXPORT_KEY=certbot_cloudflare_domains
EXPORT_VARIABLES="$(pwd)/group_vars/haproxy.yml"
yq -i 'del(.certbot_cloudflare_domains)' $EXPORT_VARIABLES
cd ../hetzner-pulumi
pulumi stack output --json | jq -r --arg key $CERTBOT_EXPORT_KEY '
($key + ":\n") +
(.inventory.domains | map(" - " + .) | join("\n"))
' >> $EXPORT_VARIABLES

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
#
# Usage: ./scripts/update-config_varnishserver-ips.sh
IP_EXPORT_KEY=haproxy_varnish_ip
ANSIBLE_DIR="$(pwd)"
PULUMI_DIR="$(pwd)/../hetzner-pulumi"
EXPORT_VARIABLES="$(pwd)/group_vars/haproxy.yml"
yq -i 'del(.haproxy_varnish_ip)' "$EXPORT_VARIABLES"
cd "$PULUMI_DIR"
pulumi stack output --json | jq -r --arg key $IP_EXPORT_KEY '
def varnish_private_ips:
.inventory.vms
| map(select(.name | startswith("varnish")) | .privateIp);
($key + ":\n") +
(varnish_private_ips | map(" - " + .) | join("\n"))
' >> $EXPORT_VARIABLES

View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
#
# Usage: ./scripts/update-config_webserver-ips.sh
IP_EXPORT_KEY=haproxy_traefik_ip
ANSIBLE_DIR="$(pwd)"
PULUMI_DIR="$(pwd)/../hetzner-pulumi"
EXPORT_VARIABLES="$(pwd)/group_vars/haproxy.yml"
yq -i 'del(.haproxy_traefik_ip)' $EXPORT_VARIABLES
cd "$PULUMI_DIR"
pulumi stack output --json | jq -r --arg key $IP_EXPORT_KEY '
def web_private_ips:
.inventory.vms
| map(select(.name | startswith("web")) | .privateIp);
($key + ":\n") +
(web_private_ips | map(" - " + .) | join("\n"))
' >> $EXPORT_VARIABLES
cd $ANSIBLE_DIR
EXPORT_VARIABLES="$(pwd)/group_vars/varnish.yml"
yq -i 'del(.haproxy_traefik_ip)' $EXPORT_VARIABLES
cd "$PULUMI_DIR"
pulumi stack output --json | jq -r --arg key $IP_EXPORT_KEY '
def web_private_ips:
.inventory.vms
| map(select(.name | startswith("web")) | .privateIp);
($key + ":\n") +
(web_private_ips | map(" - " + .) | join("\n"))
' >> $EXPORT_VARIABLES

View File

@@ -0,0 +1,74 @@
version: '3'
services:
traefik:
image: "traefik:latest"
container_name: traefik
restart: unless-stopped
# PORTS
ports:
# HTTP entrypoint
# Exposed on all external addresses (0.0.0.0:80)
- "80:80"
# Traefik API & Dashboard
# Accessible on http://<host>:8080
- "8080:8080"
# COMMAND (STATIC CONFIGURATION)
command:
# Enable Traefik API & Dashboard
- "--api.dashboard=true"
- "--api.insecure=true"
# Log settings
- "--log.level=INFO"
- "--accesslog=true"
# EntryPoints
- "--entrypoints.web.address=:80"
- "--entrypoints.traefik.address=:8080"
# Docker provider
- "--providers.docker=true"
- "--providers.docker.exposedbydefault=false"
# Optional: file provider for dynamic config
- "--providers.file.directory=/etc/traefik/dynamic"
- "--providers.file.watch=true"
# Global settings
- "--global.checknewversion=true"
- "--global.sendanonymoususage=false"
# VOLUMES
volumes:
# Docker socket (required for Docker provider)
- /var/run/docker.sock:/var/run/docker.sock:ro
# Dynamic configuration directory (middlewares, routers, TLS, etc.)
- ./dynamic:/etc/traefik/dynamic:ro
# Logs (optional)
- ./logs:/logs
# NETWORKS
networks:
- traefik
# LABELS (OPTIONAL SELF-ROUTING)
labels:
# Enable Traefik for this container
- "traefik.enable=true"
# Router for dashboard (via Traefik itself)
- "traefik.http.routers.traefik.rule=Host(`traefik.localhost`)"
- "traefik.http.routers.traefik.entrypoints=web"
- "traefik.http.routers.traefik.service=api@internal"
# NETWORK DEFINITIONS
networks:
traefik:
name: traefik
driver: bridge

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -euo pipefail
########################################
# CONFIG
########################################
COMPOSE_FILE_NAME="docker-compose.yml"
########################################
# ARGUMENT CHECK
########################################
if [[ $# -ne 1 ]]; then
echo "Usage: $0 {up|down}"
exit 1
fi
ACTION="$1"
if [[ "$ACTION" != "up" && "$ACTION" != "down" ]]; then
echo "Invalid action: $ACTION"
echo "Allowed actions: up, down"
exit 1
fi
########################################
# SAVE STARTING DIRECTORY
########################################
START_DIR="$(pwd)"
########################################
# FIND COMPOSE FILES
########################################
mapfile -t COMPOSE_DIRS < <(
find . -type f -name "$COMPOSE_FILE_NAME" -print0 \
| xargs -0 -n1 dirname | sort
)
########################################
# LOOP THROUGH DIRECTORIES
########################################
for DIR in "${COMPOSE_DIRS[@]}"; do
echo "----------------------------------------"
echo "Processing: $DIR"
echo "Action: docker-compose $ACTION"
echo "----------------------------------------"
cd "$DIR"
if [[ "$ACTION" == "up" ]]; then
docker-compose up -d
else
docker-compose down
fi
cd "$START_DIR"
done
echo "========================================"
echo "Completed docker-compose $ACTION for all stacks"
echo "========================================"

View File

@@ -0,0 +1,28 @@
version: '3'
services:
k9e:
image: kevinmidboe/k9e.no:latest
container_name: k9e
restart: unless-stopped
# NETWORK
networks:
- traefik
# TRAEFIK LABELS
labels:
# Enable Traefik for this container
- "traefik.enable=true"
# Router definition
- "traefik.http.routers.k9e.rule=Host(`k9e.no`)"
- "traefik.http.routers.k9e.entrypoints=web"
# Service definition
- "traefik.http.services.k9e.loadbalancer.server.port=80"
# NETWORK DEFINITIONS
networks:
traefik:
external: true

View File

@@ -0,0 +1,28 @@
version: '3'
services:
planetposen-original:
image: kevinmidboe/planetposen-original:latest
container_name: planetposen
restart: unless-stopped
# NETWORK
networks:
- traefik
# TRAEFIK LABELS
labels:
# Enable Traefik for this container
- "traefik.enable=true"
# Router definition
- "traefik.http.routers.planetposen-original.rule=Host(`planetposen.no`)"
- "traefik.http.routers.planetposen-original.entrypoints=web"
# Service definition
- "traefik.http.services.planetposen-original.loadbalancer.server.port=80"
# NETWORK DEFINITIONS
networks:
traefik:
external: true

View File

@@ -0,0 +1,29 @@
version: '3'
services:
whoami:
image: traefik/whoami
container_name: whoami
restart: unless-stopped
# NETWORK
networks:
- traefik
# TRAEFIK LABELS
labels:
# Enable Traefik for this container
- "traefik.enable=true"
# Router definition
- "traefik.http.routers.whoami.rule=Host(`whoami.schleppe.cloud`)"
- "traefik.http.routers.whoami.entrypoints=web"
# Service definition
- "traefik.http.services.whoami.loadbalancer.server.port=80"
# NETWORK DEFINITIONS
networks:
traefik:
external: true

2
hetzner-pulumi/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
/bin/
/node_modules/

View File

@@ -0,0 +1,6 @@
name: hetzner-pulumi
description: Manages schleppe ha project hetzner resources
runtime:
name: nodejs
options:
packagemanager: yarn

141
hetzner-pulumi/index.ts Normal file
View File

@@ -0,0 +1,141 @@
import {
subNetwork,
regionalNetwork,
allowHttp,
allowSSHToCurrentIP,
floatingIP,
attach,
} from "./resources/network";
import { server } from "./resources/compute";
import { dns } from "./resources/cloudflare";
import {
summarizeServer,
summarizeNetwork,
summarizeSubNetwork,
summarizeFloatingIp,
summarizeFirewall,
} from "./resources/utils";
import {
VmSize,
OS,
NetworkRegion,
NetworkRole,
ServerLocations,
} from "./resources/types";
// regional vnet
const eu = regionalNetwork("ha-net-eu", "10.24.0.0/18", NetworkRegion.eu);
const usEast = regionalNetwork(
"ha-net-us",
"10.25.0.0/18",
NetworkRegion.usEast,
);
// subnets for regional vnets
const network = {
eu: {
lb: subNetwork(eu, NetworkRole.lb, NetworkRegion.eu, "10.24.1.0/26"),
cache: subNetwork(eu, NetworkRole.cache, NetworkRegion.eu, "10.24.2.0/26"),
web: subNetwork(eu, NetworkRole.web, NetworkRegion.eu, "10.24.3.0/26"),
// db: subNetwork(eu, NetworkRole.db, "10.24.4.0/24")
},
usEast: {
lb: subNetwork(
usEast,
NetworkRole.lb,
NetworkRegion.usEast,
"10.25.1.0/26",
),
cache: subNetwork(
usEast,
NetworkRole.cache,
NetworkRegion.usEast,
"10.25.2.0/26",
),
web: subNetwork(
usEast,
NetworkRole.web,
NetworkRegion.usEast,
"10.25.3.0/26",
),
},
};
// shorthand aliases for locations & subnets
const nbg = ServerLocations.nuremberg;
const ash = ServerLocations.ashburn;
const [EU_LB, US_LB, EU_CACHE, US_CACHE, EU_WEB, US_WEB] = [
network.eu.lb,
network.usEast.lb,
network.eu.cache,
network.usEast.cache,
network.eu.web,
network.usEast.web,
];
// compute - server resources
const haEU1 = server("haproxy-1", VmSize.cx23, OS.debian, nbg, EU_LB, true);
const haEU2 = server("haproxy-2", VmSize.cx23, OS.debian, nbg, EU_LB, true);
const haUS1 = server("haproxy-1", VmSize.cpx11, OS.debian, ash, US_LB, true);
const haUS2 = server("haproxy-2", VmSize.cpx11, OS.debian, ash, US_LB, true);
const cacheEU1 = server("varnish-1", VmSize.cx23, OS.debian, nbg, EU_CACHE);
const cacheEU2 = server("varnish-2", VmSize.cx23, OS.debian, nbg, EU_CACHE);
const cacheUS1 = server("varnish-1", VmSize.cpx11, OS.debian, ash, US_CACHE);
const cacheUS2 = server("varnish-2", VmSize.cpx11, OS.debian, ash, US_CACHE);
const webEU1 = server("web-1", VmSize.cx23, OS.debian, nbg, EU_WEB);
const webEU2 = server("web-2", VmSize.cx23, OS.debian, nbg, EU_WEB);
const webUS1 = server("web-1", VmSize.cpx11, OS.debian, ash, US_WEB);
// floating IPs
const euFloatingIP = floatingIP("schleppe-ha-nbg", haEU1);
const usFloatingIP = floatingIP("schleppe-ha-va", haUS1);
const floatingIPs = [euFloatingIP, usFloatingIP];
const domains = ["k9e.no", "planetposen.no", "whoami.schleppe.cloud"];
// Update Cloudflare DNS
domains.forEach((domain) => {
dns(domain, euFloatingIP, "eu-fip");
dns(domain, usFloatingIP, "us-fip");
});
// firewall
const allowSSH = allowSSHToCurrentIP();
const firewalls = [allowHttp, allowSSH];
// firewall attachments
attach("ssh-fa", allowSSH, [haEU1, haEU2, haUS1, haUS2]);
// exports
const servers = [
haEU1,
haEU2,
haUS1,
haUS2,
cacheEU1,
cacheEU2,
cacheUS1,
cacheUS2,
webEU1,
webEU2,
webUS1,
];
const networks = [eu, usEast];
const subNetworks = [
network.eu.lb,
network.eu.cache,
network.eu.web,
network.usEast.lb,
network.usEast.cache,
network.usEast.web,
];
export const inventory = {
vms: servers.map(summarizeServer),
networks: networks.map(summarizeNetwork),
subnetworks: subNetworks.map(summarizeSubNetwork),
firewalls: firewalls.map(summarizeFirewall),
floatingIps: floatingIPs.map(summarizeFloatingIp),
domains,
};

View File

@@ -0,0 +1,15 @@
{
"name": "hetzner-pulumi",
"main": "index.ts",
"devDependencies": {
"@types/node": "^18",
"typescript": "^5.0.0"
},
"dependencies": {
"@pulumi/cloudflare": "^6.12.0",
"@pulumi/hcloud": "^1.29.0",
"@pulumi/pulumi": "^3.213.0",
"@pulumi/random": "^4.18.4",
"zod": "^4.2.1"
}
}

View File

@@ -0,0 +1,44 @@
import * as hcloud from "@pulumi/hcloud";
import * as cloudflare from "@pulumi/cloudflare";
async function getZone(domain: string) {
const zones = await cloudflare.getZones();
// pick the zone whose name matches the requested domain
return zones.results.find((zone) => domain.includes(zone.name)) ?? null;
}
export async function dns(
domain: string,
ipAddress: hcloud.FloatingIp,
suffix: string,
) {
const ip = ipAddress.ipAddress;
const name = `${domain}-${suffix}_dns_record`;
const comment = "managed by pulumi - schleppe-ha-project";
const zone = await getZone(domain);
if (!zone)
throw new Error(
"no matching zone found! check cloudflare token scopes & registration",
);
return new cloudflare.DnsRecord(
name,
{
zoneId: zone.id,
name: domain,
ttl: 1,
type: "A",
content: ip,
proxied: false,
comment,
},
{ dependsOn: [ipAddress] },
);
}

View File

@@ -0,0 +1,86 @@
import * as pulumi from "@pulumi/pulumi";
import * as hcloud from "@pulumi/hcloud";
import * as random from "@pulumi/random";
import { config } from "./config";
import { getCheapestServerType, topicedLabel } from "./utils";
import { VmSize, OS, ServerLocations } from "./types";
// “Tag” servers using labels. Hetzner firewalls can target servers by label selectors.
const serverLabels = {
env: pulumi.getStack(),
managed: "pulumi",
};
const sshPublicKey = config.require("sshPublicKey");
const sshKey = new hcloud.SshKey("ssh-key", {
name: `pulumi-${pulumi.getStack()}-ssh`,
publicKey: sshPublicKey,
});
const serverName = (name: string, location: string) => {
if (name.includes("-")) {
const [n, id] = name.split("-");
return `${n}-${location}-${id}`;
}
return `${name}-${location}`;
};
export function server(
name: string,
size: VmSize,
os: OS = OS.debian,
location: ServerLocations,
network: hcloud.NetworkSubnet,
ipv4: boolean = false,
): hcloud.Server {
const extraLabel = topicedLabel(name);
name = serverName(name, location);
const networkId = network.networkId.apply((id) => String(id).split("-")[0]);
const server = new hcloud.Server(
name,
{
name,
image: os,
serverType: size,
location,
backups: false,
publicNets: [
{
ipv4Enabled: ipv4,
ipv6Enabled: true,
},
],
networks: [
{
networkId: networkId.apply((nid) => Number(nid)),
},
],
sshKeys: [sshKey.name],
labels: {
...serverLabels,
...extraLabel
},
},
{ dependsOn: [network] },
);
const serverNet = new hcloud.ServerNetwork(
`${name}-servernet-${location}`,
{
serverId: server.id.apply((id) => Number(id)),
subnetId: network.id,
},
{
dependsOn: [network, server],
parent: server,
deleteBeforeReplace: true,
ignoreChanges: [ 'serverId', 'ip', 'aliasIps', 'networkId', 'subnetId' ]
},
);
return server;
}

View File

@@ -0,0 +1,7 @@
import * as pulumi from "@pulumi/pulumi";
const config = new pulumi.Config();
export {
config
}

View File

@@ -0,0 +1,124 @@
import * as pulumi from "@pulumi/pulumi";
import * as hcloud from "@pulumi/hcloud";
import type { NetworkRegion } from "./types";
import { currentIPAddress } from "./utils";
// NETWORKS
const networkName = (name: string, region: NetworkRegion) =>
`${name}-net-${region}`;
export function regionalNetwork(
name: string,
cidr: string,
region: NetworkRegion,
) {
const parentNetworkRange = 22;
const [ip, _] = cidr.split("/");
const net = new hcloud.Network(name, {
name,
ipRange: `${ip}/${parentNetworkRange}`,
labels: {
region,
hiearchy: "parent",
},
});
return net;
}
export function subNetwork(
parentNetwork: hcloud.Network,
prefix: string,
region: NetworkRegion,
cidr: string,
): hcloud.NetworkSubnet {
const name = `${prefix}-subnet-${region}`;
const net = new hcloud.NetworkSubnet(
name,
{
networkId: parentNetwork.id.apply((id) => Number(id)),
type: "cloud",
networkZone: region,
ipRange: cidr,
},
{ parent: parentNetwork, dependsOn: [parentNetwork] },
);
return net;
}
// FLOATING IPs
export function floatingIP(name: string, server: hcloud.Server) {
return new hcloud.FloatingIp(
name,
{
type: "ipv4",
serverId: server.id.apply((i) => Number(i)),
},
{ dependsOn: [server] },
);
}
// FIREWALL RULES
export const allowHttp = new hcloud.Firewall("allow-http", {
name: "allow-http",
applyTos: [
{
labelSelector: `role=load-balancer,env=${pulumi.getStack()}`,
},
],
rules: [
{
direction: "in",
protocol: "tcp",
port: "80",
sourceIps: ["0.0.0.0/0", "::/0"],
description: "Allow HTTP",
},
{
direction: "in",
protocol: "tcp",
port: "443",
sourceIps: ["0.0.0.0/0", "::/0"],
description: "Allow HTTPS",
},
{
direction: "in",
protocol: "udp",
port: "443",
sourceIps: ["0.0.0.0/0", "::/0"],
description: "Allow QUIC",
},
],
});
export function allowSSHToCurrentIP() {
// currentIPAddress() resolves asynchronously; pulumi Inputs accept promises,
// and Hetzner expects CIDR notation, so append /32
const ip = currentIPAddress().then((addr) => `${addr}/32`);
return new hcloud.Firewall("allow-ssh", {
name: "allow-ssh",
rules: [
{
direction: "in",
protocol: "tcp",
port: "22",
sourceIps: [ip],
description: "Allow SSH from the current public IP only",
},
],
});
}
export function attach(
name: string,
firewall: hcloud.Firewall,
servers: hcloud.Server[],
) {
return new hcloud.FirewallAttachment(name, {
firewallId: firewall.id.apply((id) => Number(id)),
serverIds: servers.map((server) => server.id.apply((id) => Number(id))),
});
}

View File

@@ -0,0 +1,2 @@
export * from "./network";
export * from "./server";

View File

@@ -0,0 +1,12 @@
export enum NetworkRegion {
eu = "eu-central",
usWest = "us-west",
usEast = "us-east",
}
export enum NetworkRole {
lb = "load-balancer",
cache = "varnish-cache",
web = "webserver",
db = "database",
}

View File

@@ -0,0 +1,22 @@
export enum VmSize {
small = "small",
medium = "medium",
large = "large",
cx23 = "cx23",
cax11 = "cax11",
cpx11 = "cpx11"
}
export enum OS {
debian = "debian-13",
ubuntu = "ubuntu",
}
export enum ServerLocations {
helsinki = "hel1",
falkenstein = "fsn1",
nuremberg = "nbg1",
hillsboro = "hil",
ashburn = "ash",
singapore = "sin",
}

View File

@@ -0,0 +1,165 @@
import * as pulumi from "@pulumi/pulumi";
import * as hcloud from "@pulumi/hcloud";
import { z } from "zod";
import * as crypto from "node:crypto";
/**
* Region abstraction exposed to users
*/
export type PricingRegion = "eu" | "us" | "ap";
/**
* Hetzner region → locations mapping
*/
const regionToLocations: Record<PricingRegion, string[]> = {
eu: ["nbg1", "fsn1", "hel1"],
us: ["ash", "hil"],
ap: ["sin"],
};
const HCLOUD_API = "https://api.hetzner.cloud/v1";
/**
* Runtime validation for Hetzner /server_types response
*/
const serverTypesResponseSchema = z.object({
server_types: z.array(
z.object({
name: z.string(),
deprecated: z.boolean().optional(),
prices: z.array(
z.object({
location: z.string(),
price_monthly: z.object({
gross: z.string(),
}),
price_hourly: z.object({
gross: z.string(),
}),
}),
),
}),
),
});
/**
* Returns the cheapest available server type name
* for a given abstract region (eu | us | ap).
*
* Pricing basis: hourly gross
*/
export function getCheapestServerType(
region: PricingRegion,
): pulumi.Output<string> {
const locations = regionToLocations[region];
const hcloudCfg = new pulumi.Config("hcloud");
const token = hcloudCfg.requireSecret("token");
return pulumi.all([token]).apply(async ([t]) => {
const res = await fetch(`${HCLOUD_API}/server_types`, {
headers: { Authorization: `Bearer ${t}` },
});
if (!res.ok) {
throw new pulumi.RunError(
`Hetzner API error: ${res.status} ${res.statusText}`,
);
}
const json = await res.json();
const parsed = serverTypesResponseSchema.safeParse(json);
if (!parsed.success) {
const hash = crypto
.createHash("sha256")
.update(JSON.stringify(json))
.digest("hex")
.slice(0, 12);
throw new pulumi.RunError(
`Unexpected Hetzner /server_types payload (sha256:${hash})`,
);
}
const cheapest = parsed.data.server_types
.filter((st) => st.deprecated !== true)
.flatMap((st) =>
st.prices
.filter((p) => locations.includes(p.location))
.map((p) => ({
name: st.name,
price: Number.parseFloat(p.price_hourly.gross),
})),
)
.filter((x) => Number.isFinite(x.price))
.sort((a, b) => a.price - b.price)[0];
if (!cheapest) {
throw new pulumi.RunError(
`No priced server types found for region=${region}`,
);
}
return cheapest.name;
});
}
interface Label {
role?: string
}
export function topicedLabel(name: string) {
let labels: Label = {};
if (name.includes("haproxy")) {
labels.role = 'load-balancer';
} else if (name.includes("web")) {
labels.role = 'web'
}
return labels
}
export const summarizeServer = (s: hcloud.Server) => ({
name: s.name,
publicIpv4: s.ipv4Address,
publicIpv6: s.ipv6Address,
privateIp: s.networks.apply(nets => nets?.[0]?.ip ?? 'null'),
});
export const summarizeNetwork = (n: hcloud.Network) => ({
name: n.name,
cidr: n.ipRange
});
export const summarizeSubNetwork = (n: hcloud.NetworkSubnet) => ({
gateway: n.gateway,
cidr: n.ipRange,
zone: n.networkZone,
type: n.type
});
export const summarizeFloatingIp = (floatingIp: hcloud.FloatingIp) => ({
name: floatingIp.name,
address: floatingIp.ipAddress,
attachedTo: floatingIp.serverId,
location: floatingIp.homeLocation,
labels: floatingIp.labels
})
export const summarizeFirewall = (firewall: hcloud.Firewall) => ({
name: firewall.name,
rules: firewall.rules,
labels: firewall.labels
})
export async function currentIPAddress(): Promise<string> {
// ifconfig.me returns the bare IP; trim any trailing whitespace
const resp = await fetch("https://ifconfig.me/ip");
return (await resp.text()).trim();
}

View File

@@ -0,0 +1,18 @@
{
"compilerOptions": {
"strict": true,
"outDir": "bin",
"target": "es2020",
"module": "commonjs",
"moduleResolution": "node",
"sourceMap": true,
"experimentalDecorators": true,
"pretty": true,
"noFallthroughCasesInSwitch": true,
"noImplicitReturns": true,
"forceConsistentCasingInFileNames": true
},
"files": [
"index.ts"
]
}

1924
hetzner-pulumi/yarn.lock Normal file

File diff suppressed because it is too large