ansible playbooks for retailor.io infrastructure

commit 92cb10ba27
2025-03-03 19:33:36 +01:00, committed by KevinMidboe
139 changed files with 33603 additions and 0 deletions

10
.env.example Normal file

@@ -0,0 +1,10 @@
ELASTIC_HOSTS=http://elasticsearch:9200
ELASTIC_PASSWORD=
KIBANA_PASSWORD=
GRAFANA_PASSWORD=
LOKI_S3_BUCKET=
LOKI_S3_ENDPOINT=
LOKI_S3_KEY_ID=
LOKI_S3_ACCESS_KEY=
LOKI_S3_URL=

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
.env

3
.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "roles/tailscale"]
path = roles/tailscale
url = https://github.com/artis3n/ansible-role-tailscale

33
README.md Normal file

@@ -0,0 +1,33 @@
# playbooks retailor.io
Includes Ansible playbooks for provisioning & configuring retailor server nodes.
## run
Directory `plays/` contains all available plays. To run a play, specify the inventory file and the play. By default a play runs against all hosts in `inventory.ini` to keep state consistent; to target a single host, pass the limit flag, e.g. `-l web2`.
```bash
ansible-playbook -i inventory.ini plays/tailscale.yml
```
Specifying a single host:
```bash
ansible-playbook -i inventory.ini plays/firewall.yml -l cache
```
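For a dry run, pass `ansible-playbook`'s standard `--check` and `--diff` flags to preview changes without applying them (modules without check-mode support are skipped):
```bash
ansible-playbook -i inventory.ini plays/firewall.yml -l cache --check --diff
```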
## install
Install Ansible using Homebrew, APT, or another package manager.
macOS:
```bash
brew install ansible
```
Ubuntu:
```bash
sudo apt install ansible
```
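Verify the install; the `ansible` package ships the `ansible-playbook` binary used above:
```bash
ansible-playbook --version
```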

12
group_vars/all.yml Normal file

@@ -0,0 +1,12 @@
---
install_cadvisor_exporter: false
install_nginx_exporter: false
install_redis_exporter: false
users:
- username: forge
oh_my_zsh:
theme: robbyrussell
- username: root
oh_my_zsh:
theme: robbyrussell

7
group_vars/app.yml Normal file

@@ -0,0 +1,7 @@
---
custom_firewall_ports:
- 2049
- 1111
apt_packages:
- nfs-server
description: 'app node for hosting retailor database & persistent assets, serves NFS mount'

5
group_vars/cache.yml Normal file

@@ -0,0 +1,5 @@
---
custom_firewall_ports:
- 6379
install_redis_exporter: true
description: 'cache node for retailor redis cache'

10
group_vars/hetzner.yml Normal file

@@ -0,0 +1,10 @@
---
install_cadvisor_exporter: true
users:
- username: kevin
oh_my_zsh:
theme: robbyrussell
- username: root
oh_my_zsh:
theme: robbyrussell

6
group_vars/lb.yml Normal file

@@ -0,0 +1,6 @@
---
custom_firewall_ports:
- 80
- 443
install_nginx_exporter: true
description: "load balancer serving web servers for retailor.io"

8
group_vars/web.yml Normal file

@@ -0,0 +1,8 @@
---
custom_firewall_ports:
- 80
- 443
apt_packages:
- nfs-common
install_nginx_exporter: true
description: 'webserver node for hosting retailor laravel project'

4
group_vars/worker.yml Normal file

@@ -0,0 +1,4 @@
---
apt_packages:
- nfs-common
description: 'queue server for laravel'

22
inventory.ini Normal file

@@ -0,0 +1,22 @@
[web]
web1 ansible_host=web1 ansible_port=22 ansible_user=root
web2 ansible_host=web2 ansible_port=22 ansible_user=root
#
[worker]
worker1 ansible_host=worker1 ansible_port=22 ansible_user=root
worker2 ansible_host=worker2 ansible_port=22 ansible_user=root
[lb]
load-balancer ansible_host=loadbalancer ansible_port=22 ansible_user=root
[cache]
cache ansible_host=cache ansible_port=22 ansible_user=root
[metrics]
grafana ansible_host=grafana ansible_port=22 ansible_user=root
elasticsearch ansible_host=elasticsearch ansible_port=22 ansible_user=root
[hetzner]
grafana
elasticsearch


@@ -0,0 +1,9 @@
---
- name: Basic setup, brute force protection, firewall and log shipping
hosts: all
roles:
- role: roles/base-packages # - Basic server setup and configuration
- role: roles/fail2ban # - SSH Brute force protection
- role: roles/firewall # - Firewall, if firewall_enable is true
- role: roles/oh-my-zsh
- role: roles/motd

12
plays/elasticsearch.yml Normal file

@@ -0,0 +1,12 @@
---
- name: Install ELK stack using Docker
hosts: elasticsearch
vars:
elk_version: "8.17.0"
roles:
# TODO check for docker install before purging it. This nukes containers
# we might not want destroyed.
# Leave a file, or read the same input requirements into an apt check
# which skips purge if satisfied.
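# One possible guard (untested sketch): in pre_tasks register
#   command: docker --version
# with failed_when: false into docker_check, then gate the docker role
# with `when: docker_check.rc != 0` so an existing install is not purged.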
# - role: roles/docker
- role: roles/elasticsearch

5
plays/firewall.yml Normal file

@@ -0,0 +1,5 @@
---
- name: Install ufw service and enable firewall ports
hosts: all
roles:
- role: roles/firewall


@@ -0,0 +1,10 @@
---
- name: Install nginx filebeat log exporter
hosts: web
roles:
- role: roles/filebeat-nginx
- name: Install laravel filebeat log exporter
hosts: web
roles:
- role: roles/filebeat-laravel

17
plays/metrics-node.yml Normal file

@@ -0,0 +1,17 @@
---
- name: Install prometheus exporters
hosts: all
vars:
node_exporter_version: "1.9.0"
nginx_exporter_version: "1.4.1"
cadvisor_version: "0.46.0"
fail2ban_exporter_version: "0.10.2"
roles:
- role: roles/node_exporter
- role: roles/fail2ban_exporter
- role: roles/redis_exporter
when: install_redis_exporter
- role: roles/cadvisor
when: install_cadvisor_exporter
- role: roles/nginx_prometheus_exporter
when: install_nginx_exporter

5
plays/ping.yml Normal file

@@ -0,0 +1,5 @@
- hosts: all
tasks:
- name: Ping all hosts
ping:

10
plays/prome-grafana.yml Normal file

@@ -0,0 +1,10 @@
---
- name: Install prometheus & grafana using Docker
hosts: grafana
vars:
prometheus_version: "latest"
grafana_version: "latest"
cadvisor_version: "0.46.0"
roles:
# - role: roles/docker
- role: roles/prometheus-grafana

7
plays/tailscale.yml Normal file

@@ -0,0 +1,7 @@
---
- name: Install tailscale
hosts: all
roles:
- role: roles/tailscale
vars:
tailscale_authkey: "{{ lookup('env', 'TAILSCALE_KEY') }}"
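# TAILSCALE_KEY is read from the controller's environment; run e.g.
# (key value illustrative):
#   TAILSCALE_KEY=tskey-auth-xxxx ansible-playbook -i inventory.ini plays/tailscale.yml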

23
plays/upgrade.yml Normal file

@@ -0,0 +1,23 @@
---
- hosts: all
gather_facts: yes
tasks:
- name: Perform a dist-upgrade.
ansible.builtin.apt:
upgrade: dist
update_cache: yes
- name: Check if a reboot is required.
ansible.builtin.stat:
path: /var/run/reboot-required
get_checksum: no
register: reboot_required_file
- name: Reboot the server (if required).
ansible.builtin.reboot:
when: reboot_required_file.stat.exists
- name: Remove dependencies that are no longer required.
ansible.builtin.apt:
autoremove: yes


@@ -0,0 +1,22 @@
# Default base role values
---
# Skip installing the user quality-of-life packages in base_packages_user
apt_skip_user_packages: false
base_packages:
- git
- vim
- curl
- dnsutils
# - ntp
- ssh
- fail2ban
- openssh-server
- openssl
base_packages_user:
- htop
- tree
- ncdu
- nload


@@ -0,0 +1,31 @@
# APT Related tasks for the base role
---
# Add repositories
- name: install apt https support
apt: pkg=apt-transport-https state=latest update_cache=yes cache_valid_time=3600
# Add default packages
- name: install base packages
apt: pkg={{ item }}
with_items: "{{ base_packages }}"
# Add user defined packages
- name: install packages for user quality of life
apt:
pkg: "{{ item }}"
state: present
update_cache: yes
with_items: "{{ base_packages_user | default([]) }}"
when: not apt_skip_user_packages | default(False)
# Add host specific packages
- name: install packages
apt:
pkg: "{{ item }}"
state: present
update_cache: yes
with_items: "{{ apt_packages | default([]) }}"
# Dist-upgrade
# - name: perform dist-upgrade
# apt: upgrade=dist


@@ -0,0 +1,3 @@
---
cadvisor_version: "0.46.0"
cadvisor_binary_url: "https://github.com/google/cadvisor/releases/download/v{{ cadvisor_version }}/cadvisor-v{{ cadvisor_version }}-linux-amd64"


@@ -0,0 +1,5 @@
---
- name: Restart cAdvisor
systemd:
name: cadvisor
state: restarted


@@ -0,0 +1,13 @@
---
galaxy_info:
author: Your Name
description: Ansible role to deploy cAdvisor using Docker
license: MIT
min_ansible_version: "2.9"
platforms:
- name: Debian
versions:
- all
galaxy_tags:
- monitoring
- cadvisor


@@ -0,0 +1,39 @@
---
- name: Ensure the cAdvisor binary is downloaded
get_url:
url: "{{ cadvisor_binary_url }}"
dest: "/usr/local/bin/cadvisor"
mode: '0755'
tags:
- install_cadvisor
- name: Create cAdvisor data directory
file:
path: "/var/lib/cadvisor"
state: directory
mode: '0755'
tags:
- create_cadvisor_data_dir
- name: Create cAdvisor service
template:
src: "cadvisor.service.j2"
dest: "/etc/systemd/system/cadvisor.service"
mode: '0644'
tags:
- create_cadvisor_service
- name: Reload systemd to pick up the cAdvisor service
systemd:
daemon_reload: yes
tags:
- reload_systemd
- name: Enable and start the cAdvisor service
systemd:
name: cadvisor
state: started
enabled: yes
tags:
- start_cadvisor


@@ -0,0 +1,5 @@
---
- name: Install cAdvisor (native binary)
include_tasks: install.yml
tags:
- install


@@ -0,0 +1,12 @@
[Unit]
Description=cAdvisor
Documentation=https://github.com/google/cadvisor
[Service]
ExecStart=/usr/local/bin/cadvisor
Restart=always
LimitNOFILE=4096
User=root
[Install]
WantedBy=multi-user.target


@@ -0,0 +1,15 @@
clean_install_remove_packages:
- docker.io
- docker-doc
- docker-compose
- podman-docker
- containerd
- runc
install_packages:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
# variables referenced by check_distro.yml / install.yml but not defined
# elsewhere in the role; assumed values
supported_distros:
- ubuntu
- debian
docker_package: docker-ce


@@ -0,0 +1,5 @@
---
- name: Restart Docker
systemd:
name: docker
state: restarted


@@ -0,0 +1,13 @@
---
galaxy_info:
author: Your Name
description: Ansible role to install and manage Docker on Debian
license: MIT
min_ansible_version: "2.9"
platforms:
- name: Debian
versions:
- all
galaxy_tags:
- docker
dependencies: []


@@ -0,0 +1,27 @@
---
- name: Check if the current distro is supported (Ubuntu or Debian)
set_fact:
distro_supported: "{{ ansible_facts['distribution'].lower() in supported_distros }}"
tags:
- check_distro
- name: Set installation URL based on the distro
set_fact:
install_url: "https://download.docker.com/linux/{{ ansible_facts['distribution'].lower() }} {{ ansible_distribution_release }} stable"
when: distro_supported
tags:
- set_url
- name: Log Unsupported Distro
debug:
msg: "The {{ ansible_facts['distribution'] }} distribution is not supported. Skipping Docker installation."
when: not distro_supported
tags:
- unsupported_distro
- name: Skip Docker installation task if distro is unsupported
meta: end_play
when: not distro_supported
tags:
- end_play


@@ -0,0 +1,12 @@
---
- name: Add Docker repository
apt_repository:
repo: deb {{ install_url }}
state: present
when: distro_supported
- name: Install Docker
apt:
name: "{{ docker_package }}"
state: present
when: distro_supported


@@ -0,0 +1,11 @@
---
- name: Include distro check tasks
include_tasks: check_distro.yml
tags:
- check_distro
- name: Include Docker installation tasks if distro is supported
include_tasks: install.yml
when: distro_supported
tags:
- install


@@ -0,0 +1,45 @@
---
- name: Clean install by removing any docker package
package: name={{ item }} state=absent
with_items: "{{ clean_install_remove_packages }}"
- name: Ensure curl & ca-certs are installed
package:
name:
- ca-certificates
- curl
- gnupg
state: latest
- name: Ensure docker keyring file exists
file:
path: /etc/apt/keyrings/docker.gpg
state: touch
- name: Download docker gpg key and add to keyrings
shell: |
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
- name: Sign and add docker deb source
shell: |
echo \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
tee /etc/apt/sources.list.d/docker.list > /dev/null
- name: Update apt sources
become: true
apt:
update_cache: yes
cache_valid_time: 1
- name: Install docker packages
package: name={{ item }} state=latest
with_items: "{{ install_packages }}"
- name: Ensure group docker exists
group:
name: docker
state: present


@@ -0,0 +1,14 @@
---
- name: Start and enable Docker service
systemd:
name: docker
enabled: yes
state: started
when: distro_supported


@@ -0,0 +1,23 @@
# elasticsearch
This play configures the ELK stack using Docker; the stack is exposed without HTTPS. Container variables live in the `tasks/SERVICE.yml` files, where environment variables for the services & Java can also be configured.
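For example, the Elasticsearch JVM heap is set through the container's `env` block in `tasks/elasticsearch.yml`; a minimal sketch of raising it (values illustrative):
```yaml
env:
  # fixed 4 GB heap; keep Xms and Xmx equal to avoid heap resizing
  ES_JAVA_OPTS: "-Xms4g -Xmx4g"
```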
The following are manual steps required during setup.
## elastic
After the elasticsearch container is created, SSH into the host running it and generate a new password for the `elastic` user:
```bash
docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
```
## kibana
Create a password for `kibana_system` user:
```bash
export ELASTIC_PASSWORD=
export KIBANA_PASSWORD=
curl -s -X POST -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" http://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}";
```
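The new credentials can be verified with an authenticated request against Elasticsearch (hostname as in `.env.example`):
```bash
curl -s -u "elastic:${ELASTIC_PASSWORD}" http://elasticsearch:9200
```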


@@ -0,0 +1,16 @@
---
galaxy_info:
author: Your Name
description: Ansible role to deploy prometheus & grafana using Docker
license: MIT
min_ansible_version: "2.9"
platforms:
- name: Debian
versions:
- all
galaxy_tags:
- monitoring
- cadvisor
dependencies:
- docker


@@ -0,0 +1,46 @@
---
- name: Create a Docker network for Elasticsearch
docker_network:
name: elk_network
state: present
- name: Pull Elasticsearch Docker image
docker_image:
name: docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}
source: pull
- name: Create Elasticsearch configuration file directory on host
file:
path: /etc/elasticsearch
state: directory
mode: '0755'
# - name: Create Elasticsearch configuration file
# template:
# src: elasticsearch.yml.j2
# dest: /etc/elasticsearch/elasticsearch.yml
- name: Start Elasticsearch container
docker_container:
name: elasticsearch
image: docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}
state: started
restart: yes
restart_policy: unless-stopped
published_ports:
- "9200:9200"
volumes:
- /etc/elasticsearch/esdata:/usr/share/elasticsearch/data
networks:
- name: elk_network
ipv4_address: 172.19.0.2
env:
node.name: elasticsearch
cluster.name: retailor-elk
discovery.type: single-node
bootstrap.memory_lock: "true"
# limits elasticsearch to 2 GB of RAM
ES_JAVA_OPTS: "-Xms1g -Xmx2g"
# disables SSL & xpack security
xpack.security.http.ssl.enabled: "false"


@@ -0,0 +1,37 @@
---
- name: Create a Docker network for Kibana
docker_network:
name: elk_network
state: present
- name: Create kibana directory on host
file:
path: /etc/kibana
state: directory
mode: "0755"
- name: Pull Kibana Docker image
docker_image:
name: docker.elastic.co/kibana/kibana:{{ elk_version }}
source: pull
# TODO remember to move CA cert from elastic to Kibana
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt kibana:/usr/share/kibana/config/certs/ca/http_ca.crt
- name: Start Kibana container
docker_container:
name: kibana
image: docker.elastic.co/kibana/kibana:{{ elk_version }}
state: started
restart: yes
restart_policy: unless-stopped
published_ports:
- "5601:5601"
env:
ELASTICSEARCH_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
ELASTICSEARCH_USERNAME: kibana_system
ELASTICSEARCH_PASSWORD: "{{ env_vars.KIBANA_PASSWORD }}"
TELEMETRY_ENABLED: "false"
networks:
- name: elk_network


@@ -0,0 +1,64 @@
---
- name: Create a Docker network for Logstash
docker_network:
name: elk_network
state: present
- name: Create logstash directory on host
file:
path: /etc/logstash
state: directory
mode: "0755"
- name: Copy logstash config
copy:
src: templates/pipelines.yml.j2
dest: /etc/logstash/pipelines.yml
- name: Create logstash directory on host
file:
path: /etc/logstash/pipeline
state: directory
mode: "0755"
- name: Copy logstash input configs
copy:
src: "{{ item }}"
dest: /etc/logstash/pipeline/{{ item | basename | regex_replace('\.j2$', '') }}
loop: "{{ query('fileglob', 'templates/logstash-conf.d/*.j2') }}"
- name: Pull Logstash Docker image
docker_image:
name: docker.elastic.co/logstash/logstash:{{ elk_version }}
source: pull
# TODO remember to move CA cert from elastic to Logstash
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt logstash:/usr/share/logstash/config/certs/ca/http_ca.crt
- name: Start Logstash container
docker_container:
name: logstash
image: docker.elastic.co/logstash/logstash:{{ elk_version }}
state: started
restart: yes
restart_policy: unless-stopped
command:
- /bin/bash
- -c
- |
echo "Waiting for Elasticsearch availability";
until curl -s {{ env_vars.ELASTIC_HOSTS }} | grep -q "missing authentication credentials"; do sleep 1; done;
echo "Starting logstash";
/usr/share/logstash/bin/logstash
published_ports:
- "5044-5049:5044-5049"
volumes:
- /etc/logstash/pipelines.yml:/usr/share/logstash/config/pipelines.yml
- /etc/logstash/pipeline:/usr/share/logstash/pipeline
env:
xpack.monitoring.enabled: "false"
ELASTIC_USER: elastic
ELASTIC_PASSWORD: "{{ env_vars.ELASTIC_PASSWORD }}"
ELASTIC_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
networks:
- name: elk_network


@@ -0,0 +1,7 @@
---
# ensure we have variables from .env files
- include_tasks: ../roles/env/tasks/main.yml
- include_tasks: elasticsearch.yml
- include_tasks: kibana.yml
- include_tasks: logstash.yml


@@ -0,0 +1,272 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
# ============================== Filebeat inputs ===============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
# Change to true to enable this input configuration.
enabled: false
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/nginx/*.log
#- c:\programdata\elasticsearch\logs\*
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
### Multiline options
# Multiline can be used for log messages spanning multiple lines. This is common
# for Java Stack Traces or C-Line Continuation
# The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
#multiline.pattern: ^\[
# Defines if the pattern set under pattern should be negated or not. Default is false.
#multiline.negate: false
# Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
# that was (not) matched before or after or as long as a pattern is not matched based on negate.
# Note: After is the equivalent to previous and before is the equivalent to next in Logstash
#multiline.match: after
# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream
# Change to true to enable this input configuration.
enabled: true
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/nginx/*.log
#- c:\programdata\elasticsearch\logs\*
exclude_files: ['\.gz$']
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#prospector.scanner.exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
#fields:
# level: debug
# review: 1
# ============================== Filebeat modules ==============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
# In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["elastic.schleppe:9200"]
# Protocol - either `http` (default) or `https`.
# protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
# ------------------------------ Logstash Output -------------------------------
output.logstash:
# The Logstash hosts
hosts: ["elasticsearch:5400"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
# ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]
# Certificate for SSL client authentication
# ssl.certificate: "/etc/elk-certs/elk-ssl.crt"
# Client Certificate Key
# ssl.key: "/etc/elk-certs/elk-ssl.key"
# ================================= Processors =================================
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the filebeat.
#instrumentation:
# Set to true to enable instrumentation of filebeat.
#enabled: false
# Environment in which filebeat is running on (eg: staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true


@@ -0,0 +1,17 @@
input {
beats {
port => 5045
}
}
filter {
}
output {
elasticsearch {
index => "laravel-logs-%{+YYYY.MM}"
hosts => "${ELASTIC_HOSTS}"
user => "elastic"
password => "${ELASTIC_PASSWORD}"
}
}


@@ -0,0 +1,24 @@
input {
beats {
port => 5044
}
}
filter {
if [pipeline_id] == "nginx" {
mutate { add_field => { "route" => "nginx_pipeline" } }
} else if [pipeline_id] == "laravel" {
mutate { add_field => { "route" => "laravel_pipeline" } }
}
}
output {
if [pipeline_id] == "nginx" {
pipeline { send_to => "nginx_pipeline" }
} else if [pipeline_id] == "laravel" {
pipeline { send_to => "laravel_pipeline" }
} else {
# Handle unknown cases
stdout { codec => rubydebug }
}
}


@@ -0,0 +1,39 @@
input {
beats {
port => 5044
}
}
filter {
grok {
match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}"]
overwrite => [ "message" ]
}
mutate {
convert => ["response", "integer"]
convert => ["bytes", "integer"]
convert => ["responsetime", "float"]
}
# geoip {
# source => "clientip"
# add_tag => [ "nginx-geoip" ]
# }
date {
match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
remove_field => [ "timestamp" ]
}
# useragent {
# source => "agent"
# }
}
output {
elasticsearch {
index => "weblogs-%{+YYYY.MM}"
hosts => "${ELASTIC_HOSTS}"
user => "elastic"
password => "${ELASTIC_PASSWORD}"
document_type => "nginx_logs"
}
}


@@ -0,0 +1,5 @@
- pipeline.id: nginx_pipeline
path.config: "/usr/share/logstash/pipeline/nginx_pipeline.conf"
- pipeline.id: laravel_pipeline
path.config: "/usr/share/logstash/pipeline/laravel_pipeline.conf"

12
roles/env/tasks/main.yml vendored Normal file

@@ -0,0 +1,12 @@
---
- name: Read .env file
ansible.builtin.slurp:
src: "../.env"
delegate_to: localhost
register: env_file
- name: Parse .env file
ansible.builtin.set_fact:
# decode the slurped file, keep only KEY=VALUE lines, split each on the
# first '=' and build a dict addressable as env_vars.KEY
env_vars: "{{ dict(env_file['content'] | b64decode | split('\n') | select('search', '=') | map('split', '=', 1) | list) }}"


@@ -0,0 +1,3 @@
---
- name: restart fail2ban
service: name=fail2ban state=restarted


@@ -0,0 +1,18 @@
---
- name: install fail2ban
apt: pkg=fail2ban state=latest
- name: /etc/fail2ban/jail.conf
template: src=fail2ban/jail.conf.j2 dest=/etc/fail2ban/jail.conf owner=root group=root mode=644
notify: restart fail2ban
- name: /etc/fail2ban/fail2ban.conf
template: src=fail2ban/fail2ban.conf.j2 dest=/etc/fail2ban/fail2ban.conf owner=root group=root mode=644
notify: restart fail2ban
- name: Enable and start the fail2ban service
systemd:
name: fail2ban
state: started
enabled: yes


@@ -0,0 +1,4 @@
---
- include_tasks: fail2ban.yaml
when: ansible_os_family == "Debian"


@@ -0,0 +1,88 @@
# {{ ansible_managed }}
# Fail2Ban main configuration file
#
# Comments: use '#' for comment lines and ';' (following a space) for inline comments
#
# Changes: in most of the cases you should not modify this
# file, but provide customizations in fail2ban.local file, e.g.:
#
# [DEFAULT]
# loglevel = DEBUG
#
[DEFAULT]
# Option: loglevel
# Notes.: Set the log level output.
# CRITICAL
# ERROR
# WARNING
# NOTICE
# INFO
# DEBUG
# Values: [ LEVEL ] Default: ERROR
#
loglevel = INFO
# Option: logtarget
# Notes.: Set the log target. This could be a file, SYSLOG, STDERR or STDOUT.
# Only one log target can be specified.
# If you change logtarget from the default value and you are
# using logrotate -- also adjust or disable rotation in the
# corresponding configuration file
# (e.g. /etc/logrotate.d/fail2ban on Debian systems)
# Values: [ STDOUT | STDERR | SYSLOG | SYSOUT | FILE ] Default: STDERR
#
logtarget = /var/log/fail2ban.log
# Option: syslogsocket
# Notes: Set the syslog socket file. Only used when logtarget is SYSLOG
# auto uses platform.system() to determine predefined paths
# Values: [ auto | FILE ] Default: auto
syslogsocket = auto
# Option: socket
# Notes.: Set the socket file. This is used to communicate with the daemon. Do
# not remove this file when Fail2ban runs. It will not be possible to
# communicate with the server afterwards.
# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.sock
#
socket = /var/run/fail2ban/fail2ban.sock
# Option: pidfile
# Notes.: Set the PID file. This is used to store the process ID of the
# fail2ban server.
# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.pid
#
pidfile = /var/run/fail2ban/fail2ban.pid
# Options: dbfile
# Notes.: Set the file for the fail2ban persistent data to be stored.
# A value of ":memory:" means database is only stored in memory
# and data is lost when fail2ban is stopped.
# A value of "None" disables the database.
# Values: [ None :memory: FILE ] Default: /var/lib/fail2ban/fail2ban.sqlite3
dbfile = /var/lib/fail2ban/fail2ban.sqlite3
# Options: dbpurgeage
# Notes.: Sets age at which bans should be purged from the database
# Values: [ SECONDS ] Default: 86400 (24hours)
dbpurgeage = 1d
# Options: dbmaxmatches
# Notes.: Number of matches stored in database per ticket (resolvable via
# tags <ipmatches>/<ipjailmatches> in actions)
# Values: [ INT ] Default: 10
dbmaxmatches = 10
[Definition]
[Thread]
# Options: stacksize
# Notes.: Specifies the stack size (in KiB) to be used for subsequently created threads,
# and must be 0 or a positive integer value of at least 32.
# Values: [ SIZE ] Default: 0 (use platform or configured default)
#stacksize = 0


@@ -0,0 +1,956 @@
# {{ ansible_managed }}
#
# WARNING: heavily refactored in 0.9.0 release. Please review and
# customize settings for your setup.
#
# Changes: in most of the cases you should not modify this
# file, but provide customizations in jail.local file,
# or separate .conf files under jail.d/ directory, e.g.:
#
# HOW TO ACTIVATE JAILS:
#
# YOU SHOULD NOT MODIFY THIS FILE.
#
# It will probably be overwritten or improved in a distribution update.
#
# Provide customizations in a jail.local file or a jail.d/customisation.local.
# For example to change the default bantime for all jails and to enable the
# ssh-iptables jail the following (uncommented) would appear in the .local file.
# See man 5 jail.conf for details.
#
# [DEFAULT]
# bantime = 1h
#
# [sshd]
# enabled = true
#
# See jail.conf(5) man page for more information
# Comments: use '#' for comment lines and ';' (following a space) for inline comments
[INCLUDES]
#before = paths-distro.conf
before = paths-debian.conf
# The DEFAULT allows a global definition of the options. They can be overridden
# in each jail afterwards.
[DEFAULT]
#
# MISCELLANEOUS OPTIONS
#
# "bantime.increment" allows to use database for searching of previously banned ip's to increase a
# default ban time using special formula, default it is banTime * 1, 2, 4, 8, 16, 32...
#bantime.increment = true
# "bantime.rndtime" is the max number of seconds using for mixing with random time
# to prevent "clever" botnets calculate exact time IP can be unbanned again:
#bantime.rndtime =
# "bantime.maxtime" is the max number of seconds using the ban time can reach (don't grows further)
#bantime.maxtime =
# "bantime.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
# default value of factor is 1 and with default value of formula, the ban time
# grows by 1, 2, 4, 8, 16 ...
#bantime.factor = 1
# "bantime.formula" used by default to calculate next value of ban time, default value bellow,
# the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32...
#bantime.formula = ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor
#
# more aggressive example of formula has the same values only for factor "2.0 / 2.885385" :
#bantime.formula = ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)
# "bantime.multipliers" used to calculate next value of ban time instead of formula, coresponding
# previously ban count and given "bantime.factor" (for multipliers default is 1);
# following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
# always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
#bantime.multipliers = 1 2 4 8 16 32 64
# following example can be used for small initial ban time (bantime=60) - it grows more aggressive at begin,
# for bantime=60 the multipliers are minutes and equal: 1 min, 5 min, 30 min, 1 hour, 5 hour, 12 hour, 1 day, 2 day
#bantime.multipliers = 1 5 30 60 300 720 1440 2880
# "bantime.overalljails" (if true) specifies the search of IP in the database will be executed
# cross over all jails, if false (default), only current jail of the ban IP will be searched
#bantime.overalljails = false
# --------------------
# "ignoreself" specifies whether the local resp. own IP addresses should be ignored
# (default is true). Fail2ban will not ban a host which matches such addresses.
#ignoreself = true
# "ignoreip" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban
# will not ban a host which matches an address in this list. Several addresses
# can be defined using space (and/or comma) separator.
#ignoreip = 127.0.0.1/8 ::1
# External command that will take an tagged arguments to ignore, e.g. <ip>,
# and return true if the IP is to be ignored. False otherwise.
#
# ignorecommand = /path/to/command <ip>
ignorecommand =
# "bantime" is the number of seconds that a host is banned.
bantime = 10m
# A host is banned if it has generated "maxretry" during the last "findtime"
# seconds.
findtime = 10m
# "maxretry" is the number of failures before a host get banned.
maxretry = 5
# "maxmatches" is the number of matches stored in ticket (resolvable via tag <matches> in actions).
maxmatches = %(maxretry)s
# "backend" specifies the backend used to get files modification.
# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
# This option can be overridden in each jail as well.
#
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
# If pyinotify is not installed, Fail2ban will use auto.
# gamin: requires Gamin (a file alteration monitor) to be installed.
# If Gamin is not installed, Fail2ban will use auto.
# polling: uses a polling algorithm which does not require external libraries.
# systemd: uses systemd python library to access the systemd journal.
# Specifying "logpath" is not valid for this backend.
# See "journalmatch" in the jails associated filter config
# auto: will try to use the following backends, in order:
# pyinotify, gamin, polling.
#
# Note: if systemd backend is chosen as the default but you enable a jail
# for which logs are present only in its own log files, specify some other
# backend for that jail (e.g. polling) and provide empty value for
# journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200
backend = auto
# "usedns" specifies if jails should trust hostnames in logs,
# warn when DNS lookups are performed, or ignore all hostnames in logs
#
# yes: if a hostname is encountered, a DNS lookup will be performed.
# warn: if a hostname is encountered, a DNS lookup will be performed,
# but it will be logged as a warning.
# no: if a hostname is encountered, will not be used for banning,
# but it will be logged as info.
# raw: use raw value (no hostname), allow use it for no-host filters/actions (example user)
usedns = warn
# "logencoding" specifies the encoding of the log files handled by the jail
# This is used to decode the lines from the log file.
# Typical examples: "ascii", "utf-8"
#
# auto: will use the system locale setting
logencoding = auto
# "enabled" enables the jails.
# By default all jails are disabled, and it should stay this way.
# Enable only relevant to your setup jails in your .local or jail.d/*.conf
#
# true: jail will be enabled and log files will get monitored for changes
# false: jail is not enabled
enabled = false
# "mode" defines the mode of the filter (see corresponding filter implementation for more info).
mode = normal
# "filter" defines the filter to use by the jail.
# By default jails have names matching their filter name
#
filter = %(__name__)s[mode=%(mode)s]
#
# ACTIONS
#
# Some options used for actions
# Destination email address used solely for the interpolations in
# jail.{conf,local,d/*} configuration files.
destemail = root@localhost
# Sender email address used solely for some actions
sender = root@<fq-hostname>
# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the
# mailing. Change mta configuration parameter to mail if you want to
# revert to conventional 'mail'.
mta = sendmail
# Default protocol
protocol = tcp
# Specify chain where jumps would need to be added in ban-actions expecting parameter chain
chain = <known/chain>
# Ports to be banned
# Usually should be overridden in a particular jail
port = 0:65535
# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3
fail2ban_agent = Fail2Ban/%(fail2ban_version)s
#
# Action shortcuts. To be used to define action parameter
# Default banning action (e.g. iptables, iptables-new,
# iptables-multiport, shorewall, etc) It is used to define
# action_* variables. Can be overridden globally or per
# section within jail.local file
banaction = iptables-multiport
banaction_allports = iptables-allports
# The simplest action to take: ban only
action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
# ban & send an e-mail with whois report to the destemail.
action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
%(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"]
# ban & send an e-mail with whois report and relevant log lines
# to the destemail.
action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
%(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action
#
# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines
# to the destemail.
action_xarf = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath="%(logpath)s", port="%(port)s"]
# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines
# to the destemail.
action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"]
%(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]
# Report block via blocklist.de fail2ban reporting service API
#
# See the IMPORTANT note in action.d/blocklist_de.conf for when to use this action.
# Specify expected parameters in file action.d/blocklist_de.local or if the interpolation
# `action_blocklist_de` used for the action, set value of `blocklist_de_apikey`
# in your `jail.local` globally (section [DEFAULT]) or per specific jail section (resp. in
# corresponding jail.d/my-jail.local file).
#
action_blocklist_de = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"]
# Report ban via badips.com, and use as blacklist
#
# See BadIPsAction docstring in config/action.d/badips.py for
# documentation for this action.
#
# NOTE: This action relies on banaction being present on start and therefore
# should be last action defined for a jail.
#
action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"]
#
# Report ban via badips.com (uses action.d/badips.conf for reporting only)
#
action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"]
# Report ban via abuseipdb.com.
#
# See action.d/abuseipdb.conf for usage example and details.
#
action_abuseipdb = abuseipdb
# Choose default action. To change, just override value of 'action' with the
# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local
# globally (section [DEFAULT]) or per specific section
action = %(action_)s
#
# JAILS
#
#
# SSH servers
#
[sshd]
# To use more aggressive sshd modes set filter parameter "mode" in jail.local:
# normal (default), ddos, extra or aggressive (combines all).
# See "tests/files/logs/sshd" or "filter.d/sshd.conf" for usage example and details.
#mode = normal
port = ssh
logpath = %(sshd_log)s
backend = %(sshd_backend)s
[dropbear]
port = ssh
logpath = %(dropbear_log)s
backend = %(dropbear_backend)s
[selinux-ssh]
port = ssh
logpath = %(auditd_log)s
#
# HTTP servers
#
[apache-auth]
port = http,https
logpath = %(apache_error_log)s
[apache-badbots]
# Ban hosts which agent identifies spammer robots crawling the web
# for email addresses. The mail outputs are buffered.
port = http,https
logpath = %(apache_access_log)s
bantime = 48h
maxretry = 1
[apache-noscript]
port = http,https
logpath = %(apache_error_log)s
[apache-overflows]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-nohome]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-botsearch]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-fakegooglebot]
port = http,https
logpath = %(apache_access_log)s
maxretry = 1
ignorecommand = %(ignorecommands_dir)s/apache-fakegooglebot <ip>
[apache-modsecurity]
port = http,https
logpath = %(apache_error_log)s
maxretry = 2
[apache-shellshock]
port = http,https
logpath = %(apache_error_log)s
maxretry = 1
[openhab-auth]
filter = openhab
action = iptables-allports[name=NoAuthFailures]
logpath = /opt/openhab/logs/request.log
[nginx-http-auth]
port = http,https
logpath = %(nginx_error_log)s
# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module`
# and define `limit_req` and `limit_req_zone` as described in nginx documentation
# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html
# or for example see in 'config/filter.d/nginx-limit-req.conf'
[nginx-limit-req]
port = http,https
logpath = %(nginx_error_log)s
[nginx-botsearch]
port = http,https
logpath = %(nginx_error_log)s
maxretry = 2
# Ban attackers that try to use PHP's URL-fopen() functionality
# through GET/POST variables. - Experimental, with more than a year
# of usage in production environments.
[php-url-fopen]
port = http,https
logpath = %(nginx_access_log)s
%(apache_access_log)s
[suhosin]
port = http,https
logpath = %(suhosin_log)s
[lighttpd-auth]
# Same as above for Apache's mod_auth
# It catches wrong authentifications
port = http,https
logpath = %(lighttpd_error_log)s
#
# Webmail and groupware servers
#
[roundcube-auth]
port = http,https
logpath = %(roundcube_errors_log)s
# Use following line in your jail.local if roundcube logs to journal.
#backend = %(syslog_backend)s
[openwebmail]
port = http,https
logpath = /var/log/openwebmail.log
[horde]
port = http,https
logpath = /var/log/horde/horde.log
[groupoffice]
port = http,https
logpath = /home/groupoffice/log/info.log
[sogo-auth]
# Monitor SOGo groupware server
# without proxy this would be:
# port = 20000
port = http,https
logpath = /var/log/sogo/sogo.log
[tine20]
logpath = /var/log/tine20/tine20.log
port = http,https
#
# Web Applications
#
#
[drupal-auth]
port = http,https
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
[guacamole]
port = http,https
logpath = /var/log/tomcat*/catalina.out
[monit]
#Ban clients brute-forcing the monit gui login
port = 2812
logpath = /var/log/monit
/var/log/monit.log
[webmin-auth]
port = 10000
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[froxlor-auth]
port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
#
# HTTP Proxy servers
#
#
[squid]
port = 80,443,3128,8080
logpath = /var/log/squid/access.log
[3proxy]
port = 3128
logpath = /var/log/3proxy.log
#
# FTP servers
#
[proftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(proftpd_log)s
backend = %(proftpd_backend)s
[pure-ftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(pureftpd_log)s
backend = %(pureftpd_backend)s
[gssftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
[wuftpd]
port = ftp,ftp-data,ftps,ftps-data
logpath = %(wuftpd_log)s
backend = %(wuftpd_backend)s
[vsftpd]
# or overwrite it in jails.local to be
# logpath = %(syslog_authpriv)s
# if you want to rely on PAM failed login attempts
# vsftpd's failregex should match both of those formats
port = ftp,ftp-data,ftps,ftps-data
logpath = %(vsftpd_log)s
#
# Mail servers
#
# ASSP SMTP Proxy Jail
[assp]
port = smtp,465,submission
logpath = /root/path/to/assp/logs/maillog.txt
[courier-smtp]
port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[postfix]
# To use another modes set filter parameter "mode" in jail.local:
mode = more
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s
[postfix-rbl]
filter = postfix[mode=rbl]
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s
maxretry = 1
[sendmail-auth]
port = submission,465,smtp
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[sendmail-reject]
# To use more aggressive modes set filter parameter "mode" in jail.local:
# normal (default), extra or aggressive
# See "tests/files/logs/sendmail-reject" or "filter.d/sendmail-reject.conf" for usage example and details.
#mode = normal
port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[qmail-rbl]
filter = qmail
port = smtp,465,submission
logpath = /service/qmail/log/main/current
# dovecot defaults to logging to the mail syslog facility
# but can be set by syslog_facility in the dovecot configuration.
[dovecot]
port = pop3,pop3s,imap,imaps,submission,465,sieve
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s
[sieve]
port = smtp,465,submission
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s
[solid-pop3d]
port = pop3,pop3s
logpath = %(solidpop3d_log)s
[exim]
# see filter.d/exim.conf for further modes supported from filter:
#mode = normal
port = smtp,465,submission
logpath = %(exim_main_log)s
[exim-spam]
port = smtp,465,submission
logpath = %(exim_main_log)s
[kerio]
port = imap,smtp,imaps,465
logpath = /opt/kerio/mailserver/store/logs/security.log
#
# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
# all relevant ports get banned
#
[courier-auth]
port = smtp,465,submission,imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[postfix-sasl]
filter = postfix[mode=auth]
port = smtp,465,submission,imap,imaps,pop3,pop3s
# You might consider monitoring /var/log/mail.warn instead if you are
# running postfix since it would provide the same log lines at the
# "warn" level but overall at the smaller filesize.
logpath = %(postfix_log)s
backend = %(postfix_backend)s
[perdition]
port = imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[squirrelmail]
port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks
logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log
[cyrus-imap]
port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
[uwimap-auth]
port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s
#
#
# DNS servers
#
# !!! WARNING !!!
# Since UDP is connection-less protocol, spoofing of IP and imitation
# of illegal actions is way too simple. Thus enabling of this filter
# might provide an easy way for implementing a DoS against a chosen
# victim. See
# http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
# Please DO NOT USE this jail unless you know what you are doing.
#
# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks UDP traffic for DNS requests.
# [named-refused-udp]
#
# filter = named-refused
# port = domain,953
# protocol = udp
# logpath = /var/log/named/security.log
# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks TCP traffic for DNS requests.
[named-refused]
port = domain,953
logpath = /var/log/named/security.log
[nsd]
port = 53
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
logpath = /var/log/nsd.log
#
# Miscellaneous
#
[asterisk]
port = 5060,5061
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
%(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
logpath = /var/log/asterisk/messages
maxretry = 10
[freeswitch]
port = 5060,5061
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
%(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
logpath = /var/log/freeswitch.log
maxretry = 10
# enable adminlog; it will log to a file inside znc's directory by default.
[znc-adminlog]
port = 6667
logpath = /var/lib/znc/moddata/adminlog/znc.log
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld] or
# equivalent section:
# log-warnings = 2
#
# for syslog (daemon facility)
# [mysqld_safe]
# syslog
#
# for own logfile
# [mysqld]
# log-error=/var/log/mysqld.log
[mysqld-auth]
port = 3306
logpath = %(mysql_log)s
backend = %(mysql_backend)s
# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf')
[mongodb-auth]
# change port when running with "--shardsvr" or "--configsvr" runtime operation
port = 27017
logpath = /var/log/mongodb/mongodb.log
# Jail for more extended banning of persistent abusers
# !!! WARNINGS !!!
# 1. Make sure that your loglevel specified in fail2ban.conf/.local
# is not at DEBUG level -- which might then cause fail2ban to fall into
# an infinite loop constantly feeding itself with non-informative lines
# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 648000 (7.5 days)
# to maintain entries for failed logins for sufficient amount of time
[recidive]
logpath = /var/log/fail2ban.log
banaction = %(banaction_allports)s
bantime = 1w
findtime = 1d
# Generic filter for PAM. Has to be used with action which bans all
# ports such as iptables-allports, shorewall
[pam-generic]
# pam-generic filter can be customized to monitor specific subset of 'tty's
banaction = %(banaction_allports)s
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[xinetd-fail]
banaction = iptables-multiport-log
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s
maxretry = 2
# stunnel - need to set port for this
[stunnel]
logpath = /var/log/stunnel4/stunnel.log
[ejabberd-auth]
port = 5222
logpath = /var/log/ejabberd/ejabberd.log
[counter-strike]
logpath = /opt/cstrike/logs/L[0-9]*.log
# Firewall: http://www.cstrike-planet.com/faq/6
tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039
udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015
action = %(banaction)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
%(banaction)s[name=%(__name__)s-udp, port="%(udpport)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
[bitwarden]
port = http,https
logpath = /home/*/bwdata/logs/identity/Identity/log.txt
[centreon]
port = http,https
logpath = /var/log/centreon/login.log
# consider low maxretry and a long bantime
# nobody except your own Nagios server should ever probe nrpe
[nagios]
logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility
backend = %(syslog_backend)s
maxretry = 1
[oracleims]
# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above
logpath = /opt/sun/comms/messaging64/log/mail.log_current
banaction = %(banaction_allports)s
[directadmin]
logpath = /var/log/directadmin/login.log
port = 2222
[portsentry]
logpath = /var/lib/portsentry/portsentry.history
maxretry = 1
[pass2allow-ftp]
# this pass2allow example allows FTP traffic after successful HTTP authentication
port = ftp,ftp-data,ftps,ftps-data
# knocking_url variable must be overridden to some secret value in jail.local
knocking_url = /knocking/
filter = apache-pass[knocking_url="%(knocking_url)s"]
# access log of the website with HTTP auth
logpath = %(apache_access_log)s
blocktype = RETURN
returntype = DROP
action = %(action_)s[blocktype=%(blocktype)s, returntype=%(returntype)s,
actionstart_on_demand=false, actionrepair_on_unban=true]
bantime = 1h
maxretry = 1
findtime = 1
[murmur]
# AKA mumble-server
port = 64738
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol=tcp, chain="%(chain)s", actname=%(banaction)s-tcp]
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol=udp, chain="%(chain)s", actname=%(banaction)s-udp]
logpath = /var/log/mumble-server/mumble-server.log
[screensharingd]
# For Mac OS Screen Sharing Service (VNC)
logpath = /var/log/system.log
logencoding = utf-8
[haproxy-http-auth]
# HAProxy by default doesn't log to file you'll need to set it up to forward
# logs to a syslog server which would then write them to disk.
# See "haproxy-http-auth" filter for a brief cautionary note when setting
# maxretry and findtime.
logpath = /var/log/haproxy.log
[slapd]
port = ldap,ldaps
logpath = /var/log/slapd.log
[domino-smtp]
port = smtp,ssmtp
logpath = /home/domino01/data/IBM_TECHNICAL_SUPPORT/console.log
[phpmyadmin-syslog]
port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s
[zoneminder]
# Zoneminder HTTP/HTTPS web interface auth
# Logs auth failures to apache2 error log
port = http,https
logpath = %(apache_error_log)s
[traefik-auth]
# to use 'traefik-auth' filter you have to configure your Traefik instance,
# see `filter.d/traefik-auth.conf` for details and service example.
port = http,https
logpath = /var/log/traefik/access.log

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,6 @@
---
- name: Restart fail2ban_exporter
systemd:
name: fail2ban_exporter
state: restarted
daemon_reload: yes

View File

@@ -0,0 +1,24 @@
---
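# The GitLab release URL redirects to the versioned artifact; following
# redirects here captures the final download URL in the registered result.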
- name: Get fail2ban release download url
uri:
url: "https://gitlab.com/hctrdev/fail2ban-prometheus-exporter/-/releases/v{{ fail2ban_exporter_version }}/downloads/fail2ban_exporter_{{ fail2ban_exporter_version }}_linux_amd64.tar.gz"
follow_redirects: all
register: fail2ban_download_url
- name: Download fail2ban Exporter
get_url:
url: "{{ fail2ban_download_url.url }}"
dest: "/tmp/fail2ban_exporter.tar.gz"
- name: Extract fail2ban Exporter
unarchive:
src: "/tmp/fail2ban_exporter.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Move fail2ban Exporter binary
copy:
src: "/tmp/fail2ban_exporter"
dest: "/usr/sbin/fail2ban_exporter"
remote_src: yes
mode: '0755'

View File

@@ -0,0 +1,3 @@
---
- include_tasks: install.yaml
- include_tasks: service.yml

View File

@@ -0,0 +1,23 @@
---
- name: Create fail2ban_exporter service file
template:
src: "fail2ban_exporter.service.j2"
dest: "/etc/systemd/system/fail2ban_exporter.service"
mode: '0644'
tags:
- create_service
- name: Reload systemd to pick up the fail2ban_exporter service
systemd:
daemon_reload: yes
tags:
- reload_systemd
- name: Enable and start the fail2ban_exporter service
systemd:
name: fail2ban_exporter
state: started
enabled: yes
tags:
- start_fail2ban_exporter

View File

@@ -0,0 +1,19 @@
[Unit]
Description=Fail2ban metric exporter for Prometheus
Documentation=https://gitlab.com/hctrdev/fail2ban-prometheus-exporter/-/blob/main/README.md
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/usr/sbin/fail2ban_exporter
Restart=on-failure
RestartSec=5s
NoNewPrivileges=true
# Currently need to run the exporter as root to ensure it has read/write access to the
# fail2ban socket file.
User=root
Group=root
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,64 @@
- name: Update APT package cache
apt:
update_cache: yes
- name: Install dependencies
apt:
name: [apt-transport-https, wget]
state: present
- name: Download and add Elastic GPG key
ansible.builtin.shell: |
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
args:
executable: /bin/bash
- name: Add Elastic APT repository
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/elastic-8.x.list
content: "deb https://artifacts.elastic.co/packages/8.x/apt stable main"
- name: Update APT cache after adding repo
apt:
update_cache: yes
- name: Install Filebeat
apt:
name: filebeat
state: present
- name: Copy Filebeat configuration
template:
src: filebeat.yml.j2
dest: /etc/filebeat/filebeat-laravel.yml
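# The stock filebeat unit is disabled and replaced below so that multiple
# filebeat instances (laravel, nginx) can run side by side, each with its
# own config and data path.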
- name: Disable default Filebeat service
systemd:
name: filebeat
enabled: no
state: stopped
- name: Remove default Filebeat systemd service file
file:
path: /etc/systemd/system/filebeat.service
state: absent
- name: Copy laravel Filebeat systemd service file
template:
src: filebeat-laravel.service.j2
dest: /etc/systemd/system/filebeat-laravel.service
mode: '0644'
- name: TEMP! Ensure stopped Filebeat service
systemd:
name: filebeat-laravel
enabled: yes
state: stopped
daemon_reload: yes
# - name: Enable and restart Filebeat service
# systemd:
# name: filebeat-laravel
# enabled: yes
# state: restarted
# daemon_reload: yes

View File

@@ -0,0 +1,18 @@
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target
[Service]
UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat-laravel.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat-laravel --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,229 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
# ============================== Filebeat inputs ===============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream
id: laravel-filestream
# Change to true to enable this input configuration.
enabled: true
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /home/forge/app.retailor.io/storage/logs/*.log
#- c:\programdata\elasticsearch\logs\*
exclude_files: ['\.gz$']
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#prospector.scanner.exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
fields:
pipeline_id: "laravel"
fields_under_root: true
# ============================== Filebeat modules ==============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["https://elasticsearch:9200"]
# Protocol - either `http` (default) or `https`.
# protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
# ------------------------------ Logstash Output -------------------------------
output.logstash:
# The Logstash hosts
hosts: ["elasticsearch:5045"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
# ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]
# Certificate for SSL client authentication
# ssl.certificate: "/etc/elk-certs/elk-ssl.crt"
# Client Certificate Key
# ssl.key: "/etc/elk-certs/elk-ssl.key"
# ================================= Processors =================================
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the filebeat.
#instrumentation:
# Set to true to enable instrumentation of filebeat.
#enabled: false
    # Environment in which filebeat is running (e.g. staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows enabling 6.7 migration aliases
#migration.6_to_7.enabled: true

View File

@@ -0,0 +1,57 @@
- name: Update APT package cache
apt:
update_cache: yes
- name: Install dependencies
apt:
name: [apt-transport-https, wget]
state: present
- name: Download and add Elastic GPG key
ansible.builtin.shell: |
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
args:
executable: /bin/bash
- name: Add Elastic APT repository
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/elastic-8.x.list
content: "deb https://artifacts.elastic.co/packages/8.x/apt stable main"
- name: Update APT cache after adding repo
apt:
update_cache: yes
- name: Install Filebeat
apt:
name: filebeat
state: present
- name: Copy Filebeat configuration
template:
src: filebeat.yml.j2
dest: /etc/filebeat/filebeat-nginx.yml
- name: Disable default Filebeat service
systemd:
name: filebeat
enabled: no
state: stopped
- name: Remove default Filebeat systemd service file
file:
path: /etc/systemd/system/filebeat.service
state: absent
- name: Copy nginx Filebeat systemd service file
template:
src: filebeat-nginx.service.j2
dest: /etc/systemd/system/filebeat-nginx.service
mode: '0644'
- name: Enable and restart Filebeat service
systemd:
name: filebeat-nginx
enabled: yes
state: restarted
daemon_reload: yes

View File

@@ -0,0 +1,18 @@
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target
[Service]
UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat-nginx.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat-nginx --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,229 @@
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
# ============================== Filebeat inputs ===============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream
id: nginx-filestream
# Change to true to enable this input configuration.
enabled: true
# Paths that should be crawled and fetched. Glob based paths.
paths:
- /var/log/nginx/*.log
#- c:\programdata\elasticsearch\logs\*
exclude_files: ['\.gz$']
# Exclude lines. A list of regular expressions to match. It drops the lines that are
# matching any regular expression from the list.
#exclude_lines: ['^DBG']
# Include lines. A list of regular expressions to match. It exports the lines that are
# matching any regular expression from the list.
#include_lines: ['^ERR', '^WARN']
# Exclude files. A list of regular expressions to match. Filebeat drops the files that
# are matching any regular expression from the list. By default, no files are dropped.
#prospector.scanner.exclude_files: ['.gz$']
# Optional additional fields. These fields can be freely picked
# to add additional information to the crawled log files for filtering
fields:
pipeline_id: "nginx"
fields_under_root: true
# ============================== Filebeat modules ==============================
filebeat.config.modules:
# Glob pattern for configuration loading
path: ${path.config}/modules.d/*.yml
# Set to true to enable config reloading
reload.enabled: false
# Period on which files under path should be checked for changes
#reload.period: 10s
# ======================= Elasticsearch template setting =======================
setup.template.settings:
index.number_of_shards: 1
#index.codec: best_compression
#_source.enabled: false
# ================================== General ===================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
# =================================== Kibana ===================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
# Kibana Host
# Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
# IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
#host: "localhost:5601"
# Kibana Space ID
# ID of the Kibana Space into which the dashboards should be loaded. By default,
# the Default Space will be used.
#space.id:
# =============================== Elastic Cloud ================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
# ================================== Outputs ===================================
# Configure what output to use when sending the data collected by the beat.
# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["https://elasticsearch:9200"]
# Protocol - either `http` (default) or `https`.
# protocol: "https"
# Authentication credentials - either API key or username/password.
#api_key: "id:api_key"
#username: "elastic"
#password: "changeme"
# ------------------------------ Logstash Output -------------------------------
output.logstash:
# The Logstash hosts
hosts: ["elasticsearch:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
# ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]
# Certificate for SSL client authentication
# ssl.certificate: "/etc/elk-certs/elk-ssl.crt"
# Client Certificate Key
# ssl.key: "/etc/elk-certs/elk-ssl.key"
# ================================= Processors =================================
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
# ================================== Logging ===================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]
# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
# ============================== Instrumentation ===============================
# Instrumentation support for the filebeat.
#instrumentation:
# Set to true to enable instrumentation of filebeat.
#enabled: false
    # Environment in which filebeat is running (e.g. staging, production, etc.)
#environment: ""
# APM Server hosts to report instrumentation results to.
#hosts:
# - http://localhost:8200
# API Key for the APM Server(s).
# If api_key is set then secret_token will be ignored.
#api_key:
# Secret token for the APM Server(s).
#secret_token:
# ================================= Migration ==================================
# This allows enabling 6.7 migration aliases
#migration.6_to_7.enabled: true

View File

@@ -0,0 +1,3 @@
# Defaults for the base role
---
firewall_enable: true

View File

@@ -0,0 +1,23 @@
---
- name: Update apt cache if older than 1 hour
apt:
update_cache: yes
cache_valid_time: 3600
- name: Add ufw
  apt:
    name: ufw
    state: latest
- name: Enable access via ssh
ufw:
rule: allow
port: "22"
- name: Enable custom firewall ports
ufw:
rule: allow
port: "{{ item }}"
loop: "{{ custom_firewall_ports | default([]) }}"
- name: Start ufw
ufw:
state: enabled

View File

@@ -0,0 +1,3 @@
---
# Abstract line shown in the motd; set to a descriptive string to enable, leave false to disable
description: false

69
roles/motd/tasks/main.yml Normal file
View File

@@ -0,0 +1,69 @@
---
- name: create motd-hostname file
file:
path: /etc/update-motd.d/20-hostname
state: touch
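# motdGO (github.com/kevinmidboe/motdGO) renders an ascii-art banner of the
# hostname, which is written into /etc/update-motd.d as an executable snippet.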
- name: download motd executable from github.com
  command: curl -o /usr/local/bin/motd https://raw.githubusercontent.com/kevinmidboe/motdGO/main/motd-linux
  args:
    creates: /usr/local/bin/motd
- name: set motd executable permission
file:
path: /usr/local/bin/motd
mode: +x
- name: generate motd-hostname from motd executable
command: motd -font larry3d -str {{ inventory_hostname }} -parser motd
register: motd_hostname
- name: write command output to 20-hostname file
copy:
content: "{{ motd_hostname.stdout }}"
dest: /etc/update-motd.d/20-hostname
- name: set motd-hostname executable permission
file:
path: /etc/update-motd.d/20-hostname
mode: +x
- name: generate motd-abstract
template:
src: 25-abstract.j2
dest: /etc/update-motd.d/25-abstract
owner: root
group: root
mode: +x
when: description
- name: check if help-text motd exists
stat:
path: /etc/update-motd.d/10-help-text
register: help_text
- name: disable help-text motd
file:
path: /etc/update-motd.d/10-help-text
mode: -x
  when: help_text.stat.exists
- name: check if motd-news motd exists
stat:
path: /etc/update-motd.d/50-motd-news
register: motd_news
- name: disable motd-news motd
file:
path: /etc/update-motd.d/50-motd-news
mode: -x
  when: motd_news.stat.exists
- name: check if unminimize motd exists
stat:
path: /etc/update-motd.d/60-unminimize
register: motd_unminimize
- name: disable unminimize motd
file:
path: /etc/update-motd.d/60-unminimize
mode: -x
  when: motd_unminimize.stat.exists

View File

@@ -0,0 +1,4 @@
#!/bin/dash
printf "\n"
printf "\e[1;36m%s\e > {{ description }} \e[0m\n\n"

View File

@@ -0,0 +1,16 @@
# nginx prometheus exporter
This installs a Prometheus exporter that polls nginx's stub_status endpoint and exposes the counters as Prometheus metrics.
Nginx needs to be configured with the following location block:
```
location /nginx_status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all;
}
```
The exporter must scrape using `127.0.0.1` as the host in the URL (`http://127.0.0.1/nginx_status`); requests using any other host, such as the public IP or `localhost`, will receive a non-20x response.
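
To sanity-check a provisioned host, a small playbook sketch like the one below probes both endpoints. It assumes the exporter listens on its upstream default address `:9113`; adjust the port if the service file overrides it.

```
- name: verify nginx stub_status and exporter
  hosts: web
  tasks:
    - name: check stub_status responds on loopback
      uri:
        url: http://127.0.0.1/nginx_status
        return_content: yes

    - name: check exporter metrics endpoint
      uri:
        url: http://127.0.0.1:9113/metrics
        return_content: yes
```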

View File

@@ -0,0 +1 @@
---

View File

@@ -0,0 +1,6 @@
---
- name: Restart nginx_prometheus_exporter
systemd:
name: nginx_prometheus_exporter
state: restarted
daemon_reload: yes

View File

@@ -0,0 +1,18 @@
---
- name: Download Nginx Prometheus Exporter
get_url:
url: "https://github.com/nginx/nginx-prometheus-exporter/releases/download/v{{ nginx_exporter_version }}/nginx-prometheus-exporter_{{ nginx_exporter_version }}_linux_amd64.tar.gz"
dest: "/tmp/nginx_prometheus_exporter.tar.gz"
- name: Extract Nginx Prometheus Exporter
unarchive:
src: "/tmp/nginx_prometheus_exporter.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Move Nginx Prometheus Exporter binary
copy:
src: "/tmp/nginx-prometheus-exporter"
dest: "/usr/local/bin/nginx-prometheus-exporter"
remote_src: yes
mode: '0755'

View File

@@ -0,0 +1,3 @@
---
- include_tasks: install.yaml
- include_tasks: service.yml

View File

@@ -0,0 +1,23 @@
---
- name: Create nginx_prometheus_exporter service file
template:
src: "nginx_prometheus_exporter.service.j2"
dest: "/etc/systemd/system/nginx_prometheus_exporter.service"
mode: '0644'
tags:
- create_service
- name: Reload systemd to pick up the nginx_prometheus_exporter service
systemd:
daemon_reload: yes
tags:
- reload_systemd
- name: Enable and start the nginx_prometheus_exporter service
systemd:
name: nginx_prometheus_exporter
state: started
enabled: yes
tags:
- start_nginx_prometheus_exporter

View File

@@ -0,0 +1,11 @@
[Unit]
Description=Nginx Prometheus Exporter
After=network.target
[Service]
User=nobody
ExecStart=/usr/local/bin/nginx-prometheus-exporter -nginx.scrape-uri=http://127.0.0.1/nginx_status
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,6 @@
---
- name: Restart node_exporter
systemd:
name: node_exporter
state: restarted
daemon_reload: yes

View File

@@ -0,0 +1,18 @@
---
- name: Download Node Exporter
get_url:
url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
dest: "/tmp/node_exporter.tar.gz"
- name: Extract Node Exporter
unarchive:
src: "/tmp/node_exporter.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Move Node Exporter binary
copy:
src: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter"
dest: "/usr/local/bin/node_exporter"
remote_src: yes
mode: '0755'

View File

@@ -0,0 +1,3 @@
---
- include_tasks: install.yml
- include_tasks: service.yml

View File

@@ -0,0 +1,22 @@
---
- name: Create node_exporter service file
template:
src: "node_exporter.service.j2"
dest: "/etc/systemd/system/node_exporter.service"
mode: '0644'
tags:
- create_service
- name: Reload systemd to pick up the node_exporter service
systemd:
daemon_reload: yes
tags:
- reload_systemd
- name: Enable and start the node_exporter service
systemd:
name: node_exporter
state: started
enabled: yes
tags:
- start_node_exporter

View File

@@ -0,0 +1,11 @@
[Unit]
Description=Prometheus Node Exporter
After=network.target
[Service]
User=nobody
ExecStart=/usr/local/bin/node_exporter
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,2 @@
---
node_exporter_version: "1.7.0"

View File

@@ -0,0 +1,2 @@
skip_list:
  - '106' # Role name {} does not match ``^[a-z][a-z0-9_]+$`` pattern

View File

@@ -0,0 +1,19 @@
# EditorConfig: http://EditorConfig.org
# top-most EditorConfig file
root = true
# Defaults for all editor files
[*]
insert_final_newline = true
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
# Files with a smaller indent
[*.yml]
indent_size = 2
# Jinja2 template files
[*.j2]
end_of_line = lf

View File

@@ -0,0 +1 @@
4.3.4

View File

@@ -0,0 +1 @@
2.9.1

View File

@@ -0,0 +1 @@
4.3.1

View File

@@ -0,0 +1 @@
3.8.3

View File

@@ -0,0 +1 @@
3.0.8

View File

@@ -0,0 +1 @@
3.6.7

View File

@@ -0,0 +1 @@
5.3.1

View File

@@ -0,0 +1 @@
1.24.2

View File

@@ -0,0 +1,36 @@
---
language: python
python: '3.6'
env:
global:
- MOLECULEW_USE_SYSTEM=true
matrix:
# Spin off separate builds for each of the following versions of Ansible
- MOLECULEW_ANSIBLE=2.8.16
- MOLECULEW_ANSIBLE=2.9.1
# Require Ubuntu 16.04
dist: xenial
# Require Docker
services:
- docker
install:
# Install dependencies
- ./moleculew wrapper-install
# Display versions
- ./moleculew wrapper-versions
script:
- ./moleculew test
branches:
only:
- master
- /^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([\.\-].*)?$/
notifications:
webhooks: https://galaxy.ansible.com/api/v1/notifications/

33
roles/oh-my-zsh/.yamllint Normal file
View File

@@ -0,0 +1,33 @@
---
# Based on ansible-lint config
extends: default
rules:
braces:
max-spaces-inside: 1
level: error
brackets:
max-spaces-inside: 1
level: error
colons:
max-spaces-after: -1
level: error
commas:
max-spaces-after: -1
level: error
comments: disable
comments-indentation: disable
document-start: disable
empty-lines:
max: 3
level: error
hyphens:
level: error
indentation: disable
key-duplicates: enable
line-length: disable
new-line-at-end-of-file: disable
new-lines:
type: unix
trailing-spaces: disable
truthy: disable

21
roles/oh-my-zsh/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 GantSign Ltd.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

147
roles/oh-my-zsh/README.md Normal file
View File

@@ -0,0 +1,147 @@
Ansible Role: Oh My Zsh
=======================
[![Build Status](https://travis-ci.com/gantsign/ansible-role-oh-my-zsh.svg?branch=master)](https://travis-ci.com/gantsign/ansible-role-oh-my-zsh)
[![Ansible Galaxy](https://img.shields.io/badge/ansible--galaxy-gantsign.oh--my--zsh-blue.svg)](https://galaxy.ansible.com/gantsign/oh-my-zsh)
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/gantsign/ansible-role-oh-my-zsh/master/LICENSE)
Role to download, install and configure [Oh-My-Zsh](http://ohmyz.sh/).
**Note:** you may be better off using the alternative
[gantsign.ansible_role_antigen](https://galaxy.ansible.com/gantsign/antigen)
role that can install third-party Zsh plugins as well as installing Oh My Zsh
and its plugins.
Requirements
------------
* Ansible >= 2.8
* Linux Distribution
* Debian Family
* Debian
* Jessie (8)
* Stretch (9)
* Ubuntu
* Xenial (16.04)
* Bionic (18.04)
* RedHat Family
* CentOS
* 7
* Fedora
* 31
* SUSE Family
* openSUSE
* 15.1
* Note: other versions are likely to work but have not been tested.
Role Variables
--------------
The following variables will change the behavior of this role (default values
are shown below):
```yaml
# Default theme
oh_my_zsh_theme: robbyrussell
# Default plugins
oh_my_zsh_plugins:
- git
# Whether to install by default for all specified users.
# May be overridden by `oh_my_zsh: install:` under each user.
oh_my_zsh_install: yes
# User configuration
# Important: oh-my-zsh is installed per user so you need to specify the users to install it for.
users:
- username: example1
oh_my_zsh:
theme: robbyrussell
plugins:
- git
- username: example2
oh_my_zsh:
theme: robbyrussell
plugins:
- git
- mvn
- username: example3
oh_my_zsh:
install: no
```
Example Playbook
----------------
```yaml
- hosts: servers
roles:
- role: gantsign.oh-my-zsh
users:
- username: example
```
More Roles From GantSign
------------------------
You can find more roles from GantSign on
[Ansible Galaxy](https://galaxy.ansible.com/gantsign).
Development & Testing
---------------------
This project uses [Molecule](http://molecule.readthedocs.io/) to aid in the
development and testing; the role is unit tested using
[Testinfra](http://testinfra.readthedocs.io/) and
[pytest](http://docs.pytest.org/).
To develop or test you'll need to have installed the following:
* Linux (e.g. [Ubuntu](http://www.ubuntu.com/))
* [Docker](https://www.docker.com/)
* [Python](https://www.python.org/) (including python-pip)
* [Ansible](https://www.ansible.com/)
* [Molecule](http://molecule.readthedocs.io/)
Because the above can be tricky to install, this project includes
[Molecule Wrapper](https://github.com/gantsign/molecule-wrapper). Molecule
Wrapper is a shell script that installs Molecule and its dependencies (apart
from Linux) and then executes Molecule with the command you pass it.
To test this role using Molecule Wrapper run the following command from the
project root:
```bash
./moleculew test
```
Note: some of the dependencies need `sudo` permission to install.
License
-------
MIT
Author Information
------------------
John Freeman
GantSign Ltd.
Company No. 06109112 (registered in England)

View File

@@ -0,0 +1,11 @@
---
# Default theme
oh_my_zsh_theme: robbyrussell
# Default plugins
oh_my_zsh_plugins:
- git
# Whether to install by default for all specified users.
# May be overridden by `oh_my_zsh_install` under each user.
oh_my_zsh_install: yes

Some files were not shown because too many files have changed in this diff