Mirror of https://github.com/KevinMidboe/playbooks-retailor.git (synced 2026-01-08 10:25:44 +00:00)
Ansible playbooks for retailor.io infrastructure.
roles/base-packages/defaults/main.yml (new file)
@@ -0,0 +1,22 @@
# Default base role values
---
# Set to true to skip installing the user quality-of-life packages below
apt_skip_user_packages: false

# Packages installed on every host
base_packages:
  - git
  - vim
  - curl
  - dnsutils
  # - ntp
  - ssh
  - fail2ban
  - openssh-server
  - openssl

base_packages_user:
  - htop
  - tree
  - ncdu
  - nload
roles/base-packages/tasks/main.yml (new file)
@@ -0,0 +1,31 @@
# APT-related tasks for the base role
---
# Add repositories
- name: install apt https support
  apt:
    pkg: apt-transport-https
    state: latest
    update_cache: yes
    cache_valid_time: 3600

# Add default packages
- name: install base packages
  apt:
    pkg: "{{ item }}"
    state: present
  with_items: "{{ base_packages }}"

# Add user defined packages
- name: install packages for user quality of life
  apt:
    pkg: "{{ item }}"
    state: present
    update_cache: yes
  with_items: "{{ base_packages_user | default([]) }}"
  when: not apt_skip_user_packages | default(False)

# Add host specific packages
- name: install host specific packages
  apt:
    pkg: "{{ item }}"
    state: present
    update_cache: yes
  with_items: "{{ apt_packages | default([]) }}"

# Dist-upgrade
# - name: perform dist-upgrade
#   apt: upgrade=dist
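The last task above lets an individual host pull in extra packages through `apt_packages`, which the role otherwise leaves undefined. A minimal host_vars sketch (file and package names are illustrative, not from the repo):

```yaml
# host_vars/web01.yml (hypothetical host)
apt_packages:
  - nginx
  - postgresql-client
```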
roles/cadvisor/defaults/main.yml (new file)
@@ -0,0 +1,3 @@
---
cadvisor_version: "0.46.0"
cadvisor_binary_url: "https://github.com/google/cadvisor/releases/download/v{{ cadvisor_version }}/cadvisor-v{{ cadvisor_version }}-linux-amd64"
roles/cadvisor/handlers/main.yml (new file)
@@ -0,0 +1,5 @@
---
# cAdvisor runs as a native binary under systemd (see tasks/), so the
# handler restarts the systemd unit rather than a Docker container.
- name: Restart cAdvisor
  systemd:
    name: cadvisor
    state: restarted
roles/cadvisor/meta/main.yml (new file)
@@ -0,0 +1,13 @@
---
galaxy_info:
  author: Your Name
  description: Ansible role to deploy cAdvisor as a native binary managed by systemd
  license: MIT
  min_ansible_version: "2.9"
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - monitoring
    - cadvisor
roles/cadvisor/tasks/install.yml (new file)
@@ -0,0 +1,39 @@
---
- name: Ensure the cAdvisor binary is downloaded
  get_url:
    url: "{{ cadvisor_binary_url }}"
    dest: "/usr/local/bin/cadvisor"
    mode: '0755'
  tags:
    - install_cadvisor

- name: Create cAdvisor data directory
  file:
    path: "/var/lib/cadvisor"
    state: directory
    mode: '0755'
  tags:
    - create_cadvisor_data_dir

- name: Create cAdvisor service
  template:
    src: "cadvisor.service.j2"
    dest: "/etc/systemd/system/cadvisor.service"
    mode: '0644'
  notify: Restart cAdvisor
  tags:
    - create_cadvisor_service

- name: Reload systemd to pick up the cAdvisor service
  systemd:
    daemon_reload: yes
  tags:
    - reload_systemd

- name: Enable and start the cAdvisor service
  systemd:
    name: cadvisor
    state: started
    enabled: yes
  tags:
    - start_cadvisor
roles/cadvisor/tasks/main.yml (new file)
@@ -0,0 +1,5 @@
---
- name: Install cAdvisor (native binary)
  include_tasks: install.yml
  tags:
    - install
roles/cadvisor/templates/cadvisor.service.j2 (new file)
@@ -0,0 +1,12 @@
[Unit]
Description=cAdvisor
Documentation=https://github.com/google/cadvisor

[Service]
ExecStart=/usr/local/bin/cadvisor
Restart=always
LimitNOFILE=4096
User=root

[Install]
WantedBy=multi-user.target
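The unit runs cAdvisor with its defaults, so the web UI and Prometheus metrics are served on port 8080. If that port is taken on a host, cAdvisor's `-port` flag can be appended to `ExecStart`; a sketch with an assumed alternative port:

```
ExecStart=/usr/local/bin/cadvisor -port 8081
```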
roles/docker/defaults/main.yml (new file)
@@ -0,0 +1,15 @@
clean_install_remove_packages:
  - docker.io
  - docker-doc
  - docker-compose
  - podman-docker
  - containerd
  - runc

install_packages:
  - docker-ce
  - docker-ce-cli
  - containerd.io
  - docker-buildx-plugin
  # docker-compose is removed above; the compose v2 plugin below replaces it
  - docker-compose-plugin
roles/docker/handlers/main.yml (new file)
@@ -0,0 +1,5 @@
---
- name: Restart Docker
  systemd:
    name: docker
    state: restarted
roles/docker/meta/main.yml (new file)
@@ -0,0 +1,13 @@
---
galaxy_info:
  author: Your Name
  description: Ansible role to install and manage Docker on Debian
  license: MIT
  min_ansible_version: "2.9"
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - docker
dependencies: []
roles/docker/tasks/check_distro.yml (new file)
@@ -0,0 +1,27 @@
---
- name: Check if the current distro is supported (Ubuntu or Debian)
  set_fact:
    distro_supported: "{{ ansible_facts['distribution'].lower() in supported_distros }}"
  tags:
    - check_distro

- name: Set installation URL based on the distro
  set_fact:
    install_url: "https://download.docker.com/linux/{{ ansible_facts['distribution'].lower() }} {{ ansible_distribution_release }} stable"
  when: distro_supported
  tags:
    - set_url

- name: Log unsupported distro
  debug:
    msg: "The {{ ansible_facts['distribution'] }} distribution is not supported. Skipping Docker installation."
  when: not distro_supported
  tags:
    - unsupported_distro

- name: End the play if the distro is unsupported
  meta: end_play
  when: not distro_supported
  tags:
    - end_play
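`supported_distros` is consumed by the first task but defined nowhere in this commit, so it presumably lives in group_vars or inventory. A sketch of the expected definition, matching the task name's "Ubuntu or Debian" (an assumption, not from the repo):

```yaml
supported_distros:
  - ubuntu
  - debian
```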
roles/docker/tasks/install.yml (new file)
@@ -0,0 +1,12 @@
---
- name: Add Docker repository
  apt_repository:
    repo: "deb {{ install_url }}"
    state: present
  when: distro_supported

- name: Install Docker
  apt:
    # docker_package is expected from inventory; note defaults/main.yml
    # defines install_packages rather than this variable
    name: "{{ docker_package }}"
    state: present
  when: distro_supported
roles/docker/tasks/main-distro-check.yml (new file)
@@ -0,0 +1,11 @@
---
- name: Include distro check tasks
  include_tasks: check_distro.yml
  tags:
    - check_distro

- name: Include Docker installation tasks if distro is supported
  include_tasks: install.yml
  when: distro_supported
  tags:
    - install
roles/docker/tasks/main.yml (new file)
@@ -0,0 +1,45 @@
---
- name: Clean install by removing any docker package
  package:
    name: "{{ item }}"
    state: absent
  with_items: "{{ clean_install_remove_packages }}"

- name: Ensure curl & ca-certs are installed
  package:
    name:
      - ca-certificates
      - curl
      - gnupg
    state: latest

- name: Ensure apt keyrings directory exists
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'

# NOTE: the key and repo URLs below hardcode linux/ubuntu; on Debian hosts
# these should be linux/debian (or reuse install_url from check_distro.yml)
- name: Download docker gpg key and add to keyrings
  shell: |
    install -m 0755 -d /etc/apt/keyrings
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg
    chmod a+r /etc/apt/keyrings/docker.gpg

- name: Sign and add docker deb source
  shell: |
    echo \
      "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
      "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
      tee /etc/apt/sources.list.d/docker.list > /dev/null

- name: Update apt sources
  become: true
  apt:
    update_cache: yes
    cache_valid_time: 1

- name: Install docker packages
  package:
    name: "{{ item }}"
    state: latest
  with_items: "{{ install_packages }}"

- name: Ensure group docker exists
  group:
    name: docker
    state: present
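The role creates the `docker` group but never puts anyone in it, so non-root users still cannot talk to the daemon. A follow-up task sketch (the user name is hypothetical, not from the repo):

```yaml
- name: Add the deploy user to the docker group
  user:
    name: deploy        # hypothetical user; set per host
    groups: docker
    append: yes
```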
roles/docker/tasks/service.yml (new file)
@@ -0,0 +1,14 @@
---
- name: Start and enable Docker service
  systemd:
    name: docker
    enabled: yes
    state: started
  when: distro_supported | default(true)
roles/elasticsearch/README.md (new file)
@@ -0,0 +1,23 @@
# elasticsearch

This play configures an ELK stack using Docker; the stack is exposed without HTTPS. Container variables are configured in the `tasks/SERVICE.yml` files, where environment variables for the services and for Java can also be set.

The following manual steps are required during setup.

## elastic

After creating the elasticsearch container, SSH into the host running it and generate a new password for the `elastic` user with:

```bash
docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
```

## kibana

Create a password for the `kibana_system` user:

```bash
export ELASTIC_PASSWORD=
export KIBANA_PASSWORD=
curl -s -X POST -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" http://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}";
```
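To confirm the new password took, Elasticsearch's standard authenticate endpoint can be queried (not a step from the repo; the host name assumes the same network as above):

```bash
curl -s -u "kibana_system:${KIBANA_PASSWORD}" http://elasticsearch:9200/_security/_authenticate
```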
roles/elasticsearch/meta/main.yml (new file)
@@ -0,0 +1,16 @@
---
galaxy_info:
  author: Your Name
  description: Ansible role to deploy an ELK stack (Elasticsearch, Kibana, Logstash) using Docker
  license: MIT
  min_ansible_version: "2.9"
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - monitoring
    - elasticsearch
dependencies:
  - docker
roles/elasticsearch/tasks/elasticsearch.yml (new file)
@@ -0,0 +1,46 @@
---
- name: Create a Docker network for Elasticsearch
  docker_network:
    name: elk_network
    state: present

- name: Pull Elasticsearch Docker image
  docker_image:
    name: "docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}"
    source: pull

- name: Create Elasticsearch configuration file directory on host
  file:
    path: /etc/elasticsearch
    state: directory
    mode: '0755'

# - name: Create Elasticsearch configuration file
#   template:
#     src: elasticsearch.yml.j2
#     dest: /etc/elasticsearch/elasticsearch.yml

- name: Start Elasticsearch container
  docker_container:
    name: elasticsearch
    image: "docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}"
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "9200:9200"
    volumes:
      - /etc/elasticsearch/esdata:/usr/share/elasticsearch/data
    networks:
      - name: elk_network
        # assumes elk_network's subnet covers this address
        ipv4_address: 172.19.0.2
    env:
      node.name: elasticsearch
      cluster.name: retailor-elk
      discovery.type: single-node
      bootstrap.memory_lock: "true"
      # limits the Elasticsearch JVM heap to 2 GB of RAM
      ES_JAVA_OPTS: "-Xms1g -Xmx2g"
      # disables TLS on the HTTP layer only
      xpack.security.http.ssl.enabled: "false"
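Elasticsearch refuses to start in production mode when the host's `vm.max_map_count` is below 262144, and the container cannot raise that sysctl itself. A task that could run before the container starts; a sketch, not something this role currently does:

```yaml
- name: Ensure vm.max_map_count meets the Elasticsearch minimum
  sysctl:
    name: vm.max_map_count
    value: "262144"
    state: present
```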
roles/elasticsearch/tasks/kibana.yml (new file)
@@ -0,0 +1,37 @@
---
- name: Create a Docker network for Kibana
  docker_network:
    name: elk_network
    state: present

- name: Create kibana directory on host
  file:
    path: /etc/kibana
    state: directory
    mode: "0755"

- name: Pull Kibana Docker image
  docker_image:
    name: "docker.elastic.co/kibana/kibana:{{ elk_version }}"
    source: pull

# TODO remember to move CA cert from elastic to Kibana
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt kibana:/usr/share/kibana/config/certs/ca/http_ca.crt
- name: Start Kibana container
  docker_container:
    name: kibana
    image: "docker.elastic.co/kibana/kibana:{{ elk_version }}"
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "5601:5601"
    env:
      ELASTICSEARCH_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
      ELASTICSEARCH_USERNAME: kibana_system
      ELASTICSEARCH_PASSWORD: "{{ env_vars.KIBANA_PASSWORD }}"
      TELEMETRY_ENABLED: "false"
    networks:
      - name: elk_network
roles/elasticsearch/tasks/logstash.yml (new file)
@@ -0,0 +1,64 @@
---
- name: Create a Docker network for Logstash
  docker_network:
    name: elk_network
    state: present

- name: Create logstash directory on host
  file:
    path: /etc/logstash
    state: directory
    mode: "0755"

- name: Copy logstash config
  copy:
    src: templates/pipelines.yml.j2
    dest: /etc/logstash/pipelines.yml

- name: Create logstash pipeline directory on host
  file:
    path: /etc/logstash/pipeline
    state: directory
    mode: "0755"

- name: Copy logstash input configs
  copy:
    src: "{{ item }}"
    dest: /etc/logstash/pipeline/{{ item | basename | regex_replace('\.j2$', '') }}
  loop: "{{ query('fileglob', 'templates/logstash-conf.d/*.j2') }}"

- name: Pull Logstash Docker image
  docker_image:
    name: "docker.elastic.co/logstash/logstash:{{ elk_version }}"
    source: pull

# TODO remember to move CA cert from elastic to Logstash
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt logstash:/usr/share/logstash/config/certs/ca/http_ca.crt
- name: Start Logstash container
  docker_container:
    name: logstash
    image: "docker.elastic.co/logstash/logstash:{{ elk_version }}"
    state: started
    restart: yes
    restart_policy: unless-stopped
    command:
      - /bin/bash
      - -c
      - |
        echo "Waiting for Elasticsearch availability";
        until curl -s {{ env_vars.ELASTIC_HOSTS }} | grep -q "missing authentication credentials"; do sleep 1; done;
        echo "Starting logstash";
        /usr/share/logstash/bin/logstash
    published_ports:
      - "5044-5049:5044-5049"
    volumes:
      - /etc/logstash/pipelines.yml:/usr/share/logstash/config/pipelines.yml
      - /etc/logstash/pipeline:/usr/share/logstash/pipeline
    env:
      xpack.monitoring.enabled: "false"
      ELASTIC_USER: elastic
      ELASTIC_PASSWORD: "{{ env_vars.ELASTIC_PASSWORD }}"
      ELASTIC_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
    networks:
      - name: elk_network
roles/elasticsearch/tasks/main.yml (new file)
@@ -0,0 +1,7 @@
---
# ensure we have variables from .env files
- include_tasks: ../roles/env/tasks/main.yml

- include_tasks: elasticsearch.yml
- include_tasks: kibana.yml
- include_tasks: logstash.yml
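Reaching into another role's task file by relative path is fragile, because `include_tasks` resolves the path relative to the including file. `include_role` resolves by role name wherever the play lives; an alternative sketch, not what the repo currently does:

```yaml
- name: Load .env variables via the env role
  include_role:
    name: env
```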
roles/elasticsearch/templates/filebeat.yml.j2 (new file)
@@ -0,0 +1,272 @@
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

# ============================== Filebeat inputs ===============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

- type: log

  # Change to true to enable this input configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
  #multiline.match: after

# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*

  exclude_files: ['\.gz$']

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #prospector.scanner.exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

# ============================== Filebeat modules ==============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false


# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# =============================== Elastic Cloud ================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
  # Array of hosts to connect to.
  # hosts: ["elastic.schleppe:9200"]

  # Protocol - either `http` (default) or `https`.
  # protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts (the Logstash container publishes 5044-5049)
  hosts: ["elasticsearch:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  # ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]

  # Certificate for SSL client authentication
  # ssl.certificate: "/etc/elk-certs/elk-ssl.crt"

  # Client Certificate Key
  # ssl.key: "/etc/elk-certs/elk-ssl.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# ================================== Logging ===================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
    # Set to true to enable instrumentation of filebeat.
    #enabled: false

    # Environment in which filebeat is running on (eg: staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:


# ================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
roles/elasticsearch/templates/logstash-conf.d/laravel_pipeline.conf.j2 (new file; name inferred from pipelines.yml.j2, header lost in the mirror)
@@ -0,0 +1,17 @@
input {
  beats {
    port => 5045
  }
}

filter {
}

output {
  elasticsearch {
    index => "laravel-logs-%{+YYYY.MM}"
    hosts => "${ELASTIC_HOSTS}"
    user => "elastic"
    password => "${ELASTIC_PASSWORD}"
  }
}
roles/elasticsearch/templates/logstash-conf.d/ (a distributor pipeline; file name lost in the mirror)
@@ -0,0 +1,24 @@
input {
  beats {
    port => 5044
  }
}

filter {
  if [pipeline_id] == "nginx" {
    mutate { add_field => { "route" => "nginx_pipeline" } }
  } else if [pipeline_id] == "laravel" {
    mutate { add_field => { "route" => "laravel_pipeline" } }
  }
}

output {
  if [pipeline_id] == "nginx" {
    pipeline { send_to => "nginx_pipeline" }
  } else if [pipeline_id] == "laravel" {
    pipeline { send_to => "laravel_pipeline" }
  } else {
    # Handle unknown cases
    stdout { codec => rubydebug }
  }
}
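Note that pipelines.yml.j2 below only registers `nginx_pipeline` and `laravel_pipeline`, so this distributor is never loaded; if it were, its beats input would collide with nginx_pipeline's port 5044. For the `pipeline { send_to => ... }` outputs to work, the receiving pipelines would also need pipeline inputs rather than beats inputs; a sketch of the receiving side:

```
input {
  pipeline {
    address => "nginx_pipeline"
  }
}
```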
roles/elasticsearch/templates/logstash-conf.d/nginx_pipeline.conf.j2 (new file; name inferred from pipelines.yml.j2, header lost in the mirror)
@@ -0,0 +1,39 @@
input {
  beats {
    port => 5044
  }
}

filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}"]
    overwrite => [ "message" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  # geoip {
  #   source => "clientip"
  #   add_tag => [ "nginx-geoip" ]
  # }
  date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
    remove_field => [ "timestamp" ]
  }
  # useragent {
  #   source => "agent"
  # }
}

output {
  elasticsearch {
    index => "weblogs-%{+YYYY.MM}"
    hosts => "${ELASTIC_HOSTS}"
    user => "elastic"
    password => "${ELASTIC_PASSWORD}"
    # document_type is deprecated and ignored on Elasticsearch 7+
    document_type => "nginx_logs"
  }
}
roles/elasticsearch/templates/pipelines.yml.j2 (new file)
@@ -0,0 +1,5 @@
- pipeline.id: nginx_pipeline
  path.config: "/usr/share/logstash/pipeline/nginx_pipeline.conf"

- pipeline.id: laravel_pipeline
  path.config: "/usr/share/logstash/pipeline/laravel_pipeline.conf"
roles/env/tasks/main.yml (new file, vendored)
@@ -0,0 +1,12 @@
---
- name: Read .env file
  ansible.builtin.slurp:
    src: "../.env"
  delegate_to: localhost
  register: env_file

- name: Parse .env file
  ansible.builtin.set_fact:
    env_vars: "{{ dict(env_file['content'] | b64decode | split('\n') | select('search', '=') | map('split', '=', 1) | list) }}"
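The parser above expects plain `KEY=VALUE` lines and splits each on the first `=`. The keys referenced elsewhere in this commit suggest a `.env` next to the playbook shaped roughly like this (values are placeholders, not from the repo):

```
ELASTIC_HOSTS=http://elasticsearch:9200
ELASTIC_PASSWORD=<elastic password>
KIBANA_PASSWORD=<kibana_system password>
```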
roles/fail2ban/handlers/main.yaml (new file)
@@ -0,0 +1,3 @@
---
- name: restart fail2ban
  service:
    name: fail2ban
    state: restarted
roles/fail2ban/tasks/fail2ban.yaml (new file)
@@ -0,0 +1,18 @@
---
- name: install fail2ban
  apt:
    pkg: fail2ban
    state: latest

- name: /etc/fail2ban/jail.conf
  template:
    src: fail2ban/jail.conf.j2
    dest: /etc/fail2ban/jail.conf
    owner: root
    group: root
    mode: '0644'
  notify: restart fail2ban

- name: /etc/fail2ban/fail2ban.conf
  template:
    src: fail2ban/fail2ban.conf.j2
    dest: /etc/fail2ban/fail2ban.conf
    owner: root
    group: root
    mode: '0644'
  notify: restart fail2ban

- name: Enable and start the fail2ban service
  systemd:
    name: fail2ban
    state: started
    enabled: yes
roles/fail2ban/tasks/main.yaml (new file)
@@ -0,0 +1,4 @@
---
- include_tasks: fail2ban.yaml
  when: ansible_os_family == "Debian"
roles/fail2ban/templates/fail2ban/fail2ban.conf.j2 (new file)
@@ -0,0 +1,88 @@
# {{ ansible_managed }}

# Fail2Ban main configuration file
#
# Comments: use '#' for comment lines and ';' (following a space) for inline comments
#
# Changes: in most of the cases you should not modify this
#          file, but provide customizations in fail2ban.local file, e.g.:
#
# [DEFAULT]
# loglevel = DEBUG
#

[DEFAULT]

# Option: loglevel
# Notes.: Set the log level output.
#         CRITICAL
#         ERROR
#         WARNING
#         NOTICE
#         INFO
#         DEBUG
# Values: [ LEVEL ] Default: ERROR
#
loglevel = INFO

# Option: logtarget
# Notes.: Set the log target. This could be a file, SYSLOG, STDERR or STDOUT.
#         Only one log target can be specified.
#         If you change logtarget from the default value and you are
#         using logrotate -- also adjust or disable rotation in the
#         corresponding configuration file
#         (e.g. /etc/logrotate.d/fail2ban on Debian systems)
# Values: [ STDOUT | STDERR | SYSLOG | SYSOUT | FILE ] Default: STDERR
#
logtarget = /var/log/fail2ban.log

# Option: syslogsocket
# Notes: Set the syslog socket file. Only used when logtarget is SYSLOG
#        auto uses platform.system() to determine predefined paths
# Values: [ auto | FILE ] Default: auto
syslogsocket = auto

# Option: socket
# Notes.: Set the socket file. This is used to communicate with the daemon. Do
#         not remove this file when Fail2ban runs. It will not be possible to
#         communicate with the server afterwards.
# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.sock
#
socket = /var/run/fail2ban/fail2ban.sock

# Option: pidfile
# Notes.: Set the PID file. This is used to store the process ID of the
#         fail2ban server.
# Values: [ FILE ] Default: /var/run/fail2ban/fail2ban.pid
#
pidfile = /var/run/fail2ban/fail2ban.pid

# Options: dbfile
# Notes.: Set the file for the fail2ban persistent data to be stored.
#         A value of ":memory:" means database is only stored in memory
#         and data is lost when fail2ban is stopped.
#         A value of "None" disables the database.
# Values: [ None :memory: FILE ] Default: /var/lib/fail2ban/fail2ban.sqlite3
dbfile = /var/lib/fail2ban/fail2ban.sqlite3

# Options: dbpurgeage
# Notes.: Sets age at which bans should be purged from the database
# Values: [ SECONDS ] Default: 86400 (24hours)
dbpurgeage = 1d

# Options: dbmaxmatches
# Notes.: Number of matches stored in database per ticket (resolvable via
#         tags <ipmatches>/<ipjailmatches> in actions)
# Values: [ INT ] Default: 10
dbmaxmatches = 10

[Definition]


[Thread]

# Options: stacksize
# Notes.: Specifies the stack size (in KiB) to be used for subsequently created threads,
#         and must be 0 or a positive integer value of at least 32.
# Values: [ SIZE ] Default: 0 (use platform or configured default)
#stacksize = 0
roles/fail2ban/templates/fail2ban/jail.conf.j2 (new file)
@@ -0,0 +1,956 @@
# {{ ansible_managed }}

#
# WARNING: heavily refactored in 0.9.0 release. Please review and
#          customize settings for your setup.
#
# Changes: in most of the cases you should not modify this
#          file, but provide customizations in jail.local file,
#          or separate .conf files under jail.d/ directory, e.g.:
#
# HOW TO ACTIVATE JAILS:
#
# YOU SHOULD NOT MODIFY THIS FILE.
#
# It will probably be overwritten or improved in a distribution update.
#
# Provide customizations in a jail.local file or a jail.d/customisation.local.
# For example to change the default bantime for all jails and to enable the
# ssh-iptables jail the following (uncommented) would appear in the .local file.
# See man 5 jail.conf for details.
#
# [DEFAULT]
# bantime = 1h
#
# [sshd]
# enabled = true
#
# See jail.conf(5) man page for more information


# Comments: use '#' for comment lines and ';' (following a space) for inline comments


[INCLUDES]

#before = paths-distro.conf
before = paths-debian.conf

# The DEFAULT allows a global definition of the options. They can be overridden
# in each jail afterwards.

[DEFAULT]

#
# MISCELLANEOUS OPTIONS
#

# "bantime.increment" allows to use database for searching of previously banned ip's to increase a
# default ban time using special formula, default it is banTime * 1, 2, 4, 8, 16, 32...
#bantime.increment = true

# "bantime.rndtime" is the max number of seconds using for mixing with random time
# to prevent "clever" botnets calculate exact time IP can be unbanned again:
#bantime.rndtime =

# "bantime.maxtime" is the max number of seconds using the ban time can reach (don't grows further)
#bantime.maxtime =

# "bantime.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
# default value of factor is 1 and with default value of formula, the ban time
# grows by 1, 2, 4, 8, 16 ...
#bantime.factor = 1

# "bantime.formula" used by default to calculate next value of ban time, default value below,
# the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32...
#bantime.formula = ban.Time * (1<<(ban.Count if ban.Count<20 else 20)) * banFactor
#
# more aggressive example of formula has the same values only for factor "2.0 / 2.885385" :
#bantime.formula = ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)

# "bantime.multipliers" used to calculate next value of ban time instead of formula, corresponding
# previously ban count and given "bantime.factor" (for multipliers default is 1);
# following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
# always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
#bantime.multipliers = 1 2 4 8 16 32 64
# following example can be used for small initial ban time (bantime=60) - it grows more aggressive at begin,
# for bantime=60 the multipliers are minutes and equal: 1 min, 5 min, 30 min, 1 hour, 5 hour, 12 hour, 1 day, 2 day
#bantime.multipliers = 1 5 30 60 300 720 1440 2880

# "bantime.overalljails" (if true) specifies the search of IP in the database will be executed
# cross over all jails, if false (default), only current jail of the ban IP will be searched
#bantime.overalljails = false

# --------------------

# "ignoreself" specifies whether the local resp. own IP addresses should be ignored
# (default is true). Fail2ban will not ban a host which matches such addresses.
#ignoreself = true

# "ignoreip" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban
# will not ban a host which matches an address in this list. Several addresses
# can be defined using space (and/or comma) separator.
#ignoreip = 127.0.0.1/8 ::1

# External command that will take a tagged argument to ignore, e.g. <ip>,
# and return true if the IP is to be ignored. False otherwise.
#
# ignorecommand = /path/to/command <ip>
ignorecommand =

# "bantime" is the number of seconds that a host is banned.
bantime = 10m

# A host is banned if it has generated "maxretry" during the last "findtime"
# seconds.
findtime = 10m

# "maxretry" is the number of failures before a host get banned.
maxretry = 5

# "maxmatches" is the number of matches stored in ticket (resolvable via tag <matches> in actions).
maxmatches = %(maxretry)s

# "backend" specifies the backend used to get files modification.
# Available options are "pyinotify", "gamin", "polling", "systemd" and "auto".
# This option can be overridden in each jail as well.
#
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
#            If pyinotify is not installed, Fail2ban will use auto.
# gamin:     requires Gamin (a file alteration monitor) to be installed.
#            If Gamin is not installed, Fail2ban will use auto.
# polling:   uses a polling algorithm which does not require external libraries.
# systemd:   uses systemd python library to access the systemd journal.
#            Specifying "logpath" is not valid for this backend.
#            See "journalmatch" in the jails associated filter config
# auto:      will try to use the following backends, in order:
#            pyinotify, gamin, polling.
#
# Note: if systemd backend is chosen as the default but you enable a jail
#       for which logs are present only in its own log files, specify some other
#       backend for that jail (e.g. polling) and provide empty value for
#       journalmatch. See https://github.com/fail2ban/fail2ban/issues/959#issuecomment-74901200
backend = auto

# "usedns" specifies if jails should trust hostnames in logs,
#   warn when DNS lookups are performed, or ignore all hostnames in logs
#
# yes:  if a hostname is encountered, a DNS lookup will be performed.
# warn: if a hostname is encountered, a DNS lookup will be performed,
#       but it will be logged as a warning.
# no:   if a hostname is encountered, will not be used for banning,
#       but it will be logged as info.
# raw:  use raw value (no hostname), allow use it for no-host filters/actions (example user)
usedns = warn

# "logencoding" specifies the encoding of the log files handled by the jail
#   This is used to decode the lines from the log file.
#   Typical examples: "ascii", "utf-8"
#
#   auto: will use the system locale setting
logencoding = auto

# "enabled" enables the jails.
#  By default all jails are disabled, and it should stay this way.
#  Enable only relevant to your setup jails in your .local or jail.d/*.conf
#
#  true:  jail will be enabled and log files will get monitored for changes
#  false: jail is not enabled
enabled = false


# "mode" defines the mode of the filter (see corresponding filter implementation for more info).
mode = normal

# "filter" defines the filter to use by the jail.
#  By default jails have names matching their filter name
#
filter = %(__name__)s[mode=%(mode)s]

#
# ACTIONS
#

# Some options used for actions

# Destination email address used solely for the interpolations in
# jail.{conf,local,d/*} configuration files.
destemail = root@localhost

# Sender email address used solely for some actions
sender = root@<fq-hostname>

# E-mail action. Since 0.8.1 Fail2Ban uses sendmail MTA for the
# mailing. Change mta configuration parameter to mail if you want to
# revert to conventional 'mail'.
mta = sendmail

# Default protocol
protocol = tcp

# Specify chain where jumps would need to be added in ban-actions expecting parameter chain
chain = <known/chain>

# Ports to be banned
# Usually should be overridden in a particular jail
port = 0:65535

# Format of user-agent https://tools.ietf.org/html/rfc7231#section-5.5.3
fail2ban_agent = Fail2Ban/%(fail2ban_version)s

#
# Action shortcuts. To be used to define action parameter

# Default banning action (e.g. iptables, iptables-new,
# iptables-multiport, shorewall, etc) It is used to define
# action_* variables. Can be overridden globally or per
# section within jail.local file
banaction = iptables-multiport
banaction_allports = iptables-allports

# The simplest action to take: ban only
action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]

# ban & send an e-mail with whois report to the destemail.
action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
            %(mta)s-whois[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s"]

# ban & send an e-mail with whois report and relevant log lines
# to the destemail.
action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
             %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]

# See the IMPORTANT note in action.d/xarf-login-attack for when to use this action
#
# ban & send a xarf e-mail to abuse contact of IP address and include relevant log lines
# to the destemail.
action_xarf = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
              xarf-login-attack[service=%(__name__)s, sender="%(sender)s", logpath="%(logpath)s", port="%(port)s"]

# ban IP on CloudFlare & send an e-mail with whois report and relevant log lines
# to the destemail.
action_cf_mwl = cloudflare[cfuser="%(cfemail)s", cftoken="%(cfapikey)s"]
                %(mta)s-whois-lines[name=%(__name__)s, sender="%(sender)s", dest="%(destemail)s", logpath="%(logpath)s", chain="%(chain)s"]

# Report block via blocklist.de fail2ban reporting service API
#
# See the IMPORTANT note in action.d/blocklist_de.conf for when to use this action.
# Specify expected parameters in file action.d/blocklist_de.local or if the interpolation
# `action_blocklist_de` used for the action, set value of `blocklist_de_apikey`
# in your `jail.local` globally (section [DEFAULT]) or per specific jail section (resp. in
# corresponding jail.d/my-jail.local file).
#
action_blocklist_de = blocklist_de[email="%(sender)s", service=%(filter)s, apikey="%(blocklist_de_apikey)s", agent="%(fail2ban_agent)s"]

# Report ban via badips.com, and use as blacklist
#
# See BadIPsAction docstring in config/action.d/badips.py for
# documentation for this action.
#
# NOTE: This action relies on banaction being present on start and therefore
# should be last action defined for a jail.
#
action_badips = badips.py[category="%(__name__)s", banaction="%(banaction)s", agent="%(fail2ban_agent)s"]
#
# Report ban via badips.com (uses action.d/badips.conf for reporting only)
#
action_badips_report = badips[category="%(__name__)s", agent="%(fail2ban_agent)s"]

# Report ban via abuseipdb.com.
#
# See action.d/abuseipdb.conf for usage example and details.
#
action_abuseipdb = abuseipdb

# Choose default action. To change, just override value of 'action' with the
# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local
# globally (section [DEFAULT]) or per specific section
action = %(action_)s

#
# JAILS
#

#
# SSH servers
#

[sshd]

# To use more aggressive sshd modes set filter parameter "mode" in jail.local:
# normal (default), ddos, extra or aggressive (combines all).
# See "tests/files/logs/sshd" or "filter.d/sshd.conf" for usage example and details.
#mode = normal
port = ssh
logpath = %(sshd_log)s
backend = %(sshd_backend)s


[dropbear]

port = ssh
logpath = %(dropbear_log)s
backend = %(dropbear_backend)s


[selinux-ssh]

port = ssh
logpath = %(auditd_log)s


#
# HTTP servers
#

[apache-auth]

port = http,https
logpath = %(apache_error_log)s


[apache-badbots]
# Ban hosts which agent identifies spammer robots crawling the web
# for email addresses. The mail outputs are buffered.
port = http,https
logpath = %(apache_access_log)s
bantime = 48h
maxretry = 1


[apache-noscript]

port = http,https
logpath = %(apache_error_log)s


[apache-overflows]

port = http,https
logpath = %(apache_error_log)s
maxretry = 2


[apache-nohome]

port = http,https
logpath = %(apache_error_log)s
maxretry = 2


[apache-botsearch]

port = http,https
logpath = %(apache_error_log)s
maxretry = 2


[apache-fakegooglebot]

port = http,https
logpath = %(apache_access_log)s
maxretry = 1
ignorecommand = %(ignorecommands_dir)s/apache-fakegooglebot <ip>


[apache-modsecurity]

port = http,https
logpath = %(apache_error_log)s
maxretry = 2


[apache-shellshock]

port = http,https
logpath = %(apache_error_log)s
maxretry = 1


[openhab-auth]

filter = openhab
action = iptables-allports[name=NoAuthFailures]
logpath = /opt/openhab/logs/request.log


[nginx-http-auth]

port = http,https
logpath = %(nginx_error_log)s

# To use 'nginx-limit-req' jail you should have `ngx_http_limit_req_module`
# and define `limit_req` and `limit_req_zone` as described in nginx documentation
# http://nginx.org/en/docs/http/ngx_http_limit_req_module.html
# or for example see in 'config/filter.d/nginx-limit-req.conf'
[nginx-limit-req]
port = http,https
logpath = %(nginx_error_log)s

[nginx-botsearch]

port = http,https
logpath = %(nginx_error_log)s
maxretry = 2


# Ban attackers that try to use PHP's URL-fopen() functionality
# through GET/POST variables. - Experimental, with more than a year
# of usage in production environments.

[php-url-fopen]

port = http,https
logpath = %(nginx_access_log)s
          %(apache_access_log)s


[suhosin]

port = http,https
logpath = %(suhosin_log)s


[lighttpd-auth]
# Same as above for Apache's mod_auth
# It catches wrong authentifications
port = http,https
logpath = %(lighttpd_error_log)s

#
# Webmail and groupware servers
#

[roundcube-auth]

port = http,https
logpath = %(roundcube_errors_log)s
# Use following line in your jail.local if roundcube logs to journal.
#backend = %(syslog_backend)s


[openwebmail]

port = http,https
logpath = /var/log/openwebmail.log


[horde]

port = http,https
logpath = /var/log/horde/horde.log


[groupoffice]

port = http,https
logpath = /home/groupoffice/log/info.log


[sogo-auth]
# Monitor SOGo groupware server
# without proxy this would be:
# port = 20000
port = http,https
logpath = /var/log/sogo/sogo.log


[tine20]

logpath = /var/log/tine20/tine20.log
port = http,https


#
# Web Applications
#
#

[drupal-auth]

port = http,https
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s

[guacamole]

port = http,https
logpath = /var/log/tomcat*/catalina.out

[monit]
#Ban clients brute-forcing the monit gui login
port = 2812
logpath = /var/log/monit
          /var/log/monit.log


[webmin-auth]

port = 10000
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s


[froxlor-auth]

port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s


#
# HTTP Proxy servers
#
#

[squid]

port = 80,443,3128,8080
logpath = /var/log/squid/access.log


[3proxy]

port = 3128
logpath = /var/log/3proxy.log


#
# FTP servers
#


[proftpd]

port = ftp,ftp-data,ftps,ftps-data
logpath = %(proftpd_log)s
backend = %(proftpd_backend)s


[pure-ftpd]

port = ftp,ftp-data,ftps,ftps-data
logpath = %(pureftpd_log)s
backend = %(pureftpd_backend)s


[gssftpd]

port = ftp,ftp-data,ftps,ftps-data
logpath = %(syslog_daemon)s
backend = %(syslog_backend)s


[wuftpd]

port = ftp,ftp-data,ftps,ftps-data
logpath = %(wuftpd_log)s
backend = %(wuftpd_backend)s


[vsftpd]
# or overwrite it in jails.local to be
# logpath = %(syslog_authpriv)s
# if you want to rely on PAM failed login attempts
# vsftpd's failregex should match both of those formats
port = ftp,ftp-data,ftps,ftps-data
logpath = %(vsftpd_log)s


#
# Mail servers
#

# ASSP SMTP Proxy Jail
[assp]

port = smtp,465,submission
logpath = /root/path/to/assp/logs/maillog.txt


[courier-smtp]

port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[postfix]
# To use another modes set filter parameter "mode" in jail.local:
mode = more
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s


[postfix-rbl]

filter = postfix[mode=rbl]
port = smtp,465,submission
logpath = %(postfix_log)s
backend = %(postfix_backend)s
maxretry = 1


[sendmail-auth]

port = submission,465,smtp
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[sendmail-reject]
# To use more aggressive modes set filter parameter "mode" in jail.local:
# normal (default), extra or aggressive
# See "tests/files/logs/sendmail-reject" or "filter.d/sendmail-reject.conf" for usage example and details.
#mode = normal
port = smtp,465,submission
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[qmail-rbl]

filter = qmail
port = smtp,465,submission
logpath = /service/qmail/log/main/current


# dovecot defaults to logging to the mail syslog facility
# but can be set by syslog_facility in the dovecot configuration.
[dovecot]

port = pop3,pop3s,imap,imaps,submission,465,sieve
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s


[sieve]

port = smtp,465,submission
logpath = %(dovecot_log)s
backend = %(dovecot_backend)s


[solid-pop3d]

port = pop3,pop3s
logpath = %(solidpop3d_log)s


[exim]
# see filter.d/exim.conf for further modes supported from filter:
#mode = normal
port = smtp,465,submission
logpath = %(exim_main_log)s


[exim-spam]

port = smtp,465,submission
logpath = %(exim_main_log)s


[kerio]

port = imap,smtp,imaps,465
logpath = /opt/kerio/mailserver/store/logs/security.log


#
# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
# all relevant ports get banned
#

[courier-auth]

port = smtp,465,submission,imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[postfix-sasl]

filter = postfix[mode=auth]
port = smtp,465,submission,imap,imaps,pop3,pop3s
# You might consider monitoring /var/log/mail.warn instead if you are
# running postfix since it would provide the same log lines at the
# "warn" level but overall at the smaller filesize.
logpath = %(postfix_log)s
backend = %(postfix_backend)s


[perdition]

port = imap,imaps,pop3,pop3s
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[squirrelmail]

port = smtp,465,submission,imap,imap2,imaps,pop3,pop3s,http,https,socks
logpath = /var/lib/squirrelmail/prefs/squirrelmail_access_log


[cyrus-imap]

port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


[uwimap-auth]

port = imap,imaps
logpath = %(syslog_mail)s
backend = %(syslog_backend)s


#
#
# DNS servers
#


# !!! WARNING !!!
#   Since UDP is connection-less protocol, spoofing of IP and imitation
#   of illegal actions is way too simple. Thus enabling of this filter
#   might provide an easy way for implementing a DoS against a chosen
#   victim. See
#   http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
#   Please DO NOT USE this jail unless you know what you are doing.
#
# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks UDP traffic for DNS requests.
# [named-refused-udp]
#
# filter = named-refused
# port = domain,953
# protocol = udp
# logpath = /var/log/named/security.log

# IMPORTANT: see filter.d/named-refused for instructions to enable logging
# This jail blocks TCP traffic for DNS requests.

[named-refused]

port = domain,953
logpath = /var/log/named/security.log


[nsd]

port = 53
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
         %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
logpath = /var/log/nsd.log


#
# Miscellaneous
#

[asterisk]

port = 5060,5061
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
|
||||
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
|
||||
%(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
|
||||
logpath = /var/log/asterisk/messages
|
||||
maxretry = 10
|
||||
|
||||
|
||||
[freeswitch]
|
||||
|
||||
port = 5060,5061
|
||||
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
|
||||
%(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
|
||||
%(mta)s-whois[name=%(__name__)s, dest="%(destemail)s"]
|
||||
logpath = /var/log/freeswitch.log
|
||||
maxretry = 10
|
||||
|
||||
|
||||
# enable adminlog; it will log to a file inside znc's directory by default.
|
||||
[znc-adminlog]
|
||||
|
||||
port = 6667
|
||||
logpath = /var/lib/znc/moddata/adminlog/znc.log
|
||||
|
||||
|
||||
# To log wrong MySQL access attempts add to /etc/my.cnf in [mysqld] or
|
||||
# equivalent section:
|
||||
# log-warnings = 2
|
||||
#
|
||||
# for syslog (daemon facility)
|
||||
# [mysqld_safe]
|
||||
# syslog
|
||||
#
|
||||
# for own logfile
|
||||
# [mysqld]
|
||||
# log-error=/var/log/mysqld.log
|
||||
[mysqld-auth]
|
||||
|
||||
port = 3306
|
||||
logpath = %(mysql_log)s
|
||||
backend = %(mysql_backend)s
|
||||
|
||||
|
||||
# Log wrong MongoDB auth (for details see filter 'filter.d/mongodb-auth.conf')
|
||||
[mongodb-auth]
|
||||
# change port when running with "--shardsvr" or "--configsvr" runtime operation
|
||||
port = 27017
|
||||
logpath = /var/log/mongodb/mongodb.log
|
||||
|
||||
|
||||
# Jail for more extended banning of persistent abusers
|
||||
# !!! WARNINGS !!!
|
||||
# 1. Make sure that your loglevel specified in fail2ban.conf/.local
|
||||
# is not at DEBUG level -- which might then cause fail2ban to fall into
|
||||
# an infinite loop constantly feeding itself with non-informative lines
|
||||
# 2. Increase dbpurgeage defined in fail2ban.conf to e.g. 648000 (7.5 days)
|
||||
# to maintain entries for failed logins for sufficient amount of time
|
||||
[recidive]
|
||||
|
||||
logpath = /var/log/fail2ban.log
|
||||
banaction = %(banaction_allports)s
|
||||
bantime = 1w
|
||||
findtime = 1d
|
||||
|
||||
|
||||
# Generic filter for PAM. Has to be used with action which bans all
|
||||
# ports such as iptables-allports, shorewall
|
||||
|
||||
[pam-generic]
|
||||
# pam-generic filter can be customized to monitor specific subset of 'tty's
|
||||
banaction = %(banaction_allports)s
|
||||
logpath = %(syslog_authpriv)s
|
||||
backend = %(syslog_backend)s
|
||||
|
||||
|
||||
[xinetd-fail]
|
||||
|
||||
banaction = iptables-multiport-log
|
||||
logpath = %(syslog_daemon)s
|
||||
backend = %(syslog_backend)s
|
||||
maxretry = 2
|
||||
|
||||
|
||||
# stunnel - need to set port for this
|
||||
[stunnel]
|
||||
|
||||
logpath = /var/log/stunnel4/stunnel.log
|
||||
|
||||
|
||||
[ejabberd-auth]
|
||||
|
||||
port = 5222
|
||||
logpath = /var/log/ejabberd/ejabberd.log
|
||||
|
||||
|
||||
[counter-strike]
|
||||
|
||||
logpath = /opt/cstrike/logs/L[0-9]*.log
|
||||
# Firewall: http://www.cstrike-planet.com/faq/6
|
||||
tcpport = 27030,27031,27032,27033,27034,27035,27036,27037,27038,27039
|
||||
udpport = 1200,27000,27001,27002,27003,27004,27005,27006,27007,27008,27009,27010,27011,27012,27013,27014,27015
|
||||
action = %(banaction)s[name=%(__name__)s-tcp, port="%(tcpport)s", protocol="tcp", chain="%(chain)s", actname=%(banaction)s-tcp]
|
||||
%(banaction)s[name=%(__name__)s-udp, port="%(udpport)s", protocol="udp", chain="%(chain)s", actname=%(banaction)s-udp]
|
||||
|
||||
[bitwarden]
|
||||
port = http,https
|
||||
logpath = /home/*/bwdata/logs/identity/Identity/log.txt
|
||||
|
||||
[centreon]
|
||||
port = http,https
|
||||
logpath = /var/log/centreon/login.log
|
||||
|
||||
# consider low maxretry and a long bantime
|
||||
# nobody except your own Nagios server should ever probe nrpe
|
||||
[nagios]
|
||||
|
||||
logpath = %(syslog_daemon)s ; nrpe.cfg may define a different log_facility
|
||||
backend = %(syslog_backend)s
|
||||
maxretry = 1
|
||||
|
||||
|
||||
[oracleims]
|
||||
# see "oracleims" filter file for configuration requirement for Oracle IMS v6 and above
|
||||
logpath = /opt/sun/comms/messaging64/log/mail.log_current
|
||||
banaction = %(banaction_allports)s
|
||||
|
||||
[directadmin]
|
||||
logpath = /var/log/directadmin/login.log
|
||||
port = 2222
|
||||
|
||||
[portsentry]
|
||||
logpath = /var/lib/portsentry/portsentry.history
|
||||
maxretry = 1
|
||||
|
||||
[pass2allow-ftp]
|
||||
# this pass2allow example allows FTP traffic after successful HTTP authentication
|
||||
port = ftp,ftp-data,ftps,ftps-data
|
||||
# knocking_url variable must be overridden to some secret value in jail.local
|
||||
knocking_url = /knocking/
|
||||
filter = apache-pass[knocking_url="%(knocking_url)s"]
|
||||
# access log of the website with HTTP auth
|
||||
logpath = %(apache_access_log)s
|
||||
blocktype = RETURN
|
||||
returntype = DROP
|
||||
action = %(action_)s[blocktype=%(blocktype)s, returntype=%(returntype)s,
|
||||
actionstart_on_demand=false, actionrepair_on_unban=true]
|
||||
bantime = 1h
|
||||
maxretry = 1
|
||||
findtime = 1
|
||||
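
# A minimal sketch of the jail.local override the comment above asks for;
# the URL shown is a hypothetical placeholder, not a value from this repository:
# [pass2allow-ftp]
# enabled = true
# knocking_url = /my-secret-knock/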

[murmur]
# AKA mumble-server
port = 64738
action = %(banaction)s[name=%(__name__)s-tcp, port="%(port)s", protocol=tcp, chain="%(chain)s", actname=%(banaction)s-tcp]
         %(banaction)s[name=%(__name__)s-udp, port="%(port)s", protocol=udp, chain="%(chain)s", actname=%(banaction)s-udp]
logpath = /var/log/mumble-server/mumble-server.log


[screensharingd]
# For Mac OS Screen Sharing Service (VNC)
logpath = /var/log/system.log
logencoding = utf-8

[haproxy-http-auth]
# HAProxy by default doesn't log to file you'll need to set it up to forward
# logs to a syslog server which would then write them to disk.
# See "haproxy-http-auth" filter for a brief cautionary note when setting
# maxretry and findtime.
logpath = /var/log/haproxy.log

[slapd]
port = ldap,ldaps
logpath = /var/log/slapd.log

[domino-smtp]
port = smtp,ssmtp
logpath = /home/domino01/data/IBM_TECHNICAL_SUPPORT/console.log

[phpmyadmin-syslog]
port = http,https
logpath = %(syslog_authpriv)s
backend = %(syslog_backend)s


[zoneminder]
# Zoneminder HTTP/HTTPS web interface auth
# Logs auth failures to apache2 error log
port = http,https
logpath = %(apache_error_log)s

[traefik-auth]
# to use 'traefik-auth' filter you have to configure your Traefik instance,
# see `filter.d/traefik-auth.conf` for details and service example.
port = http,https
logpath = /var/log/traefik/access.log
1
roles/fail2ban/vars/main.yaml
Normal file
1
roles/fail2ban/vars/main.yaml
Normal file
@@ -0,0 +1 @@
---
1
roles/fail2ban_exporter/defaults/main.yml
Normal file
1
roles/fail2ban_exporter/defaults/main.yml
Normal file
@@ -0,0 +1 @@
---
6
roles/fail2ban_exporter/handlers/main.yaml
Normal file
6
roles/fail2ban_exporter/handlers/main.yaml
Normal file
@@ -0,0 +1,6 @@
---
- name: Restart fail2ban_exporter
  systemd:
    name: fail2ban_exporter
    state: restarted
    daemon_reload: yes
24
roles/fail2ban_exporter/tasks/install.yaml
Normal file
24
roles/fail2ban_exporter/tasks/install.yaml
Normal file
@@ -0,0 +1,24 @@
---
- name: Get fail2ban release download url
  uri:
    url: "https://gitlab.com/hctrdev/fail2ban-prometheus-exporter/-/releases/v{{ fail2ban_exporter_version }}/downloads/fail2ban_exporter_{{ fail2ban_exporter_version }}_linux_amd64.tar.gz"
    follow_redirects: all
  register: fail2ban_download_url

- name: Download fail2ban Exporter
  get_url:
    url: "{{ fail2ban_download_url.url }}"
    dest: "/tmp/fail2ban_exporter.tar.gz"

- name: Extract fail2ban Exporter
  unarchive:
    src: "/tmp/fail2ban_exporter.tar.gz"
    dest: "/tmp"
    remote_src: yes

- name: Move fail2ban Exporter binary
  copy:
    src: "/tmp/fail2ban_exporter"
    dest: "/usr/sbin/fail2ban_exporter"
    remote_src: yes
    mode: '0755'
3
roles/fail2ban_exporter/tasks/main.yaml
Normal file
3
roles/fail2ban_exporter/tasks/main.yaml
Normal file
@@ -0,0 +1,3 @@
---
- include_tasks: install.yaml
- include_tasks: service.yml
23
roles/fail2ban_exporter/tasks/service.yml
Normal file
23
roles/fail2ban_exporter/tasks/service.yml
Normal file
@@ -0,0 +1,23 @@
---
- name: Create fail2ban_exporter service file
  template:
    src: "fail2ban_exporter.service.j2"
    dest: "/etc/systemd/system/fail2ban_exporter.service"
    mode: '0644'
  tags:
    - create_service

- name: Reload systemd to pick up the fail2ban_exporter service
  systemd:
    daemon_reload: yes
  tags:
    - reload_systemd

- name: Enable and start the fail2ban_exporter service
  systemd:
    name: fail2ban_exporter
    state: started
    enabled: yes
  tags:
    - start_fail2ban_exporter
19
roles/fail2ban_exporter/templates/fail2ban_exporter.service.j2
Normal file
19
roles/fail2ban_exporter/templates/fail2ban_exporter.service.j2
Normal file
@@ -0,0 +1,19 @@
[Unit]
Description=Fail2ban metric exporter for Prometheus
Documentation=https://gitlab.com/hctrdev/fail2ban-prometheus-exporter/-/blob/main/README.md
Requires=network-online.target
After=network-online.target

[Service]
ExecStart=/usr/sbin/fail2ban_exporter
Restart=on-failure
RestartSec=5s
NoNewPrivileges=true

# Currently need to run the exporter as root to ensure it has read/write access to the
# fail2ban socket file.
User=root
Group=root

[Install]
WantedBy=multi-user.target
64
roles/filebeat-laravel/tasks/main.yml
Normal file
64
roles/filebeat-laravel/tasks/main.yml
Normal file
@@ -0,0 +1,64 @@
- name: Update APT package cache
  apt:
    update_cache: yes

- name: Install dependencies
  apt:
    name: [apt-transport-https, wget]
    state: present

- name: Download and add Elastic GPG key
  ansible.builtin.shell: |
    wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
  args:
    executable: /bin/bash

- name: Add Elastic APT repository
  ansible.builtin.copy:
    dest: /etc/apt/sources.list.d/elastic-8.x.list
    content: "deb https://artifacts.elastic.co/packages/8.x/apt stable main"

- name: Update APT cache after adding repo
  apt:
    update_cache: yes

- name: Install Filebeat
  apt:
    name: filebeat
    state: present

- name: Copy Filebeat configuration
  template:
    src: filebeat.yml.j2
    dest: /etc/filebeat/filebeat-laravel.yml

- name: Disable default Filebeat service
  systemd:
    name: filebeat
    enabled: no
    state: stopped

- name: Remove default Filebeat systemd service file
  file:
    path: /etc/systemd/system/filebeat.service
    state: absent

- name: Copy laravel Filebeat systemd service file
  template:
    src: filebeat-laravel.service.j2
    dest: /etc/systemd/system/filebeat-laravel.service
    mode: '0644'

- name: TEMP! Ensure stopped Filebeat service
  systemd:
    name: filebeat-laravel
    enabled: yes
    state: stopped
    daemon_reload: yes

# - name: Enable and restart Filebeat service
#   systemd:
#     name: filebeat-laravel
#     enabled: yes
#     state: restarted
#     daemon_reload: yes
18
roles/filebeat-laravel/templates/filebeat-laravel.service.j2
Normal file
18
roles/filebeat-laravel/templates/filebeat-laravel.service.j2
Normal file
@@ -0,0 +1,18 @@
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]
UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat-laravel.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat-laravel --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
229
roles/filebeat-laravel/templates/filebeat.yml.j2
Normal file
229
roles/filebeat-laravel/templates/filebeat.yml.j2
Normal file
@@ -0,0 +1,229 @@
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

# ============================== Filebeat inputs ===============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream
  id: laravel-filestream

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /home/forge/app.retailor.io/storage/logs/*.log
    #- c:\programdata\elasticsearch\logs\*

  exclude_files: ['\.gz$']

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #prospector.scanner.exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  fields:
    pipeline_id: "laravel"
  fields_under_root: true

# ============================== Filebeat modules ==============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false


# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# =============================== Elastic Cloud ================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
  # Array of hosts to connect to.
  # hosts: ["https://elasticsearch:9200"]

  # Protocol - either `http` (default) or `https`.
  # protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["elasticsearch:5045"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  # ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]

  # Certificate for SSL client authentication
  # ssl.certificate: "/etc/elk-certs/elk-ssl.crt"

  # Client Certificate Key
  # ssl.key: "/etc/elk-certs/elk-ssl.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# ================================== Logging ===================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
    # Set to true to enable instrumentation of filebeat.
    #enabled: false

    # Environment in which filebeat is running on (eg: staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:


# ================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
57
roles/filebeat-nginx/tasks/main.yml
Normal file
57
roles/filebeat-nginx/tasks/main.yml
Normal file
@@ -0,0 +1,57 @@
- name: Update APT package cache
  apt:
    update_cache: yes

- name: Install dependencies
  apt:
    name: [apt-transport-https, wget]
    state: present

- name: Download and add Elastic GPG key
  ansible.builtin.shell: |
    wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
  args:
    executable: /bin/bash

- name: Add Elastic APT repository
  ansible.builtin.copy:
    dest: /etc/apt/sources.list.d/elastic-8.x.list
    content: "deb https://artifacts.elastic.co/packages/8.x/apt stable main"

- name: Update APT cache after adding repo
  apt:
    update_cache: yes

- name: Install Filebeat
  apt:
    name: filebeat
    state: present

- name: Copy Filebeat configuration
  template:
    src: filebeat.yml.j2
    dest: /etc/filebeat/filebeat-nginx.yml

- name: Disable default Filebeat service
  systemd:
    name: filebeat
    enabled: no
    state: stopped

- name: Remove default Filebeat systemd service file
  file:
    path: /etc/systemd/system/filebeat.service
    state: absent

- name: Copy nginx Filebeat systemd service file
  template:
    src: filebeat-nginx.service.j2
    dest: /etc/systemd/system/filebeat-nginx.service
    mode: '0644'

- name: Enable and restart Filebeat service
  systemd:
    name: filebeat-nginx
    enabled: yes
    state: restarted
    daemon_reload: yes
18
roles/filebeat-nginx/templates/filebeat-nginx.service.j2
Normal file
18
roles/filebeat-nginx/templates/filebeat-nginx.service.j2
Normal file
@@ -0,0 +1,18 @@
[Unit]
Description=Filebeat sends log files to Logstash or directly to Elasticsearch.
Documentation=https://www.elastic.co/beats/filebeat
Wants=network-online.target
After=network-online.target

[Service]
UMask=0027
Environment="GODEBUG='madvdontneed=1'"
Environment="BEAT_LOG_OPTS="
Environment="BEAT_CONFIG_OPTS=-c /etc/filebeat/filebeat-nginx.yml"
Environment="BEAT_PATH_OPTS=--path.home /usr/share/filebeat --path.config /etc/filebeat --path.data /var/lib/filebeat-nginx --path.logs /var/log/filebeat"
ExecStart=/usr/share/filebeat/bin/filebeat --environment systemd $BEAT_LOG_OPTS $BEAT_CONFIG_OPTS $BEAT_PATH_OPTS
Restart=always

[Install]
WantedBy=multi-user.target
229
roles/filebeat-nginx/templates/filebeat.yml.j2
Normal file
229
roles/filebeat-nginx/templates/filebeat.yml.j2
Normal file
@@ -0,0 +1,229 @@
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

# ============================== Filebeat inputs ===============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream
  id: nginx-filestream

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*

  exclude_files: ['\.gz$']

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #prospector.scanner.exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  fields:
    pipeline_id: "nginx"
  fields_under_root: true

# ============================== Filebeat modules ==============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false


# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# =============================== Elastic Cloud ================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
  # Array of hosts to connect to.
  # hosts: ["https://elasticsearch:9200"]

  # Protocol - either `http` (default) or `https`.
  # protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["elasticsearch:5044"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  # ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]

  # Certificate for SSL client authentication
  # ssl.certificate: "/etc/elk-certs/elk-ssl.crt"

  # Client Certificate Key
  # ssl.key: "/etc/elk-certs/elk-ssl.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# ================================== Logging ===================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
    # Set to true to enable instrumentation of filebeat.
    #enabled: false

    # Environment in which filebeat is running on (eg: staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:


# ================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
3
roles/firewall/defaults/main.yml
Normal file
3
roles/firewall/defaults/main.yml
Normal file
@@ -0,0 +1,3 @@
# Defaults for the base role
---
firewall_enable: true
23
roles/firewall/tasks/main.yml
Normal file
23
roles/firewall/tasks/main.yml
Normal file
@@ -0,0 +1,23 @@
---
- name: Update apt cache if older than 1 hour
  apt:
    update_cache: yes
    cache_valid_time: 3600

- name: Add ufw
  apt: pkg=ufw state=latest

- name: Enable access via ssh
  ufw:
    rule: allow
    port: "22"

- name: Enable custom firewall ports
  ufw:
    rule: allow
    port: "{{ item }}"
  loop: "{{ custom_firewall_ports | default([]) }}"

- name: Start ufw
  ufw:
    state: enabled
3
roles/motd/defaults/main.yml
Normal file
3
roles/motd/defaults/main.yml
Normal file
@@ -0,0 +1,3 @@
---
# Toggle abstract
description: false
69
roles/motd/tasks/main.yml
Normal file
69
roles/motd/tasks/main.yml
Normal file
@@ -0,0 +1,69 @@
---
- name: create motd-hostname file
  file:
    path: /etc/update-motd.d/20-hostname
    state: touch

- name: download motd executable from github.com
  command: curl -o /usr/local/bin/motd https://raw.githubusercontent.com/kevinmidboe/motdGO/main/motd-linux creates=/usr/local/bin/motd

- name: set motd executable permission
  file:
    path: /usr/local/bin/motd
    mode: +x

- name: generate motd-hostname from motd executable
  command: motd -font larry3d -str {{ inventory_hostname }} -parser motd
  register: motd_hostname

- name: write command output to 20-hostname file
  copy:
    content: "{{ motd_hostname.stdout }}"
    dest: /etc/update-motd.d/20-hostname

- name: set motd-hostname executable permission
  file:
    path: /etc/update-motd.d/20-hostname
    mode: +x

- name: generate motd-abstract
  template:
    src: 25-abstract.j2
    dest: /etc/update-motd.d/25-abstract
    owner: root
    group: root
    mode: +x
  when: description

- name: check if help-text motd exists
  stat:
    path: /etc/update-motd.d/10-help-text
  register: help_text

- name: disable help-text motd
  file:
    path: /etc/update-motd.d/10-help-text
    mode: -x
  when: help_text.stat.exists == true

- name: check if motd-news motd exists
  stat:
    path: /etc/update-motd.d/50-motd-news
  register: motd_news

- name: disable motd-news motd
  file:
    path: /etc/update-motd.d/50-motd-news
    mode: -x
  when: motd_news.stat.exists == true

- name: check if unminimize motd exists
  stat:
    path: /etc/update-motd.d/60-unminimize
  register: motd_unminimize

- name: disable unminimize motd
  file:
    path: /etc/update-motd.d/60-unminimize
    mode: -x
  when: motd_unminimize.stat.exists == true
4
roles/motd/templates/25-abstract.j2
Normal file
4
roles/motd/templates/25-abstract.j2
Normal file
@@ -0,0 +1,4 @@
#!/bin/dash

printf "\n"
printf "\e[1;36m%s\e > {{ description }} \e[0m\n\n"
16
roles/nginx_prometheus_exporter/README.md
Normal file
16
roles/nginx_prometheus_exporter/README.md
Normal file
@@ -0,0 +1,16 @@
# nginx prometheus exporter

This installs a Prometheus exporter that polls nginx's stub_status endpoint and exposes the response as Prometheus metrics.

Nginx needs to be configured with the following location block:

```
location /nginx_status {
  stub_status on;
  access_log off;
  allow 127.0.0.1;
  deny all;
}
```

The scraping service must use the host in the URL `http://127.0.0.1/nginx_status`; requests from any other host, such as the public IP or `localhost`, will receive a non-20x response.
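
To verify the wiring end to end, a minimal sketch (note: the `:9113` metrics port is the exporter's built-in default, not something this role sets explicitly):

```
# stub_status must answer with a 200 from the local host
curl -i http://127.0.0.1/nginx_status

# the exporter should then expose nginx_* metrics
curl -s http://127.0.0.1:9113/metrics | grep '^nginx_'
```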
1
roles/nginx_prometheus_exporter/defaults/main.yml
Normal file
1
roles/nginx_prometheus_exporter/defaults/main.yml
Normal file
@@ -0,0 +1 @@
---
6
roles/nginx_prometheus_exporter/handlers/main.yaml
Normal file
6
roles/nginx_prometheus_exporter/handlers/main.yaml
Normal file
@@ -0,0 +1,6 @@
---
- name: Restart nginx_prometheus_exporter
  systemd:
    name: nginx_prometheus_exporter
    state: restarted
    daemon_reload: yes
18
roles/nginx_prometheus_exporter/tasks/install.yaml
Normal file
18
roles/nginx_prometheus_exporter/tasks/install.yaml
Normal file
@@ -0,0 +1,18 @@
---
- name: Download Nginx Prometheus Exporter
  get_url:
    url: "https://github.com/nginx/nginx-prometheus-exporter/releases/download/v{{ nginx_exporter_version }}/nginx-prometheus-exporter_{{ nginx_exporter_version }}_linux_amd64.tar.gz"
    dest: "/tmp/nginx_prometheus_exporter.tar.gz"

- name: Extract Nginx Prometheus Exporter
  unarchive:
    src: "/tmp/nginx_prometheus_exporter.tar.gz"
    dest: "/tmp"
    remote_src: yes

- name: Move Nginx Prometheus Exporter binary
  copy:
    src: "/tmp/nginx-prometheus-exporter"
    dest: "/usr/local/bin/nginx-prometheus-exporter"
    remote_src: yes
    mode: '0755'
3
roles/nginx_prometheus_exporter/tasks/main.yaml
Normal file
3
roles/nginx_prometheus_exporter/tasks/main.yaml
Normal file
@@ -0,0 +1,3 @@
---
- include_tasks: install.yaml
- include_tasks: service.yml
23
roles/nginx_prometheus_exporter/tasks/service.yml
Normal file
23
roles/nginx_prometheus_exporter/tasks/service.yml
Normal file
@@ -0,0 +1,23 @@
---
- name: Create nginx_prometheus_exporter service file
  template:
    src: "nginx_prometheus_exporter.service.j2"
    dest: "/etc/systemd/system/nginx_prometheus_exporter.service"
    mode: '0644'
  tags:
    - create_service

- name: Reload systemd to pick up the nginx_prometheus_exporter service
  systemd:
    daemon_reload: yes
  tags:
    - reload_systemd

- name: Enable and start the nginx_prometheus_exporter service
  systemd:
    name: nginx_prometheus_exporter
    state: started
    enabled: yes
  tags:
    - start_nginx_prometheus_exporter
11
roles/nginx_prometheus_exporter/templates/nginx_prometheus_exporter.service.j2
Normal file
11
roles/nginx_prometheus_exporter/templates/nginx_prometheus_exporter.service.j2
Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=Nginx Prometheus Exporter
After=network.target

[Service]
User=nobody
ExecStart=/usr/local/bin/nginx-prometheus-exporter -nginx.scrape-uri=http://127.0.0.1/nginx_status
Restart=always

[Install]
WantedBy=multi-user.target
6
roles/node_exporter/handlers/main.yml
Normal file
6
roles/node_exporter/handlers/main.yml
Normal file
@@ -0,0 +1,6 @@
---
- name: Restart node_exporter
  systemd:
    name: node_exporter
    state: restarted
    daemon_reload: yes
18
roles/node_exporter/tasks/install.yml
Normal file
18
roles/node_exporter/tasks/install.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
- name: Download Node Exporter
|
||||
get_url:
|
||||
url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
|
||||
dest: "/tmp/node_exporter.tar.gz"
|
||||
|
||||
- name: Extract Node Exporter
|
||||
unarchive:
|
||||
src: "/tmp/node_exporter.tar.gz"
|
||||
dest: "/tmp"
|
||||
remote_src: yes
|
||||
|
||||
- name: Move Node Exporter binary
|
||||
copy:
|
||||
src: "/tmp/node_exporter-{{ node_exporter_version }}.linux-amd64/node_exporter"
|
||||
dest: "/usr/local/bin/node_exporter"
|
||||
remote_src: yes
|
||||
mode: '0755'
|
||||
3
roles/node_exporter/tasks/main.yml
Normal file
3
roles/node_exporter/tasks/main.yml
Normal file
@@ -0,0 +1,3 @@
---
- include_tasks: install.yml
- include_tasks: service.yml
22
roles/node_exporter/tasks/service.yml
Normal file
22
roles/node_exporter/tasks/service.yml
Normal file
@@ -0,0 +1,22 @@
---
- name: Create node_exporter service file
  template:
    src: "node_exporter.service.j2"
    dest: "/etc/systemd/system/node_exporter.service"
    mode: '0644'
  tags:
    - create_service

- name: Reload systemd to pick up the node_exporter service
  systemd:
    daemon_reload: yes
  tags:
    - reload_systemd

- name: Enable and start the node_exporter service
  systemd:
    name: node_exporter
    state: started
    enabled: yes
  tags:
    - start_node_exporter
11
roles/node_exporter/templates/node_exporter.service.j2
Normal file
11
roles/node_exporter/templates/node_exporter.service.j2
Normal file
@@ -0,0 +1,11 @@
[Unit]
Description=Prometheus Node Exporter
After=network.target

[Service]
User=nobody
ExecStart=/usr/local/bin/node_exporter
Restart=always

[Install]
WantedBy=multi-user.target
2
roles/node_exporter/vars/main.yaml.disable
Normal file
2
roles/node_exporter/vars/main.yaml.disable
Normal file
@@ -0,0 +1,2 @@
---
node_exporter_version: "1.7.0"
2
roles/oh-my-zsh/.ansible-lint
Normal file
2
roles/oh-my-zsh/.ansible-lint
Normal file
@@ -0,0 +1,2 @@
skip_list:
  - '106' # Role name {} does not match ``^[a-z][a-z0-9_]+$`` pattern'
19
roles/oh-my-zsh/.editorconfig
Normal file
19
roles/oh-my-zsh/.editorconfig
Normal file
@@ -0,0 +1,19 @@
# EditorConfig: http://EditorConfig.org

# top-most EditorConfig file
root = true

# Defaults for all editor files
[*]
insert_final_newline = true
indent_style = space
indent_size = 4
trim_trailing_whitespace = true

# Files with a smaller indent
[*.yml]
indent_size = 2

# Jinja2 template files
[*.j2]
end_of_line = lf
1
roles/oh-my-zsh/.moleculew/ansible_lint_version
Normal file
1
roles/oh-my-zsh/.moleculew/ansible_lint_version
Normal file
@@ -0,0 +1 @@
4.3.4
1
roles/oh-my-zsh/.moleculew/ansible_version
Normal file
1
roles/oh-my-zsh/.moleculew/ansible_version
Normal file
@@ -0,0 +1 @@
2.9.1
1
roles/oh-my-zsh/.moleculew/docker_lib_version
Normal file
1
roles/oh-my-zsh/.moleculew/docker_lib_version
Normal file
@@ -0,0 +1 @@
4.3.1
1
roles/oh-my-zsh/.moleculew/flake8_version
Normal file
1
roles/oh-my-zsh/.moleculew/flake8_version
Normal file
@@ -0,0 +1 @@
3.8.3
1
roles/oh-my-zsh/.moleculew/molecule_version
Normal file
1
roles/oh-my-zsh/.moleculew/molecule_version
Normal file
@@ -0,0 +1 @@
3.0.8
1
roles/oh-my-zsh/.moleculew/python_version
Normal file
1
roles/oh-my-zsh/.moleculew/python_version
Normal file
@@ -0,0 +1 @@
3.6.7
1
roles/oh-my-zsh/.moleculew/testinfra_version
Normal file
1
roles/oh-my-zsh/.moleculew/testinfra_version
Normal file
@@ -0,0 +1 @@
5.3.1
1
roles/oh-my-zsh/.moleculew/yamllint_version
Normal file
1
roles/oh-my-zsh/.moleculew/yamllint_version
Normal file
@@ -0,0 +1 @@
1.24.2
36
roles/oh-my-zsh/.travis.yml
Normal file
36
roles/oh-my-zsh/.travis.yml
Normal file
@@ -0,0 +1,36 @@
---
language: python
python: '3.6'

env:
  global:
    - MOLECULEW_USE_SYSTEM=true
  matrix:
    # Spin off separate builds for each of the following versions of Ansible
    - MOLECULEW_ANSIBLE=2.8.16
    - MOLECULEW_ANSIBLE=2.9.1

# Require Ubuntu 16.04
dist: xenial

# Require Docker
services:
  - docker

install:
  # Install dependencies
  - ./moleculew wrapper-install

  # Display versions
  - ./moleculew wrapper-versions

script:
  - ./moleculew test

branches:
  only:
    - master
    - /^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([\.\-].*)?$/

notifications:
  webhooks: https://galaxy.ansible.com/api/v1/notifications/
33
roles/oh-my-zsh/.yamllint
Normal file
33
roles/oh-my-zsh/.yamllint
Normal file
@@ -0,0 +1,33 @@
---
# Based on ansible-lint config
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  colons:
    max-spaces-after: -1
    level: error
  commas:
    max-spaces-after: -1
    level: error
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    max: 3
    level: error
  hyphens:
    level: error
  indentation: disable
  key-duplicates: enable
  line-length: disable
  new-line-at-end-of-file: disable
  new-lines:
    type: unix
  trailing-spaces: disable
  truthy: disable
21
roles/oh-my-zsh/LICENSE
Normal file
21
roles/oh-my-zsh/LICENSE
Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 GantSign Ltd.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
147
roles/oh-my-zsh/README.md
Normal file
147
roles/oh-my-zsh/README.md
Normal file
@@ -0,0 +1,147 @@
|
||||
Ansible Role: Oh My Zsh
|
||||
=======================
|
||||
|
||||
[](https://travis-ci.com/gantsign/ansible-role-oh-my-zsh)
|
||||
[](https://galaxy.ansible.com/gantsign/oh-my-zsh)
|
||||
[](https://raw.githubusercontent.com/gantsign/ansible-role-oh-my-zsh/master/LICENSE)
|
||||
|
||||
Role to download, install and configure [Oh-My-Zsh](http://ohmyz.sh/).
|
||||
|
||||
**Note:** you may be better off using the alternative
|
||||
[gantsign.ansible_role_antigen](https://galaxy.ansible.com/gantsign/antigen)
|
||||
role that can install third-party Zsh plugins as well as installing Oh My Zsh
|
||||
and its plugins.
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
* Ansible >= 2.8
|
||||
|
||||
* Linux Distribution
|
||||
|
||||
* Debian Family
|
||||
|
||||
* Debian
|
||||
|
||||
* Jessie (8)
|
||||
* Stretch (9)
|
||||
|
||||
* Ubuntu
|
||||
|
||||
* Xenial (16.04)
|
||||
* Bionic (18.04)
|
||||
|
||||
* RedHat Family
|
||||
|
||||
* CentOS
|
||||
|
||||
* 7
|
||||
|
||||
* Fedora
|
||||
|
||||
* 31
|
||||
|
||||
* SUSE Family
|
||||
|
||||
* openSUSE
|
||||
|
||||
* 15.1
|
||||
|
||||
* Note: other versions are likely to work but have not been tested.
|
||||
|
||||
Role Variables
|
||||
--------------
|
||||
|
||||
The following variables will change the behavior of this role (default values
|
||||
are shown below):
|
||||
|
||||
```yaml
|
||||
# Default theme
|
||||
oh_my_zsh_theme: robbyrussell
|
||||
|
||||
# Default plugins
|
||||
oh_my_zsh_plugins:
|
||||
- git
|
||||
|
||||
# Wether to install by default for all specified users.
|
||||
# May be overridden by `oh_my_zsh: install:` under each user.
|
||||
oh_my_zsh_install: yes
|
||||
|
||||
# User configuration
|
||||
# Important: oh-my-zsh is installed per user so you need to specify the users to install it for.
|
||||
users:
|
||||
- username: example1
|
||||
oh_my_zsh:
|
||||
theme: robbyrussell
|
||||
plugins:
|
||||
- git
|
||||
- username: example2
|
||||
oh_my_zsh:
|
||||
theme: robbyrussell
|
||||
plugins:
|
||||
- git
|
||||
- mvn
|
||||
- username: example3
|
||||
oh_my_zsh:
|
||||
install: no
|
||||
```
|
||||
|
||||
Example Playbook
|
||||
----------------
|
||||
|
||||
```yaml
|
||||
- hosts: servers
|
||||
roles:
|
||||
- role: gantsign.oh-my-zsh
|
||||
users:
|
||||
- username: example
|
||||
```
|
||||
|
||||
More Roles From GantSign
------------------------

You can find more roles from GantSign on
[Ansible Galaxy](https://galaxy.ansible.com/gantsign).

Development & Testing
---------------------

This project uses [Molecule](http://molecule.readthedocs.io/) to aid in the
development and testing; the role is unit tested using
[Testinfra](http://testinfra.readthedocs.io/) and
[pytest](http://docs.pytest.org/).

To develop or test you'll need to have installed the following:

* Linux (e.g. [Ubuntu](http://www.ubuntu.com/))
* [Docker](https://www.docker.com/)
* [Python](https://www.python.org/) (including python-pip)
* [Ansible](https://www.ansible.com/)
* [Molecule](http://molecule.readthedocs.io/)

Because the above can be tricky to install, this project includes
[Molecule Wrapper](https://github.com/gantsign/molecule-wrapper). Molecule
Wrapper is a shell script that installs Molecule and its dependencies (apart
from Linux) and then executes Molecule with the command you pass it.

To test this role using Molecule Wrapper run the following command from the
project root:

```bash
./moleculew test
```

Note: some of the dependencies need `sudo` permission to install.

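The wrapper also accepts version pins and can record them under `.moleculew/`
for reproducible runs (these options and commands are documented by the
wrapper itself; the version numbers below are illustrative only):

```bash
# Pin the toolchain for a single run (example versions, not recommendations)
./moleculew --python=3.8.6 --ansible=2.9.13 --molecule=3.0.8 test

# Record the resolved versions in .moleculew/ so later runs reuse them
./moleculew wrapper-freeze
```
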
License
-------

MIT

Author Information
------------------

John Freeman

GantSign Ltd.
Company No. 06109112 (registered in England)
11
roles/oh-my-zsh/defaults/main.yml
Normal file
@@ -0,0 +1,11 @@
---
# Default theme
oh_my_zsh_theme: robbyrussell

# Default plugins
oh_my_zsh_plugins:
  - git

# Whether to install by default for all specified users.
# May be overridden by `oh_my_zsh: install:` under each user.
oh_my_zsh_install: yes
4
roles/oh-my-zsh/handlers/main.yml
Normal file
@@ -0,0 +1,4 @@
---
- name: dpkg-reconfigure console-setup
  become: yes
  command: /usr/sbin/dpkg-reconfigure -f noninteractive console-setup
31
roles/oh-my-zsh/meta/main.yml
Normal file
@@ -0,0 +1,31 @@
---
galaxy_info:
  author: John Freeman
  description: Role for installing and configuring oh-my-zsh.
  company: GantSign Ltd.
  license: MIT
  min_ansible_version: 2.8
  platforms:
    - name: EL
      versions:
        - 7
    - name: Fedora
      versions:
        - 31
    - name: opensuse
      versions:
        - 15.1
    - name: Ubuntu
      versions:
        - xenial
        - bionic
    - name: Debian
      versions:
        - jessie
        - stretch
  galaxy_tags:
    - ohmyzsh
    - zsh
    - shell
    - development
dependencies: []
22
roles/oh-my-zsh/molecule/default/INSTALL.rst
Normal file
@@ -0,0 +1,22 @@
********************************
Docker driver installation guide
********************************

Requirements
============

* Docker Engine

Install
=======

Please refer to the `Virtual environment`_ documentation for installation best
practices. If not using a virtual environment, please consider passing the
widely recommended `'--user' flag`_ when invoking ``pip``.

.. _Virtual environment: https://virtualenv.pypa.io/en/latest/
.. _'--user' flag: https://packaging.python.org/tutorials/installing-packages/#installing-to-the-user-site

.. code-block:: bash

   $ python3 -m pip install 'molecule[docker]'
68
roles/oh-my-zsh/molecule/default/converge.yml
Normal file
@@ -0,0 +1,68 @@
---
- name: Converge
  hosts: all

  pre_tasks:

    - name: update apt cache
      apt:
        update_cache: yes
      when: ansible_os_family == 'Debian'
      changed_when: no

    - name: create test users
      become: yes
      user:
        name: '{{ item }}'
        home: '/home/{{ item }}'
        createhome: yes
      with_items:
        - test_usr1
        - test_usr2
        - test_usr3
        - test_usr4
        - test_usr5

    - name: install console-setup file
      become: yes
      copy:
        src: tests/console-setup.sh
        dest: /etc/default/console-setup
        force: no
        owner: root
        group: root
        mode: 'u=rwx,go=r'

  roles:
    - role: ansible-role-oh-my-zsh
      oh_my_zsh_theme: test_theme1
      oh_my_zsh_plugins:
        - test_plugin1
        - test_plugin2
      users:
        - username: test_usr1
        - username: test_usr2
          oh_my_zsh:
            theme: test_theme2
            plugins:
              - test_plugin3
              - test_plugin4
        - username: test_usr3
          oh_my_zsh:
            install: no

    - role: ansible-role-oh-my-zsh
      oh_my_zsh_theme: test_theme1
      oh_my_zsh_plugins:
        - test_plugin1
        - test_plugin2
      oh_my_zsh_install: no
      users:
        - username: test_usr4
        - username: test_usr5
          oh_my_zsh:
            install: yes
            theme: test_theme2
            plugins:
              - test_plugin3
              - test_plugin4
34
roles/oh-my-zsh/molecule/default/molecule.yml
Normal file
@@ -0,0 +1,34 @@
---
dependency:
  name: galaxy

driver:
  name: docker

lint: |
  set -e
  yamllint .
  ansible-lint
  flake8

platforms:
  - name: ansible-role-oh-my-zsh-debian-min
    image: debian:8
  - name: ansible-role-oh-my-zsh-debian-max
    image: debian:9
  - name: ansible-role-oh-my-zsh-ubuntu-min
    image: ubuntu:16.04
  - name: ansible-role-oh-my-zsh-ubuntu-max
    image: ubuntu:18.04
  - name: ansible-role-oh-my-zsh-centos
    image: centos:7
  - name: ansible-role-oh-my-zsh-fedora
    image: fedora:31
  - name: ansible-role-oh-my-zsh-opensuse
    image: opensuse/leap:15.1

provisioner:
  name: ansible

verifier:
  name: testinfra
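# Development loop (illustrative commands, run via the wrapper from the
# project root): converge a single platform, inspect it, then verify.
#   ./moleculew converge
#   ./moleculew login --host ansible-role-oh-my-zsh-debian-max
#   ./moleculew verify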
23
roles/oh-my-zsh/molecule/default/tests/conftest.py
Normal file
@@ -0,0 +1,23 @@
"""PyTest Fixtures."""
from __future__ import absolute_import

import os

import pytest


def pytest_runtest_setup(item):
    """Run tests only when under molecule with testinfra installed."""
    try:
        import testinfra
    except ImportError:
        pytest.skip("Test requires testinfra", allow_module_level=True)
    if "MOLECULE_INVENTORY_FILE" in os.environ:
        pytest.testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
            os.environ["MOLECULE_INVENTORY_FILE"]
        ).get_hosts("all")
    else:
        pytest.skip(
            "Test should run only from inside molecule.",
            allow_module_level=True
        )
16
roles/oh-my-zsh/molecule/default/tests/console-setup.sh
Normal file
@@ -0,0 +1,16 @@
# CONFIGURATION FILE FOR SETUPCON

# Consult the console-setup(5) manual page.

ACTIVE_CONSOLES="/dev/tty[1-6]"

CHARMAP="ISO-8859-1"

CODESET="Lat15"
FONTFACE="VGA"
FONTSIZE="8x16"

VIDEOMODE=

# The following is an example how to use a braille font
# FONT='lat9w-08.psf.gz brl-8x8.psf'
50
roles/oh-my-zsh/molecule/default/tests/test_role.py
Normal file
@@ -0,0 +1,50 @@
import pytest


@pytest.mark.parametrize('username', [
    'test_usr1',
    'test_usr2',
    'test_usr5',
])
def test_oh_my_zsh_install(host, username):
    oh_my_zsh = host.file('/home/' + username + '/.oh-my-zsh')
    assert oh_my_zsh.exists
    assert oh_my_zsh.is_directory
    assert oh_my_zsh.user == username
    assert oh_my_zsh.group in [username, 'users']


@pytest.mark.parametrize('username', [
    'test_usr3',
    'test_usr4',
])
def test_oh_my_zsh_is_not_installed_for_excluded_users(host, username):
    oh_my_zsh = host.file('/home/' + username + '/.oh-my-zsh')
    zshrc = host.file('/home/' + username + '/.zshrc')
    assert not oh_my_zsh.exists
    assert not zshrc.exists


@pytest.mark.parametrize('username,theme,plugins', [
    ('test_usr1', 'test_theme1', 'test_plugin1 test_plugin2'),
    ('test_usr2', 'test_theme2', 'test_plugin3 test_plugin4'),
])
def test_oh_my_zsh_config(host, username, theme, plugins):
    zshrc = host.file('/home/' + username + '/.zshrc')
    assert zshrc.exists
    assert zshrc.is_file
    assert zshrc.user == username
    assert zshrc.group in [username, 'users']
    assert zshrc.contains(theme)
    assert zshrc.contains(plugins)


def test_console_setup(host):
    # console-setup is Debian family specific
    if host.file('/etc/debian_version').exists:
        setup = host.file('/etc/default/console-setup')
        assert setup.exists
        assert setup.is_file
        assert setup.user == 'root'
        assert setup.group == 'root'
        assert setup.contains('CHARMAP="UTF-8"')
957
roles/oh-my-zsh/moleculew
Executable file
@@ -0,0 +1,957 @@
#!/usr/bin/env bash

# MIT License
#
# Copyright (c) 2018 GantSign Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# Molecule Wrapper: the wrapper script for Molecule
# https://github.com/gantsign/molecule-wrapper

set -e

WRAPPER_VERSION=1.1.0

VERSION_DIR='.moleculew'
PYTHON_VERSION_FILE="$VERSION_DIR/python_version"
ANSIBLE_VERSION_FILE="$VERSION_DIR/ansible_version"
DOCKER_LIB_VERSION_FILE="$VERSION_DIR/docker_lib_version"
MOLECULE_VERSION_FILE="$VERSION_DIR/molecule_version"
YAMLLINT_VERSION_FILE="$VERSION_DIR/yamllint_version"
ANSIBLE_LINT_VERSION_FILE="$VERSION_DIR/ansible_lint_version"
FLAKE8_VERSION_FILE="$VERSION_DIR/flake8_version"
TESTINFRA_VERSION_FILE="$VERSION_DIR/testinfra_version"

BUILD_DEPENDENCIES_INSTALLED=false
PYENV_INSTALLED=false

ANSIBLE_VERSION=''
DOCKER_LIB_VERSION=''
MOLECULE_VERSION=''
PYTHON_VERSION=''
YAMLLINT_VERSION=''
ANSIBLE_LINT_VERSION=''
FLAKE8_VERSION=''
TESTINFRA_VERSION=''
USE_SYSTEM_DEPENDENCIES=false

PRE_ARGS=()
MOLECULE_CMD=''
POST_ARGS=()

export PATH="$HOME/.pyenv/bin:$HOME/.local/bin:$PATH"

hr() {
    for ((i = 1; i <= 80; i++)); do
        printf '*'
    done
    echo ''
}

banner() {
    hr
    echo "$1"
    hr
}

run_as_root() {
    if [[ $EUID -eq 0 ]]; then
        "$@"
    elif [ -x "$(command -v sudo)" ]; then
        sudo "$@"
    else
        echo "Error: sudo is not installed" >&2
        exit 1
    fi
}

build_dependencies_present() {
    if [[ $BUILD_DEPENDENCIES_INSTALLED == true ]]; then
        return
    fi
    if [[ $USE_SYSTEM_DEPENDENCIES == true ]]; then
        return
    fi
    # https://github.com/pyenv/pyenv/wiki/common-build-problems
    if [[ -x "$(command -v apt-get)" ]]; then
        banner 'Installing build dependencies'

        run_as_root apt-get update
        run_as_root apt-get install --assume-yes \
            make build-essential libssl-dev zlib1g-dev libbz2-dev \
            libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev \
            libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev \
            git jq
        echo ''
    elif [[ -x "$(command -v dnf)" ]]; then
        banner 'Installing build dependencies'

        run_as_root dnf install \
            zlib-devel bzip2 bzip2-devel readline-devel sqlite sqlite-devel \
            openssl-devel xz xz-devel libffi-devel \
            git curl jq
        echo ''
    elif [[ -x "$(command -v yum)" ]]; then
        banner 'Installing build dependencies'

        run_as_root yum install \
            zlib-devel bzip2 bzip2-devel readline-devel sqlite sqlite-devel \
            openssl-devel xz xz-devel libffi-devel \
            git curl jq
        echo ''
    elif [[ -x "$(command -v zypper)" ]]; then
        banner 'Installing build dependencies'

        run_as_root zypper install \
            zlib-devel bzip2 libbz2-devel readline-devel sqlite3 sqlite3-devel \
            libopenssl-devel xz xz-devel \
            git curl jq
        echo ''
    fi
    BUILD_DEPENDENCIES_INSTALLED=true
}

pyenv_present() {
    if [[ $PYENV_INSTALLED == true ]]; then
        return
    fi
    if [[ $USE_SYSTEM_DEPENDENCIES == true ]]; then
        return
    fi
    if [[ -x "$(command -v pyenv)" ]]; then
        PYENV_INSTALLED=true
        return
    fi

    build_dependencies_present

    banner "Installing pyenv for user $USER"
    bash <(curl --location https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer)
    echo ''
    PYENV_INSTALLED=true
}

query_latest_python_version() {
    pyenv_present

    PYTHON_VERSION="$(~/.pyenv/plugins/python-build/bin/python-build --definitions | grep --color=never '^3\.' | grep --invert-match '\-dev$' | tail -1)"
}

query_latest_package_version() {
    if [[ ! -x "$(command -v curl)" ]]; then
        build_dependencies_present
    fi
    if [[ ! -x "$(command -v jq)" ]]; then
        build_dependencies_present
    fi
    if [[ ! -x "$(command -v curl)" ]]; then
        echo 'Error: curl is not installed.' >&2
        exit 1
    fi
    if [[ ! -x "$(command -v jq)" ]]; then
        echo 'Error: jq is not installed.' >&2
        exit 1
    fi

    local version
    # shellcheck disable=SC2034
    version=$(curl --fail --silent --show-error --location "https://pypi.org/pypi/$2/json" | jq --raw-output '.info.version')

    eval "$1=\"\$version\""
}

docker_present() {
    if [[ -x "$(command -v docker)" ]]; then
        return
    fi
    if [[ $USE_SYSTEM_DEPENDENCIES == true ]]; then
        echo 'Error: docker is not installed.' >&2
        exit 1
    fi

    build_dependencies_present

    banner 'Installing Docker'
    sh <(curl --fail --silent --show-error --location https://get.docker.com)
    run_as_root usermod --append --groups docker "$USER"
    banner "User '$USER' has been added to the 'docker' group. Logout/restart and log back in for changes to take effect."
    exit
}

python_present() {
    if [[ $PYTHON_VERSION == system ]]; then
        if [[ ! -x "$(command -v python3)" ]] &&
                [[ ! -x "$(command -v python)" ]]; then
            echo 'Error: python is not installed.' >&2
            exit 1
        fi
        if [[ ! -x "$(command -v pip3)" ]] &&
                [[ ! -x "$(command -v pip)" ]]; then
            echo 'Error: pip is not installed.' >&2
            exit 1
        fi
        PYTHON_EXE="$(command -v python3 || command -v python)"
    else
        if [[ ! -x "$(command -v git)" ]]; then
            echo 'Error: git is not installed.' >&2
            exit 1
        fi

        pyenv_present

        export PYENV_VERSION="$PYTHON_VERSION"
        if [[ ! -d "$HOME/.pyenv/versions/$PYTHON_VERSION" ]]; then
            build_dependencies_present

            banner "Making Python version $PYTHON_VERSION available using pyenv"
            pyenv install "$PYTHON_VERSION"
            echo ''
        fi
        eval "$(pyenv init -)"
        PYTHON_EXE="$(pyenv which python)"
    fi
}

virtualenv_present() {
    if [[ ! -x "$(command -v virtualenv)" ]]; then
        banner "Installing virtualenv for user $USER"
        "$PYTHON_EXE" -m pip install --user virtualenv
        echo ''
    fi
}

install_ansible() {
    banner "Installing Ansible $ANSIBLE_VERSION into virtualenv $VIRTUAL_ENV"
    pip install "ansible==$ANSIBLE_VERSION"
    echo ''
}

install_docker_lib() {
    banner "Installing Python Docker $DOCKER_LIB_VERSION into virtualenv $VIRTUAL_ENV"
    pip install "docker==$DOCKER_LIB_VERSION"
    echo ''
}

install_molecule() {
    banner "Installing Molecule $MOLECULE_VERSION into virtualenv $VIRTUAL_ENV"

    pip install "molecule==$MOLECULE_VERSION"
    echo ''
}

install_yamllint() {
    banner "Installing YamlLint $YAMLLINT_VERSION into virtualenv $VIRTUAL_ENV"

    pip install "yamllint==$YAMLLINT_VERSION"
    echo ''
}

install_ansible_lint() {
    banner "Installing Ansible Lint $ANSIBLE_LINT_VERSION into virtualenv $VIRTUAL_ENV"

    pip install "ansible-lint==$ANSIBLE_LINT_VERSION"
    echo ''
}

install_flake8() {
    banner "Installing Flake8 $FLAKE8_VERSION into virtualenv $VIRTUAL_ENV"

    pip install "flake8==$FLAKE8_VERSION"
    echo ''
}

install_testinfra() {
    banner "Installing Testinfra $TESTINFRA_VERSION into virtualenv $VIRTUAL_ENV"

    pip install "testinfra==$TESTINFRA_VERSION"
    echo ''
}

wrapper_clean() {
    local MOLECULE_WRAPPER_HOME="$HOME/.moleculew"
    read -r -p "Delete ${MOLECULE_WRAPPER_HOME} (y/n)? " yn
    case $yn in
        [Yy]|YES|yes|Yes)
            rm -rf "$MOLECULE_WRAPPER_HOME";
            exit
            ;;
        *)
            exit
            ;;
    esac
}

wrapper_upgrade() {
    curl --fail --silent --show-error --location --output moleculew.new \
        'https://raw.githubusercontent.com/gantsign/molecule-wrapper/master/moleculew' \
        && chmod 'u+x' moleculew.new \
        && mv --force moleculew.new moleculew

    local NEW_VERSION
    NEW_VERSION="$(./moleculew wrapper-version)"
    if [ "$WRAPPER_VERSION" != "$NEW_VERSION" ]; then
        echo "Upgraded wrapper from version $WRAPPER_VERSION to $NEW_VERSION"
    else
        echo "You are already using the latest version"
    fi
    exit
}

wrapper_version() {
    echo "$WRAPPER_VERSION"
    exit
}

print_versions() {
    echo "Python: $PYTHON_VERSION"
    echo "Ansible: $ANSIBLE_VERSION"
    echo "Python Docker library: $DOCKER_LIB_VERSION"
    echo "Molecule: $MOLECULE_VERSION"
    echo "YamlLint: $YAMLLINT_VERSION"
    echo "Ansible Lint: $ANSIBLE_LINT_VERSION"
    echo "Flake8: $FLAKE8_VERSION"
    echo "Testinfra: $TESTINFRA_VERSION"
}

wrapper_versions() {
    determine_versions

    print_versions
    exit
}

wrapper_freeze() {
    determine_versions

    banner 'Freezing versions'

    mkdir -p "$VERSION_DIR"

    echo "$PYTHON_VERSION" > "$PYTHON_VERSION_FILE"
    echo "$ANSIBLE_VERSION" > "$ANSIBLE_VERSION_FILE"
    echo "$DOCKER_LIB_VERSION" > "$DOCKER_LIB_VERSION_FILE"
    echo "$MOLECULE_VERSION" > "$MOLECULE_VERSION_FILE"
    echo "$YAMLLINT_VERSION" > "$YAMLLINT_VERSION_FILE"
    echo "$ANSIBLE_LINT_VERSION" > "$ANSIBLE_LINT_VERSION_FILE"
    echo "$FLAKE8_VERSION" > "$FLAKE8_VERSION_FILE"
    echo "$TESTINFRA_VERSION" > "$TESTINFRA_VERSION_FILE"

    print_versions

    exit
}

wrapper_unfreeze() {
    banner 'Un-freezing versions'

    if [[ -f "$PYTHON_VERSION_FILE" ]]; then
        rm --verbose "$PYTHON_VERSION_FILE"
    fi
    if [[ -f "$ANSIBLE_VERSION_FILE" ]]; then
        rm --verbose "$ANSIBLE_VERSION_FILE"
    fi
    if [[ -f "$DOCKER_LIB_VERSION_FILE" ]]; then
        rm --verbose "$DOCKER_LIB_VERSION_FILE"
    fi
    if [[ -f "$MOLECULE_VERSION_FILE" ]]; then
        rm --verbose "$MOLECULE_VERSION_FILE"
    fi
    if [[ -f "$YAMLLINT_VERSION_FILE" ]]; then
        rm --verbose "$YAMLLINT_VERSION_FILE"
    fi
    if [[ -f "$ANSIBLE_LINT_VERSION_FILE" ]]; then
        rm --verbose "$ANSIBLE_LINT_VERSION_FILE"
    fi
    if [[ -f "$FLAKE8_VERSION_FILE" ]]; then
        rm --verbose "$FLAKE8_VERSION_FILE"
    fi
    if [[ -f "$TESTINFRA_VERSION_FILE" ]]; then
        rm --verbose "$TESTINFRA_VERSION_FILE"
    fi

    exit
}

wrapper_upgrade_versions() {
    determine_versions

    banner 'Upgrading versions'

    local CURRENT_PYTHON_VERSION="$PYTHON_VERSION"
    local CURRENT_ANSIBLE_VERSION="$ANSIBLE_VERSION"
    local CURRENT_DOCKER_LIB_VERSION="$DOCKER_LIB_VERSION"
    local CURRENT_MOLECULE_VERSION="$MOLECULE_VERSION"
    local CURRENT_YAMLLINT_VERSION="$YAMLLINT_VERSION"
    local CURRENT_ANSIBLE_LINT_VERSION="$ANSIBLE_LINT_VERSION"
    local CURRENT_FLAKE8_VERSION="$FLAKE8_VERSION"
    local CURRENT_TESTINFRA_VERSION="$TESTINFRA_VERSION"

    query_latest_python_version
    query_latest_package_version ANSIBLE_VERSION ansible
    query_latest_package_version DOCKER_LIB_VERSION docker
    query_latest_package_version MOLECULE_VERSION molecule
    query_latest_package_version YAMLLINT_VERSION yamllint
    query_latest_package_version ANSIBLE_LINT_VERSION ansible-lint
    query_latest_package_version FLAKE8_VERSION flake8
    query_latest_package_version TESTINFRA_VERSION testinfra
    echo ''

    echo 'New versions:'
    if [[ "$CURRENT_PYTHON_VERSION" == "$PYTHON_VERSION" ]]; then
        echo "Python: $CURRENT_PYTHON_VERSION (no change)"
    else
        echo "Python: $CURRENT_PYTHON_VERSION -> $PYTHON_VERSION"
    fi

    if [[ "$CURRENT_ANSIBLE_VERSION" == "$ANSIBLE_VERSION" ]]; then
        echo "Ansible: $CURRENT_ANSIBLE_VERSION (no change)"
    else
        echo "Ansible: $CURRENT_ANSIBLE_VERSION -> $ANSIBLE_VERSION"
    fi

    if [[ "$CURRENT_DOCKER_LIB_VERSION" == "$DOCKER_LIB_VERSION" ]]; then
        echo "Python Docker library: $CURRENT_DOCKER_LIB_VERSION (no change)"
    else
        echo "Python Docker library: $CURRENT_DOCKER_LIB_VERSION -> $DOCKER_LIB_VERSION"
    fi

    if [[ "$CURRENT_MOLECULE_VERSION" == "$MOLECULE_VERSION" ]]; then
        echo "Molecule: $CURRENT_MOLECULE_VERSION (no change)"
    else
        echo "Molecule: $CURRENT_MOLECULE_VERSION -> $MOLECULE_VERSION"
    fi

    if [[ "$CURRENT_YAMLLINT_VERSION" == "$YAMLLINT_VERSION" ]]; then
        echo "YamlLint: $CURRENT_YAMLLINT_VERSION (no change)"
    else
        echo "YamlLint: $CURRENT_YAMLLINT_VERSION -> $YAMLLINT_VERSION"
    fi

    if [[ "$CURRENT_ANSIBLE_LINT_VERSION" == "$ANSIBLE_LINT_VERSION" ]]; then
        echo "Ansible Lint: $CURRENT_ANSIBLE_LINT_VERSION (no change)"
    else
        echo "Ansible Lint: $CURRENT_ANSIBLE_LINT_VERSION -> $ANSIBLE_LINT_VERSION"
    fi

    if [[ "$CURRENT_FLAKE8_VERSION" == "$FLAKE8_VERSION" ]]; then
        echo "Flake8: $CURRENT_FLAKE8_VERSION (no change)"
    else
        echo "Flake8: $CURRENT_FLAKE8_VERSION -> $FLAKE8_VERSION"
    fi

    if [[ "$CURRENT_TESTINFRA_VERSION" == "$TESTINFRA_VERSION" ]]; then
        echo "Testinfra: $CURRENT_TESTINFRA_VERSION (no change)"
    else
        echo "Testinfra: $CURRENT_TESTINFRA_VERSION -> $TESTINFRA_VERSION"
    fi

    echo ''

    wrapper_freeze
}

wrapper_help() {
    activate_virtualenv

    molecule --help

    echo "
Molecule Wrapper

Additional options:
  --ansible VERSION          Use the specified version of Ansible
  --docker-lib VERSION       Use the specified version of the Python Docker
                             library
  --molecule VERSION         Use the specified version of Molecule
  --python VERSION           Use the specified version of Python
  --yamllint VERSION         Use the specified version of YamlLint
  --ansible-lint VERSION     Use the specified version of Ansible Lint
  --flake8 VERSION           Use the specified version of Flake8
  --testinfra VERSION        Use the specified version of Testinfra
  --use-system-dependencies  Use system dependencies

Additional commands:
  wrapper-clean             Removes all the wrapper virtual environments
  wrapper-freeze            Freezes the dependency versions being used
  wrapper-unfreeze          Un-freezes the dependency versions
  wrapper-upgrade           Upgrades the Molecule Wrapper to the latest version
  wrapper-upgrade-versions  Upgrades any frozen dependency versions
  wrapper-version           Displays the current version of Molecule Wrapper
"
}

query_package_versions() {
    local package_name="$1"
    local min_version="$2"

    if [[ ! -x "$(command -v curl)" ]]; then
        build_dependencies_present > /dev/null
    fi
    if [[ ! -x "$(command -v jq)" ]]; then
        build_dependencies_present > /dev/null
    fi
    if [[ ! -x "$(command -v curl)" ]]; then
        echo 'Error: curl is not installed.' >&2
        exit 1
    fi
    if [[ ! -x "$(command -v jq)" ]]; then
        echo 'Error: jq is not installed.' >&2
        exit 1
    fi
    if [[ ! -x "$(command -v sort)" ]]; then
        echo 'Error: sort is not installed.' >&2
        exit 1
    fi

    for i in $(curl --fail --silent --show-error \
            --location "https://pypi.org/pypi/$package_name/json" \
            | jq --raw-output ".releases | keys | .[], \"$min_version.\"" \
            | grep --invert-match '[a-zA-Z]' \
            | sort --version-sort --reverse) ; do
        if [[ "$i" == "$min_version." ]]; then
            break
        fi
        echo "$i"
    done
}

wrapper_options_ansible() {
    echo 'latest'
    query_package_versions 'ansible' '2.8'
}

wrapper_options_docker_lib() {
    echo 'latest'
    query_package_versions 'docker' '3.0'
}

wrapper_options_molecule() {
    echo 'latest'
    query_package_versions 'molecule' '3.0.6'
}

wrapper_options_python() {
    if [[ ! -x "$(command -v sort)" ]]; then
        echo 'Error: sort is not installed.' >&2
        exit 1
    fi

    pyenv_present > /dev/null

    local min_version='3.6'

    echo 'latest'

    for i in $( (echo "$min_version." && \
            ~/.pyenv/plugins/python-build/bin/python-build --definitions) \
            | grep --color=never '^[0-9]' \
            | grep --invert-match '\-dev$' \
            | sort --version-sort --reverse) ; do
        if [[ "$i" == "$min_version." ]]; then
            break
        fi
        echo "$i"
    done
}

wrapper_options_yamllint() {
    echo 'latest'
    query_package_versions 'yamllint' '1.24.2'
}

wrapper_options_ansible_lint() {
    echo 'latest'
    query_package_versions 'ansible-lint' '4.2.0'
}

wrapper_options_flake8() {
    echo 'latest'
    query_package_versions 'flake8' '3.8.3'
}

wrapper_options_testinfra() {
    echo 'latest'
    query_package_versions 'testinfra' '5.2.2'
}

wrapper_options_scenario() {
    (
        cd molecule > /dev/null &&
        for d in *; do
            if [ -d "$d" ]; then
                echo "$d"
            fi
        done
    )
}

wrapper_virtualenv() {
    activate_virtualenv > /dev/null
    echo "$VIRTUAL_ENV"
}

parse_args() {
    set +e

    while [[ $# -gt 0 ]]; do
        key="$1"

        case $key in
            --python=*)
                PYTHON_VERSION="${1#*=}"
                shift
                ;;
            --python)
                shift
                PYTHON_VERSION="$1"
                shift
                ;;
            --ansible=*)
                ANSIBLE_VERSION="${1#*=}"
                shift
                ;;
            --ansible)
                shift
                ANSIBLE_VERSION="$1"
                shift
                ;;
            --docker-lib=*)
                DOCKER_LIB_VERSION="${1#*=}"
                shift
                ;;
            --docker-lib)
                shift
                DOCKER_LIB_VERSION="$1"
                shift
                ;;
            --molecule=*)
                MOLECULE_VERSION="${1#*=}"
                shift
                ;;
            --molecule)
                shift
                MOLECULE_VERSION="$1"
                shift
                ;;
            --yamllint=*)
                YAMLLINT_VERSION="${1#*=}"
                shift
                ;;
            --yamllint)
                shift
                YAMLLINT_VERSION="$1"
                shift
                ;;
            --ansible-lint=*)
                ANSIBLE_LINT_VERSION="${1#*=}"
                shift
                ;;
            --ansible-lint)
                shift
                ANSIBLE_LINT_VERSION="$1"
                shift
                ;;
            --flake8=*)
                FLAKE8_VERSION="${1#*=}"
                shift
                ;;
            --flake8)
                shift
                FLAKE8_VERSION="$1"
                shift
                ;;
            --testinfra)
                shift
                TESTINFRA_VERSION="$1"
                shift
                ;;
            --testinfra=*)
                TESTINFRA_VERSION="${1#*=}"
                shift
                ;;
            --use-system-dependencies)
                USE_SYSTEM_DEPENDENCIES=true
                shift
                ;;
            --help)
                MOLECULE_CMD='wrapper-help'
                break
                ;;
            wrapper-*)
                MOLECULE_CMD="$1"
                shift
                ;;
            check|converge|create|dependency|destroy|idempotence|init|lint|list|login|matrix|prepare|side-effect|syntax|test|verify)
                if [[ "$MOLECULE_CMD" != '' ]]; then
                    shift
                else
                    MOLECULE_CMD="$1"
                    shift
                    for arg in "$@"; do
                        POST_ARGS+=("$arg")
                    done
                    break
                fi
                ;;
            *)
                PRE_ARGS+=("$1")
                shift
                ;;
        esac
    done
    set -e
}

determine_versions() {
    if [[ $USE_SYSTEM_DEPENDENCIES == false ]]; then
        USE_SYSTEM_DEPENDENCIES="$MOLECULEW_USE_SYSTEM"
    fi
    if [[ $PYTHON_VERSION == '' ]]; then
        PYTHON_VERSION="$MOLECULEW_PYTHON"
    fi
    if [[ $ANSIBLE_VERSION == '' ]]; then
        ANSIBLE_VERSION="$MOLECULEW_ANSIBLE"
    fi
    if [[ $DOCKER_LIB_VERSION == '' ]]; then
        DOCKER_LIB_VERSION="$MOLECULEW_DOCKER_LIB"
    fi
    if [[ $MOLECULE_VERSION == '' ]]; then
        MOLECULE_VERSION="$MOLECULEW_MOLECULE"
    fi
    if [[ $YAMLLINT_VERSION == '' ]]; then
        YAMLLINT_VERSION="$MOLECULEW_YAMLLINT"
    fi
    if [[ $ANSIBLE_LINT_VERSION == '' ]]; then
        ANSIBLE_LINT_VERSION="$MOLECULEW_ANSIBLE_LINT"
    fi
    if [[ $FLAKE8_VERSION == '' ]]; then
        FLAKE8_VERSION="$MOLECULEW_FLAKE8"
    fi
    if [[ $TESTINFRA_VERSION == '' ]]; then
        TESTINFRA_VERSION="$MOLECULEW_TESTINFRA"
    fi

    if [[ $USE_SYSTEM_DEPENDENCIES == true ]]; then
        if [[ $PYTHON_VERSION != '' ]]; then
            echo "Error: --python and --use-system-dependencies cannot be used together" >&2
            exit 1
        fi
        PYTHON_VERSION=system
    elif [[ $PYTHON_VERSION == '' ]] || [[ $PYTHON_VERSION == 'default' ]]; then
        if [[ -f $PYTHON_VERSION_FILE ]]; then
            PYTHON_VERSION=$(<"$PYTHON_VERSION_FILE")
        fi
        if [[ $PYTHON_VERSION == '' ]]; then
            query_latest_python_version
        fi
    elif [[ $PYTHON_VERSION == 'latest' ]]; then
        query_latest_python_version
    fi

    if [[ $ANSIBLE_VERSION == '' ]] || [[ $ANSIBLE_VERSION == 'default' ]]; then
        if [[ -f $ANSIBLE_VERSION_FILE ]]; then
            ANSIBLE_VERSION=$(<"$ANSIBLE_VERSION_FILE")
        fi
        if [[ $ANSIBLE_VERSION == '' ]]; then
            query_latest_package_version ANSIBLE_VERSION ansible
        fi
    elif [[ $ANSIBLE_VERSION == 'latest' ]]; then
        query_latest_package_version ANSIBLE_VERSION ansible
    fi

    if [[ $DOCKER_LIB_VERSION == '' ]] || [[ $DOCKER_LIB_VERSION == 'default' ]]; then
        if [[ -f $DOCKER_LIB_VERSION_FILE ]]; then
            DOCKER_LIB_VERSION=$(<"$DOCKER_LIB_VERSION_FILE")
        fi
        if [[ $DOCKER_LIB_VERSION == '' ]]; then
            query_latest_package_version DOCKER_LIB_VERSION docker
        fi
    elif [[ $DOCKER_LIB_VERSION == 'latest' ]]; then
        query_latest_package_version DOCKER_LIB_VERSION docker
    fi

    if [[ $MOLECULE_VERSION == '' ]] || [[ $MOLECULE_VERSION == 'default' ]]; then
        if [[ -f $MOLECULE_VERSION_FILE ]]; then
            MOLECULE_VERSION=$(<"$MOLECULE_VERSION_FILE")
        fi
        if [[ $MOLECULE_VERSION == '' ]]; then
            query_latest_package_version MOLECULE_VERSION molecule
        fi
    elif [[ $MOLECULE_VERSION == 'latest' ]]; then
        query_latest_package_version MOLECULE_VERSION molecule
    fi

    if [[ $YAMLLINT_VERSION == '' ]] || [[ $YAMLLINT_VERSION == 'default' ]]; then
        if [[ -f $YAMLLINT_VERSION_FILE ]]; then
            YAMLLINT_VERSION=$(<"$YAMLLINT_VERSION_FILE")
        fi
        if [[ $YAMLLINT_VERSION == '' ]]; then
            query_latest_package_version YAMLLINT_VERSION yamllint
        fi
    elif [[ $YAMLLINT_VERSION == 'latest' ]]; then
        query_latest_package_version YAMLLINT_VERSION yamllint
    fi

    if [[ $ANSIBLE_LINT_VERSION == '' ]] || [[ $ANSIBLE_LINT_VERSION == 'default' ]]; then
        if [[ -f $ANSIBLE_LINT_VERSION_FILE ]]; then
            ANSIBLE_LINT_VERSION=$(<"$ANSIBLE_LINT_VERSION_FILE")
        fi
        if [[ $ANSIBLE_LINT_VERSION == '' ]]; then
            query_latest_package_version ANSIBLE_LINT_VERSION ansible-lint
        fi
    elif [[ $ANSIBLE_LINT_VERSION == 'latest' ]]; then
        query_latest_package_version ANSIBLE_LINT_VERSION ansible-lint
    fi

    if [[ $FLAKE8_VERSION == '' ]] || [[ $FLAKE8_VERSION == 'default' ]]; then
        if [[ -f $FLAKE8_VERSION_FILE ]]; then
            FLAKE8_VERSION=$(<"$FLAKE8_VERSION_FILE")
        fi
        if [[ $FLAKE8_VERSION == '' ]]; then
            query_latest_package_version FLAKE8_VERSION flake8
        fi
    elif [[ $FLAKE8_VERSION == 'latest' ]]; then
        query_latest_package_version FLAKE8_VERSION flake8
    fi

    if [[ $TESTINFRA_VERSION == '' ]] || [[ $TESTINFRA_VERSION == 'default' ]]; then
        if [[ -f $TESTINFRA_VERSION_FILE ]]; then
            TESTINFRA_VERSION=$(<"$TESTINFRA_VERSION_FILE")
        fi
        if [[ $TESTINFRA_VERSION == '' ]]; then
            query_latest_package_version TESTINFRA_VERSION testinfra
        fi
    elif [[ $TESTINFRA_VERSION == 'latest' ]]; then
        query_latest_package_version TESTINFRA_VERSION testinfra
    fi
}

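# Version resolution above (for reference): an explicit CLI flag wins, then a
# MOLECULEW_* environment variable, then a version frozen under .moleculew/,
# and only then is the latest release queried from PyPI. For example:
#   MOLECULEW_ANSIBLE=2.9.13 ./moleculew test   # pin via env var, no flag
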
activate_virtualenv() {
    determine_versions

    MOLECULE_WRAPPER_ENV="$HOME/.moleculew/ml-${MOLECULE_VERSION}_an-${ANSIBLE_VERSION}_py-${PYTHON_VERSION}_dk-${DOCKER_LIB_VERSION}_yl-${YAMLLINT_VERSION}_al-${ANSIBLE_LINT_VERSION}_f8-${FLAKE8_VERSION}_ti-${TESTINFRA_VERSION}"

    if [ ! -f "$MOLECULE_WRAPPER_ENV/bin/activate" ]; then

        build_dependencies_present

        docker_present

        python_present

        virtualenv_present

        banner "Initializing virtualenv $MOLECULE_WRAPPER_ENV"
        virtualenv "--python=$PYTHON_EXE" "$MOLECULE_WRAPPER_ENV"
        # shellcheck disable=SC1090
        source "$MOLECULE_WRAPPER_ENV/bin/activate"
        echo ''

        install_ansible

        install_docker_lib

        install_molecule

        install_yamllint

        install_ansible_lint

        install_flake8

        install_testinfra
    else
        # shellcheck disable=SC1090
        source "$MOLECULE_WRAPPER_ENV/bin/activate"
    fi
}

parse_args "$@"

case $MOLECULE_CMD in
    wrapper-clean)
        wrapper_clean
        ;;
    wrapper-freeze)
        wrapper_freeze
        ;;
    wrapper-help)
        wrapper_help
        ;;
    wrapper-install)
        activate_virtualenv
        ;;
    wrapper-options-ansible)
        wrapper_options_ansible
        ;;
    wrapper-options-docker-lib)
        wrapper_options_docker_lib
        ;;
    wrapper-options-molecule)
        wrapper_options_molecule
        ;;
    wrapper-options-python)
        wrapper_options_python
        ;;
    wrapper-options-yamllint)
        wrapper_options_yamllint
        ;;
    wrapper-options-ansible-lint)
        wrapper_options_ansible_lint
        ;;
    wrapper-options-flake8)
        wrapper_options_flake8
        ;;
    wrapper-options-testinfra)
        wrapper_options_testinfra
        ;;
    wrapper-options-scenario)
        wrapper_options_scenario
        ;;
    wrapper-unfreeze)
        wrapper_unfreeze
        ;;
    wrapper-upgrade)
        wrapper_upgrade
        ;;
    wrapper-upgrade-versions)
        wrapper_upgrade_versions
        ;;
    wrapper-version)
        wrapper_version
        ;;
    wrapper-versions)
        wrapper_versions
        ;;
    wrapper-virtualenv)
        wrapper_virtualenv
        ;;
    wrapper-*)
        echo "Unsupported command: $1" >&2
        exit 1
        ;;
    *)
        activate_virtualenv

        # shellcheck disable=SC2086
        exec molecule "${PRE_ARGS[@]}" $MOLECULE_CMD "${POST_ARGS[@]}"
        ;;
esac
27
roles/oh-my-zsh/tasks/debian-console-setup.yml
Normal file
@@ -0,0 +1,27 @@
---
# This is for the boot console only (i.e. not xterm, ssh or docker).
# The oh-my-zsh prompt appears corrupted unless the console is in UTF-8.
# If the console-setup file is missing don't create it... assume we're
# running in an environment without a boot console (e.g. docker).
- name: check if console-setup exists
  stat:
    path: /etc/default/console-setup
  register: console_setup_file

- name: install console-setup
  become: yes
  apt:
    name: console-setup
    state: present
  when: console_setup_file.stat.exists

- name: edit console-setup to utf-8
  become: yes
  lineinfile:
    dest: /etc/default/console-setup
    regexp: ^CHARMAP=
    line: CHARMAP="UTF-8"
    state: present
  when: console_setup_file.stat.exists
  notify:
    - dpkg-reconfigure console-setup
84
roles/oh-my-zsh/tasks/install.yml
Normal file
@@ -0,0 +1,84 @@
---
- name: install dependencies
  become: true
  become_user: 'root'
  # become: "{{ ansible_distribution != 'MacOSX' }}"
  package:
    name:
      - git
      - zsh
    state: present
  with_items: "{{ users }}"
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

- name: clone oh-my-zsh for users
  tags:
    # Suppress warning: [ANSIBLE0006] git used in place of git module
    # Git module doesn't allow us to set `core.autocrlf=input`.
    - skip_ansible_lint
  become: true
  become_user: '{{ user.username }}'
  # core.autocrlf=input prevents https://github.com/robbyrussell/oh-my-zsh/issues/4402
  command: 'git clone -c core.autocrlf=input --depth=1 https://github.com/robbyrussell/oh-my-zsh.git .oh-my-zsh'
  args:
    chdir: '~{{ user.username }}'
    creates: '~{{ user.username }}/.oh-my-zsh'
  with_items: "{{ users }}"
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

- name: set permissions of oh-my-zsh for users
  become: true
  file:
    path: '~{{ user.username }}/.oh-my-zsh'
    # Prevent the cloned repository from having insecure permissions. Failing to do
    # so causes compinit() calls to fail with "command not found: compdef" errors
    # for users with insecure umasks (e.g., "002", allowing group writability).
    mode: 'go-w'
    recurse: true
  with_items: "{{ users }}"
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

- name: set default shell for users
  become: true
  user:
    name: '{{ user.username }}'
    shell: /bin/zsh
  with_items: "{{ users }}"
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

- name: write .zshrc for users
  become: true
  become_user: '{{ user.username }}'
  template:
    src: zshrc.j2
    dest: '~{{ user.username }}/.zshrc'
    backup: true
    mode: 'u=rw,go=r'
  with_items: '{{ users }}'
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

- name: add custom template theme
  become: true
  become_user: '{{ user.username }}'
  template:
    src: robbyrussell_zsh_theme.j2
    dest: '~{{ user.username }}/.oh-my-zsh/custom/themes/robbyrussell.zsh-theme'
    mode: 'u=rw,go=r'
  with_items: '{{ users }}'
  when: "((user.oh_my_zsh | default({})).install | default(oh_my_zsh_install)) | bool"
  loop_control:
    loop_var: user
    label: '{{ user.username }}'

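# Note on the repeated `when` above (for reference): a per-user setting wins
# over the role-wide default, e.g. with `oh_my_zsh_install: yes` a user entry
#   - username: bob
#     oh_my_zsh:
#       install: no
# is skipped by every task, while `- username: alice` with no `oh_my_zsh`
# key falls back to the role default and is installed.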
6
roles/oh-my-zsh/tasks/main.yml
Normal file
@@ -0,0 +1,6 @@
---
- import_tasks: install.yml

- name: console setup for Debian family distributions
  include_tasks: debian-console-setup.yml
  when: ansible_os_family == 'Debian'
7
roles/oh-my-zsh/templates/robbyrussell_zsh_theme.j2
Normal file
@@ -0,0 +1,7 @@
local ret_status="%(?:%{$fg_bold[green]%}%M ⋊>:%{$fg_bold[red]%}%M ⋊>)"
PROMPT='${ret_status} %{$fg[cyan]%}%c%{$reset_color%} $(git_prompt_info)'

ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[blue]%}git:(%{$fg[red]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%} "
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[blue]%}) %{$fg[yellow]%}✗"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg[blue]%})"
103
roles/oh-my-zsh/templates/zshrc.j2
Normal file
@@ -0,0 +1,103 @@
{{ ansible_managed | comment }}

{% set oh_my_zsh = user.oh_my_zsh|default({}) %}

# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH

# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh

# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="{{ oh_my_zsh.theme | default(oh_my_zsh_theme) }}"

# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )

# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"

# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"

# Uncomment the following line to disable bi-weekly auto-update checks.
DISABLE_AUTO_UPDATE="true"

# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"

# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13

# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true

# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"

# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"

# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"

# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"

# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"

# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"

# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder

# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=({{ oh_my_zsh.plugins | default(oh_my_zsh_plugins) | join(' ') }})

source $ZSH/oh-my-zsh.sh

# User configuration

# export MANPATH="/usr/local/man:$MANPATH"

# You may need to manually set your language environment
# export LANG=en_US.UTF-8

# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
#   export EDITOR='vim'
# else
#   export EDITOR='mvim'
# fi

# Compilation flags
# export ARCHFLAGS="-arch x86_64"

# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
2
roles/oh-my-zsh/vars/main.yml
Normal file
@@ -0,0 +1,2 @@
---
# vars file for oh-my-zsh
16
roles/prometheus-grafana/meta/main.yml
Normal file
@@ -0,0 +1,16 @@
---
galaxy_info:
  author: Your Name
  description: Ansible role to deploy Prometheus & Grafana using Docker
  license: MIT
  min_ansible_version: "2.9"
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - monitoring
    - prometheus
    - grafana
dependencies:
  - docker

56
roles/prometheus-grafana/tasks/grafana.yml
Normal file
@@ -0,0 +1,56 @@
---
- name: Create a Docker network for Grafana
  docker_network:
    name: monitoring_network
    state: present

- name: Create grafana datasources directory on host
  file:
    path: /etc/grafana/datasources
    state: directory
    mode: "0755"

- name: Create grafana dashboards directory on host
  file:
    path: /etc/grafana/dashboards
    state: directory
    mode: "0755"

- name: Copy prometheus datasource config
  template:
    src: grafana-datasources.yml.j2
    dest: /etc/grafana/datasources/prometheus.yml

- name: Copy grafana default dashboards config
  template:
    src: grafana-dashboards.yml.j2
    dest: /etc/grafana/dashboards/grafana-dashboards.yml

- name: Copy grafana dashboards
  copy:
    src: "{{ item }}"
    dest: /etc/grafana/dashboards/{{ item | basename | regex_replace('\.j2$', '') }}
  loop: "{{ query('fileglob', 'templates/grafana-dashboards/*.json') }}"

- name: Pull Grafana Docker image
  docker_image:
    name: grafana/grafana-oss:latest
    source: pull

- name: Start Grafana container
  docker_container:
    name: grafana
    image: grafana/grafana-oss:latest
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "3000:3000"
    volumes:
      - /etc/grafana/datasources:/etc/grafana/provisioning/datasources
      - /etc/grafana/dashboards:/etc/grafana/provisioning/dashboards
    networks:
      - name: monitoring_network
    env:
      GF_SECURITY_ADMIN_PASSWORD: "{{ env_vars.GRAFANA_PASSWORD }}"  # admin password comes from the env role's .env file

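# grafana-datasources.yml.j2 itself is not shown in this diff. A minimal
# Grafana datasource provisioning file for this setup would typically look
# like the following sketch (an assumption, not the repository's actual
# template; 'prometheus' resolves over monitoring_network via Docker DNS):
#
#   apiVersion: 1
#   datasources:
#     - name: Prometheus
#       type: prometheus
#       access: proxy
#       url: http://prometheus:9090
#       isDefault: true
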
36
roles/prometheus-grafana/tasks/loki.yml
Normal file
@@ -0,0 +1,36 @@
---
- name: Create a Docker network for loki
  docker_network:
    name: monitoring_network
    state: present

- name: Pull loki Docker image
  docker_image:
    name: grafana/loki:latest
    source: pull

- name: Create loki configuration file directory on host
  file:
    path: /etc/loki
    state: directory
    mode: '0755'

- name: Create loki configuration file
  template:
    src: loki.yml.j2
    dest: /etc/loki/config.yml

- name: Start loki container
  docker_container:
    name: loki
    image: grafana/loki:latest
    state: started
    restart: yes
    command: -config.file=/etc/loki/config.yml
    restart_policy: unless-stopped
    published_ports:
      - "3100:3100"
    volumes:
      - /etc/loki/config.yml:/etc/loki/config.yml
    networks:
      - name: monitoring_network
11
roles/prometheus-grafana/tasks/main.yml
Normal file
@@ -0,0 +1,11 @@
---
# ensure we have variables from .env files
- include_tasks: ../roles/env/tasks/main.yml

- include_tasks: prometheus.yml
- include_tasks: grafana.yml

# loki and promtail are currently disabled
# - include_tasks: loki.yml
# - include_tasks: promtail.yml

36
roles/prometheus-grafana/tasks/prometheus.yml
Normal file
@@ -0,0 +1,36 @@
---
- name: Create a Docker network for Prometheus
  docker_network:
    name: monitoring_network
    state: present

- name: Pull Prometheus Docker image
  docker_image:
    name: prom/prometheus:{{ prometheus_version }}
    source: pull

- name: Create Prometheus configuration file directory on host
  file:
    path: /etc/prometheus
    state: directory
    mode: '0755'

- name: Create Prometheus configuration file
  template:
    src: prometheus.yml.j2
    dest: /etc/prometheus/prometheus.yml

- name: Start Prometheus container
  docker_container:
    name: prometheus
    image: prom/prometheus:{{ prometheus_version }}
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "9090:9090"
    volumes:
      - /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
    networks:
      - name: monitoring_network

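# prometheus.yml.j2 itself is not shown in this diff. A minimal scrape config
# for this setup might look like the following sketch (an assumption, not the
# repository's actual template; the cadvisor target depends on where the
# cAdvisor service actually listens):
#
#   global:
#     scrape_interval: 15s
#   scrape_configs:
#     - job_name: prometheus
#       static_configs:
#         - targets: ['localhost:9090']
#     - job_name: cadvisor
#       static_configs:
#         - targets: ['<host-ip>:8080']
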
24
roles/prometheus-grafana/templates/grafana-dashboards.yml.j2
Normal file
@@ -0,0 +1,24 @@
apiVersion: 1

providers:
  # <string> a unique provider name. Required
  - name: 'ansible managed dashboards'
    # <int> Org id. Default to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: ''
    # <string> folder UID. will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Default to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /etc/grafana/provisioning/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true
Some files were not shown because too many files have changed in this diff.