mirror of https://github.com/KevinMidboe/playbooks-retailor.git (synced 2026-01-10 11:25:44 +00:00)

ansible playbooks for retailor.io infrastructure
roles/elasticsearch/README.md (Normal file, 23 lines)
@@ -0,0 +1,23 @@
# elasticsearch

This play configures an ELK stack using Docker; the stack is exposed over plain HTTP (no HTTPS). Container variables are configured in the `tasks/SERVICE.yml` files, where environment variables for the services and for Java can also be set.
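
For example, the Elastic Stack image tag shared by the Elasticsearch, Kibana and Logstash containers comes from the `elk_version` variable, while the Java heap is capped through `ES_JAVA_OPTS` in `tasks/elasticsearch.yml`. A minimal sketch of pinning the version (the `group_vars/elk.yml` location and the value are illustrative assumptions, not part of this role):

```yaml
# group_vars/elk.yml (hypothetical location)
# image tag used by tasks/elasticsearch.yml, tasks/kibana.yml and tasks/logstash.yml
elk_version: "8.15.0"
```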

The following manual steps are required during setup.

## elastic

After the Elasticsearch container is created, SSH into the host running it and generate a new password for the `elastic` user with:

```bash
docker exec -it elasticsearch /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic
```
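
To confirm the new password works, a quick request against the port published on the host can be used (an illustrative check, with `<new password>` being the value printed by the command above):

```bash
# should return the cluster name and version info as JSON
curl -u elastic:<new password> http://localhost:9200
```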

## kibana

Set a password for the `kibana_system` user:

```bash
export ELASTIC_PASSWORD=
export KIBANA_PASSWORD=
curl -s -X POST -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" http://elasticsearch:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}";
```
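
As an optional sanity check (not part of the play), the security API can then be queried as `kibana_system` with the same host and variable as above:

```bash
# returns details for the authenticated user when the password is correct
curl -s -u "kibana_system:${KIBANA_PASSWORD}" http://elasticsearch:9200/_security/_authenticate
```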
roles/elasticsearch/meta/main.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
---
galaxy_info:
  author: Your Name
  description: Ansible role to deploy the ELK stack (Elasticsearch, Kibana, Logstash) using Docker
  license: MIT
  min_ansible_version: "2.9"
  platforms:
    - name: Debian
      versions:
        - all
  galaxy_tags:
    - monitoring
    - elasticsearch
dependencies:
  - docker
roles/elasticsearch/tasks/elasticsearch.yml (Normal file, 46 lines)
@@ -0,0 +1,46 @@
---
- name: Create a Docker network for Elasticsearch
  docker_network:
    name: elk_network
    state: present

- name: Pull Elasticsearch Docker image
  docker_image:
    name: docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}
    source: pull

- name: Create Elasticsearch configuration file directory on host
  file:
    path: /etc/elasticsearch
    state: directory
    mode: '0755'

# - name: Create Elasticsearch configuration file
#   template:
#     src: elasticsearch.yml.j2
#     dest: /etc/elasticsearch/elasticsearch.yml

- name: Start Elasticsearch container
  docker_container:
    name: elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch-wolfi:{{ elk_version }}
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "9200:9200"
    volumes:
      - /etc/elasticsearch/esdata:/usr/share/elasticsearch/data
    networks:
      - name: elk_network
        ipv4_address: 172.19.0.2
    env:
      node.name: elasticsearch
      cluster.name: retailor-elk
      discovery.type: single-node
      bootstrap.memory_lock: "true"
      # limits the Elasticsearch JVM heap to between 1 GB and 2 GB
      ES_JAVA_OPTS: "-Xms1g -Xmx2g"
      # disables SSL on the HTTP layer (xpack security itself stays enabled)
      xpack.security.http.ssl.enabled: "false"
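
Note that `bootstrap.memory_lock: "true"` only takes effect when the container is allowed to lock memory. A hedged sketch of the `ulimits` parameter that would typically accompany it on the "Start Elasticsearch container" task (an assumption, not part of this commit):

```yaml
# extra docker_container parameter; format is name:soft:hard, -1 meaning unlimited
ulimits:
  - memlock:-1:-1
```
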
roles/elasticsearch/tasks/kibana.yml (Normal file, 37 lines)
@@ -0,0 +1,37 @@
---
- name: Create a Docker network for Kibana
  docker_network:
    name: elk_network
    state: present

- name: Create kibana directory on host
  file:
    path: /etc/kibana
    state: directory
    mode: "0755"

- name: Pull Kibana Docker image
  docker_image:
    name: docker.elastic.co/kibana/kibana:{{ elk_version }}
    source: pull

# TODO remember to move the CA cert from Elasticsearch to Kibana:
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt kibana:/usr/share/kibana/config/certs/ca/http_ca.crt
- name: Start Kibana container
  docker_container:
    name: kibana
    image: docker.elastic.co/kibana/kibana:{{ elk_version }}
    state: started
    restart: yes
    restart_policy: unless-stopped
    published_ports:
      - "5601:5601"
    env:
      ELASTICSEARCH_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
      ELASTICSEARCH_USERNAME: kibana_system
      ELASTICSEARCH_PASSWORD: "{{ env_vars.KIBANA_PASSWORD }}"
      TELEMETRY_ENABLED: "false"
    networks:
      - name: elk_network
roles/elasticsearch/tasks/logstash.yml (Normal file, 64 lines)
@@ -0,0 +1,64 @@
---
- name: Create a Docker network for Logstash
  docker_network:
    name: elk_network
    state: present

- name: Create logstash directory on host
  file:
    path: /etc/logstash
    state: directory
    mode: "0755"

- name: Copy logstash config
  copy:
    src: templates/pipelines.yml.j2
    dest: /etc/logstash/pipelines.yml

- name: Create logstash pipeline directory on host
  file:
    path: /etc/logstash/pipeline
    state: directory
    mode: "0755"

- name: Copy logstash input configs
  copy:
    src: "{{ item }}"
    dest: /etc/logstash/pipeline/{{ item | basename | regex_replace('\.j2$', '') }}
  loop: "{{ query('fileglob', 'templates/logstash-conf.d/*.j2') }}"

- name: Pull Logstash Docker image
  docker_image:
    name: docker.elastic.co/logstash/logstash:{{ elk_version }}
    source: pull

# TODO remember to move the CA cert from Elasticsearch to Logstash:
# docker cp elasticsearch:/usr/share/elasticsearch/config/certs/http_ca.crt .
# docker cp http_ca.crt logstash:/usr/share/logstash/config/certs/ca/http_ca.crt
- name: Start Logstash container
  docker_container:
    name: logstash
    image: docker.elastic.co/logstash/logstash:{{ elk_version }}
    state: started
    restart: yes
    restart_policy: unless-stopped
    command:
      - /bin/bash
      - -c
      - |
        echo "Waiting for Elasticsearch availability";
        until curl -s {{ env_vars.ELASTIC_HOSTS }} | grep -q "missing authentication credentials"; do sleep 1; done;
        echo "Starting logstash";
        /usr/share/logstash/bin/logstash
    published_ports:
      - "5044-5049:5044-5049"
    volumes:
      - /etc/logstash/pipelines.yml:/usr/share/logstash/config/pipelines.yml
      - /etc/logstash/pipeline:/usr/share/logstash/pipeline
    env:
      xpack.monitoring.enabled: "false"
      ELASTIC_USER: elastic
      ELASTIC_PASSWORD: "{{ env_vars.ELASTIC_PASSWORD }}"
      ELASTIC_HOSTS: "{{ env_vars.ELASTIC_HOSTS }}"
    networks:
      - name: elk_network
roles/elasticsearch/tasks/main.yml (Normal file, 7 lines)
@@ -0,0 +1,7 @@
---
# ensure we have variables from .env files
- include_tasks: ../roles/env/tasks/main.yml

- include_tasks: elasticsearch.yml
- include_tasks: kibana.yml
- include_tasks: logstash.yml
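
For reference, a minimal playbook applying this role might look like the sketch below (the playbook name and the `elk` host group are assumptions, not defined in this commit):

```yaml
# site.yml (hypothetical)
- hosts: elk
  become: true
  roles:
    - elasticsearch
```
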
roles/elasticsearch/templates/filebeat.yml.j2 (Normal file, 272 lines)
@@ -0,0 +1,272 @@
###################### Filebeat Configuration Example #########################

# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html

# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.

# ============================== Filebeat inputs ===============================

filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.

- type: log

  # Change to true to enable this input configuration.
  enabled: false

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

  ### Multiline options

  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation

  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[

  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false

  # Match can be set to "after" or "before". It is used to define if lines should be appended to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to next in Logstash
  #multiline.match: after

# filestream is an input for collecting log messages from files. It is going to replace log input in the future.
- type: filestream

  # Change to true to enable this input configuration.
  enabled: true

  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/nginx/*.log
    #- c:\programdata\elasticsearch\logs\*

  exclude_files: ['\.gz$']

  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']

  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']

  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #prospector.scanner.exclude_files: ['.gz$']

  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1

# ============================== Filebeat modules ==============================

filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

# ======================= Elasticsearch template setting =======================

setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false


# ================================== General ===================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging

# ================================= Dashboards =================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

# =================================== Kibana ===================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"

  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

# =============================== Elastic Cloud ================================

# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

# ================================== Outputs ===================================

# Configure what output to use when sending the data collected by the beat.

# ---------------------------- Elasticsearch Output ----------------------------
# output.elasticsearch:
  # Array of hosts to connect to.
  # hosts: ["elastic.schleppe:9200"]

  # Protocol - either `http` (default) or `https`.
  # protocol: "https"

  # Authentication credentials - either API key or username/password.
  #api_key: "id:api_key"
  #username: "elastic"
  #password: "changeme"

# ------------------------------ Logstash Output -------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["elasticsearch:5400"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  # ssl.certificate_authorities: ["/etc/elk-certs/elk-ssl.crt"]

  # Certificate for SSL client authentication
  # ssl.certificate: "/etc/elk-certs/elk-ssl.crt"

  # Client Certificate Key
  # ssl.key: "/etc/elk-certs/elk-ssl.key"

# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# ================================== Logging ===================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
# logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publisher", "service".
#logging.selectors: ["*"]

# ============================= X-Pack Monitoring ==============================
# Filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
#monitoring.enabled: false

# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:

# ============================== Instrumentation ===============================

# Instrumentation support for the filebeat.
#instrumentation:
    # Set to true to enable instrumentation of filebeat.
    #enabled: false

    # Environment in which filebeat is running on (eg: staging, production, etc.)
    #environment: ""

    # APM Server hosts to report instrumentation results to.
    #hosts:
    #  - http://localhost:8200

    # API Key for the APM Server(s).
    # If api_key is set then secret_token will be ignored.
    #api_key:

    # Secret token for the APM Server(s).
    #secret_token:


# ================================= Migration ==================================

# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true

@@ -0,0 +1,17 @@
input {
  beats {
    port => 5045
  }
}

filter {
}

output {
  elasticsearch {
    index => "laravel-logs-%{+YYYY.MM}"
    hosts => "${ELASTIC_HOSTS}"
    user => "elastic"
    password => "${ELASTIC_PASSWORD}"
  }
}
@@ -0,0 +1,24 @@
input {
  beats {
    port => 5044
  }
}

filter {
  if [pipeline_id] == "nginx" {
    mutate { add_field => { "route" => "nginx_pipeline" } }
  } else if [pipeline_id] == "laravel" {
    mutate { add_field => { "route" => "laravel_pipeline" } }
  }
}

output {
  if [pipeline_id] == "nginx" {
    pipeline { send_to => "nginx_pipeline" }
  } else if [pipeline_id] == "laravel" {
    pipeline { send_to => "laravel_pipeline" }
  } else {
    # Handle unknown cases
    stdout { codec => rubydebug }
  }
}
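
The `pipeline { send_to => ... }` outputs above use Logstash pipeline-to-pipeline communication, which expects each downstream pipeline to declare a `pipeline` input with a matching address; the nginx and laravel configs in this commit listen on Beats ports instead. A hedged sketch of what a matching downstream input would look like (not part of the repository):

```
# hypothetical downstream pipeline receiving events routed to "nginx_pipeline"
input {
  pipeline {
    address => "nginx_pipeline"
  }
}
```
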
@@ -0,0 +1,39 @@
input {
  beats {
    port => 5044
  }
}

filter {
  grok {
    match => [ "message" , "%{COMBINEDAPACHELOG}+%{GREEDYDATA:extra_fields}"]
    overwrite => [ "message" ]
  }
  mutate {
    convert => ["response", "integer"]
    convert => ["bytes", "integer"]
    convert => ["responsetime", "float"]
  }
  # geoip {
  #   source => "clientip"
  #   add_tag => [ "nginx-geoip" ]
  # }
  date {
    match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
    remove_field => [ "timestamp" ]
  }
  # useragent {
  #   source => "agent"
  # }
}

output {
  elasticsearch {
    index => "weblogs-%{+YYYY.MM}"
    hosts => "${ELASTIC_HOSTS}"
    user => "elastic"
    password => "${ELASTIC_PASSWORD}"
    document_type => "nginx_logs"
  }
}
roles/elasticsearch/templates/pipelines.yml.j2 (Normal file, 5 lines)
@@ -0,0 +1,5 @@
- pipeline.id: nginx_pipeline
  path.config: "/usr/share/logstash/pipeline/nginx_pipeline.conf"

- pipeline.id: laravel_pipeline
  path.config: "/usr/share/logstash/pipeline/laravel_pipeline.conf"