diff --git a/playbook/group_vars/all b/playbook/group_vars/all index 754f85f85c882b1430f1f306b5177aaf15bd8708..7b4ed9d2c6185af55219608d3aed395e62ca7489 100644 --- a/playbook/group_vars/all +++ b/playbook/group_vars/all @@ -1,4 +1,7 @@ --- -arch: x86_64 es_name: kibana_system -es_name_password: Kylinmanager13579! \ No newline at end of file +es_name_password: Kylinmanager13579! +needSSL: true +kibana_ssl_crt_file: /opt/kibana/config/certs/kibana-server.crt +kibana_ssl_key_file: /opt/kibana/config/certs/kibana-server.key +kibana_elastic_ca_file: /opt/kibana/config/certs/elasticsearch-ca.pem \ No newline at end of file diff --git a/playbook/host-multi-machine b/playbook/host-multi-machine new file mode 100644 index 0000000000000000000000000000000000000000..e39d92586980d758a3984ca7bc1c6fcd65e6ea19 --- /dev/null +++ b/playbook/host-multi-machine @@ -0,0 +1,11 @@ +[elasticsearch] +10.41.160.165 + +[kibana] +10.41.160.165 + +[fleet] +10.41.160.164 + +[filebeat] +10.41.160.165 \ No newline at end of file diff --git a/playbook/multi-machine.yml b/playbook/multi-machine.yml new file mode 100644 index 0000000000000000000000000000000000000000..114147bcca3b2209e86daf2a02a75501a8527a22 --- /dev/null +++ b/playbook/multi-machine.yml @@ -0,0 +1,28 @@ +--- +- name: es install + hosts: elasticsearch + remote_user: root + + roles: + - elasticsearch + +- name: kibana install + hosts: kibana + remote_user: root + + roles: + - kibana + +- name: fleet install + hosts: fleet + remote_user: root + + roles: + - fleet + +- name: filebeat install + hosts: filebeat + remote_user: root + + roles: + - filebeat diff --git a/playbook/roles/elasticsearch/tasks/main.yml b/playbook/roles/elasticsearch/tasks/main.yml index 57c42c834f7a55ca671081bf3fee2d635b13438d..8257c3af6341fffa7d17778b7e95734fa2569ba2 100644 --- a/playbook/roles/elasticsearch/tasks/main.yml +++ b/playbook/roles/elasticsearch/tasks/main.yml @@ -3,33 +3,28 @@ dnf: name: java-11-openjdk-devel* state: present +- name: mkdir elasticsearch + shell: mkdir -p /opt/elasticsearch - name: get elasticsearch from official website shell: wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.16-linux-x86_64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: unzip tar.gz - shell: tar -xzvf elasticsearch-7.17.16-linux-x86_64.tar.gz -C /opt + shell: tar -xzvf elasticsearch-7.17.16-linux-x86_64.tar.gz -C /opt/elasticsearch --strip-components 1 args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: get elasticsearch from official website shell: wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.16-linux-aarch64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == aarch64 -- name: mkdir elasticsearch - shell: mkdir -p /opt/elasticsearch + when: ansible_architecture == "aarch64" - name: unzip tar.gz shell: tar -xzvf elasticsearch-7.17.16-linux-aarch64.tar.gz -C /opt/elasticsearch --strip-components 1 args: chdir: /root/ - when: arch == aarch64 -- name: unzip tar.gz - shell: tar -xzvf elasticsearch-7.17.16-linux-x86_64.tar.gz -C /opt/elasticsearch --strip-components 1 - args: - chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "aarch64" - name: add elastic user shell: groupadd elastic && useradd elastic -g elastic - name: chown /opt/elasticsearch/ diff --git a/playbook/roles/filebeat/tasks/main.yml b/playbook/roles/filebeat/tasks/main.yml index 
ef3aca7762ae9068f0363849768eb81207cae638..3c4820ad77748ff40eba09131fa1298219a6436b 100644 --- a/playbook/roles/filebeat/tasks/main.yml +++ b/playbook/roles/filebeat/tasks/main.yml @@ -1,30 +1,25 @@ --- +- name: mkdir filebeat + shell: mkdir -p /opt/filebeat - name: get filebeat from official website shell: wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.16-linux-x86_64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: unzip tar.gz - shell: tar -xzvf filebeat-7.17.16-linux-x86_64.tar.gz -C /opt + shell: tar -xzvf filebeat-7.17.16-linux-x86_64.tar.gz -C /opt/filebeat --strip-components 1 args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: get filebeat from official website shell: wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.16-linux-arm64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == aarch64 -- name: mkdir filebeat - shell: mkdir -p /opt/filebeat + when: ansible_architecture == "aarch64" - name: unzip tar.gz shell: tar -xzvf filebeat-7.17.16-linux-arm64.tar.gz -C /opt/filebeat --strip-components 1 args: chdir: /root/ - when: arch == aarch64 -- name: unzip tar.gz - shell: tar -xzvf filebeat-7.17.16-linux-x86_64.tar.gz -C /opt/filebeat --strip-components 1 - args: - chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "aarch64" - name: start filebeat shell: nohup /opt/filebeat/filebeat -c /opt/filebeat/filebeat.yml & \ No newline at end of file diff --git a/playbook/roles/kibana/tasks/main.yml b/playbook/roles/kibana/tasks/main.yml index 00e2f2b406368cc6ed3400a8e5a361ae58f8a4db..dbebd5867e0b1ba3b97dbc8a6bdc4e658ebeed55 100644 --- a/playbook/roles/kibana/tasks/main.yml +++ b/playbook/roles/kibana/tasks/main.yml @@ -1,31 +1,26 @@ --- +- name: mkdir kibana + shell: mkdir -p /opt/kibana - name: get kibana from official website shell: wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.16-linux-x86_64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: unzip tar.gz - shell: tar -xzvf kibana-7.17.16-linux-x86_64.tar.gz -C /opt + shell: tar -xzvf kibana-7.17.16-linux-x86_64.tar.gz -C /opt/kibana --strip-components 1 args: chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "x86_64" - name: get kibana from official website shell: wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.16-linux-aarch64.tar.gz --no-check-certificate args: chdir: /root/ - when: arch == aarch64 -- name: mkdir kibana - shell: mkdir -p /opt/kibana + when: ansible_architecture == "aarch64" - name: unzip tar.gz shell: tar -xzvf kibana-7.17.16-linux-aarch64.tar.gz -C /opt/kibana --strip-components 1 args: chdir: /root/ - when: arch == aarch64 -- name: unzip tar.gz - shell: tar -xzvf kibana-7.17.16-linux-x86_64.tar.gz -C /opt/kibana --strip-components 1 - args: - chdir: /root/ - when: arch == x86_64 + when: ansible_architecture == "aarch64" - name: change es config template: src=templates/kibana/kibana.yml.j2 dest=/opt/kibana/config/kibana.yml - name: start kibana diff --git a/playbook/templates/elasticsearch/elasticsearch.yml.j2 b/playbook/templates/elasticsearch/elasticsearch.yml.j2 index 0b2cc6f9d9e939b86abe3d92c0e6834f5f0a5aea..992beaced24d709fb201fc05b1c07def966967c2 100644 --- a/playbook/templates/elasticsearch/elasticsearch.yml.j2 +++ b/playbook/templates/elasticsearch/elasticsearch.yml.j2 @@ -34,7 +34,7 @@ path.data:
/home/elastic/data # # Path to log files: # -path.logs: /opt/elasticsearch-7.17.16/logs +path.logs: /opt/elasticsearch/logs # # ----------------------------------- Memory ----------------------------------- # @@ -53,7 +53,13 @@ bootstrap.memory_lock: true # By default Elasticsearch is only accessible on localhost. Set a different # address here to expose this node on the network: # +{% if 'standalone' in groups %} +network.host: {{ groups['standalone'][0] }} +{% else %} + +network.host: {{ groups['elasticsearch'][0] }} +{% endif %} # # By default Elasticsearch listens for HTTP traffic on the first free port it # finds starting at 9200. Set a specific HTTP port here: @@ -95,3 +101,12 @@ discovery.type: single-node # Refer to the following documentation for instructions. # # https://www.elastic.co/guide/en/elasticsearch/reference/7.16/configuring-stack-security.html +{% if needSSL | bool %} + +xpack.security.enabled: true +xpack.security.authc.api_key.enabled: true +xpack.security.http.ssl.enabled: true +xpack.security.http.ssl.verification_mode: none +xpack.security.http.ssl.keystore.path: certs/elasticsearch/http.p12 +xpack.security.http.ssl.truststore.path: certs/elasticsearch/http.p12 +{% endif %} diff --git a/playbook/templates/filebeat/filebeat.yml.j2 b/playbook/templates/filebeat/filebeat.yml.j2 new file mode 100644 index 0000000000000000000000000000000000000000..5c35c2d060d5c7474c720a8b2b10aa910a1fafee --- /dev/null +++ b/playbook/templates/filebeat/filebeat.yml.j2 @@ -0,0 +1,238 @@ +###################### Filebeat Configuration Example ######################### + +# This file is an example configuration file highlighting only the most common +# options. The filebeat.reference.yml file from the same directory contains all the +# supported options with more comments. You can use it as a reference. +# +# You can find the full configuration reference here: +# https://www.elastic.co/guide/en/beats/filebeat/index.html + +# For more available modules and options, please see the filebeat.reference.yml sample +# configuration file. + +# ============================== Filebeat inputs =============================== + +filebeat.inputs: + +# Each - is an input. Most options can be set at the input level, so +# you can use different inputs for various configurations. +# Below are the input specific configurations. + +# filestream is an input for collecting log messages from files. +- type: filestream + + # Unique ID among all inputs, an ID is required. + id: my-filestream-id + + # Change to true to enable this input configuration. + enabled: false + + # Paths that should be crawled and fetched. Glob based paths. + paths: + - /var/log/*.log + #- c:\programdata\elasticsearch\logs\* + + # Exclude lines. A list of regular expressions to match. It drops the lines that are + # matching any regular expression from the list. + #exclude_lines: ['^DBG'] + + # Include lines. A list of regular expressions to match. It exports the lines that are + # matching any regular expression from the list. + #include_lines: ['^ERR', '^WARN'] + + # Exclude files. A list of regular expressions to match. Filebeat drops the files that + # are matching any regular expression from the list. By default, no files are dropped. + #prospector.scanner.exclude_files: ['.gz$'] + + # Optional additional fields.
These fields can be freely picked + # to add additional information to the crawled log files for filtering + #fields: + # level: debug + # review: 1 + +# ============================== Filebeat modules ============================== + +filebeat.config.modules: + # Glob pattern for configuration loading + path: ${path.config}/modules.d/*.yml + + # Set to true to enable config reloading + reload.enabled: false + + # Period on which files under path should be checked for changes + #reload.period: 10s + +# ======================= Elasticsearch template setting ======================= + +setup.template.settings: + index.number_of_shards: 1 + #index.codec: best_compression + #_source.enabled: false + + +# ================================== General =================================== + +# The name of the shipper that publishes the network data. It can be used to group +# all the transactions sent by a single shipper in the web interface. +#name: + +# The tags of the shipper are included in their own field with each +# transaction published. +#tags: ["service-X", "web-tier"] + +# Optional fields that you can specify to add additional information to the +# output. +#fields: +# env: staging + +# ================================= Dashboards ================================= +# These settings control loading the sample dashboards to the Kibana index. Loading +# the dashboards is disabled by default and can be enabled either by setting the +# options here or by using the `setup` command. +#setup.dashboards.enabled: false + +# The URL from where to download the dashboards archive. By default this URL +# has a value which is computed based on the Beat name and version. For released +# versions, this URL points to the dashboard archive on the artifacts.elastic.co +# website. +#setup.dashboards.url: + +# =================================== Kibana =================================== + +# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API. +# This requires a Kibana endpoint configuration. +setup.kibana: + + # Kibana Host + # Scheme and port can be left out and will be set to the default (http and 5601) + # In case you specify an additional path, the scheme is required: http://localhost:5601/path + # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601 + #host: "localhost:5601" + + # Kibana Space ID + # ID of the Kibana Space into which the dashboards should be loaded. By default, + # the Default Space will be used. + #space.id: + +# =============================== Elastic Cloud ================================ + +# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/). + +# The cloud.id setting overwrites the `output.elasticsearch.hosts` and +# `setup.kibana.host` options. +# You can find the `cloud.id` in the Elastic Cloud web UI. +#cloud.id: + +# The cloud.auth setting overwrites the `output.elasticsearch.username` and +# `output.elasticsearch.password` settings. The format is `<user>:<pass>`. +#cloud.auth: + +# ================================== Outputs =================================== + +# Configure what output to use when sending the data collected by the beat. + +# ---------------------------- Elasticsearch Output ---------------------------- +output.elasticsearch: + # Array of hosts to connect to. + {% if 'standalone' in groups %} + hosts: ["localhost:9200"] + {% else %} + hosts: ["{{ groups['elasticsearch'][0] }}:9200"] + {% endif %} + + # Protocol - either `http` (default) or `https`.
+ {% if needSSL | bool %} + protocol: "https" + {% endif %} + + # Authentication credentials - either API key or username/password. + #api_key: "id:api_key" + + {% if needSSL | bool %} + username: "{{ es_name }}" + password: "{{ es_name_password }}" + {% endif %} + +# ------------------------------ Logstash Output ------------------------------- +#output.logstash: + # The Logstash hosts + #hosts: ["localhost:5044"] + + # Optional SSL. By default is off. + # List of root certificates for HTTPS server verifications + #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"] + + # Certificate for SSL client authentication + #ssl.certificate: "/etc/pki/client/cert.pem" + + # Client Certificate Key + #ssl.key: "/etc/pki/client/cert.key" + +# ================================= Processors ================================= +processors: + - add_host_metadata: + when.not.contains.tags: forwarded + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + - add_kubernetes_metadata: ~ + +# ================================== Logging =================================== + +# Sets log level. The default log level is info. +# Available log levels are: error, warning, info, debug +#logging.level: debug + +# At debug level, you can selectively enable logging only for some components. +# To enable all selectors use ["*"]. Examples of other selectors are "beat", +# "publisher", "service". +#logging.selectors: ["*"] + +# ============================= X-Pack Monitoring ============================== +# Filebeat can export internal metrics to a central Elasticsearch monitoring +# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The +# reporting is disabled by default. + +# Set to true to enable the monitoring reporter. +#monitoring.enabled: false + +# Sets the UUID of the Elasticsearch cluster under which monitoring data for this +# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch +# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch. +#monitoring.cluster_uuid: + +# Uncomment to send the metrics to Elasticsearch. Most settings from the +# Elasticsearch output are accepted here as well. +# Note that the settings should point to your Elasticsearch *monitoring* cluster. +# Any setting that is not set is automatically inherited from the Elasticsearch +# output configuration, so if you have the Elasticsearch output configured such +# that it is pointing to your Elasticsearch monitoring cluster, you can simply +# uncomment the following line. +#monitoring.elasticsearch: + +# ============================== Instrumentation =============================== + +# Instrumentation support for the filebeat. +#instrumentation: + # Set to true to enable instrumentation of filebeat. + #enabled: false + + # Environment in which filebeat is running on (eg: staging, production, etc.) + #environment: "" + + # APM Server hosts to report instrumentation results to. + #hosts: + # - http://localhost:8200 + + # API Key for the APM Server(s). + # If api_key is set then secret_token will be ignored. + #api_key: + + # Secret token for the APM Server(s).
+ #secret_token: + + +# ================================= Migration ================================== + +# This allows to enable 6.7 migration aliases +#migration.6_to_7.enabled: true + diff --git a/playbook/templates/kibana/kibana.yml.j2 b/playbook/templates/kibana/kibana.yml.j2 index 6f29a0f63eb0eb67ebd88137a2d1d8f8bc5c3229..3b0ca228773aa9da1340d20485bac057bc61e794 100644 --- a/playbook/templates/kibana/kibana.yml.j2 +++ b/playbook/templates/kibana/kibana.yml.j2 @@ -29,8 +29,11 @@ server.host: "0.0.0.0" #server.name: "your-hostname" # The URLs of the Elasticsearch instances to use for all your queries. +{% if 'standalone' in groups %} elasticsearch.hosts: ["http://localhost:9200"] - +{% else %} +elasticsearch.hosts: ["http://{{ groups['elasticsearch'][0] }}:9200"] +{% endif %} # Kibana uses an index in Elasticsearch to store saved searches, visualizations and # dashboards. Kibana creates a new index if the index doesn't already exist. #kibana.index: ".kibana" @@ -51,9 +54,12 @@ elasticsearch.password: "{{ es_name_password }}" # Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively. # These settings enable SSL for outgoing requests from the Kibana server to the browser. -#server.ssl.enabled: false -#server.ssl.certificate: /path/to/your/server.crt -#server.ssl.key: /path/to/your/server.key +{% if needSSL | bool %} + +server.ssl.enabled: true +server.ssl.certificate: {{ kibana_ssl_crt_file }} +server.ssl.key: {{ kibana_ssl_key_file }} +{% endif %} # Optional settings that provide the paths to the PEM-format SSL certificate and key files. # These files are used to verify the identity of Kibana to Elasticsearch and are required when @@ -63,10 +69,14 @@ elasticsearch.password: "{{ es_name_password }}" # Optional setting that enables you to specify a path to the PEM file for the certificate # authority for your Elasticsearch instance. -#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ] +{% if needSSL | bool %} + +elasticsearch.ssl.certificateAuthorities: [ "{{ kibana_elastic_ca_file }}" ] # To disregard the validity of SSL certificates, change this setting's value to 'none'. -#elasticsearch.ssl.verificationMode: full +elasticsearch.ssl.verificationMode: none + +{% endif %} # Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of # the elasticsearch.requestTimeout setting.
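# ---------------------------------------------------------------------------
# Usage note (a sketch, not part of the patch): with the inventory and playbook
# added above, a multi-machine rollout would be started roughly as follows; the
# file names come from this change, the working directory is assumed to be the
# repository root, and credentials, needSSL, and certificate paths are read
# from playbook/group_vars/all.
#
#   ansible-playbook -i playbook/host-multi-machine playbook/multi-machine.yml
#
# A standalone install is assumed to use a separate inventory defining a
# [standalone] group with the single host, which is the group the templates
# above key on when choosing network.host and elasticsearch.hosts.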