Setting Up an ELK Log Audit System
I. Installing Elasticsearch
1. Download and extract Elasticsearch
[root@node-1 ~]# wget -P /opt https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.12.1-linux-x86_64.tar.gz
[root@node-1 ~]# tar zxvf /opt/elasticsearch-7.12.1-linux-x86_64.tar.gz -C /usr/local/
[root@node-1 ~]# cd /usr/local
[root@node-1 local]# mv elasticsearch-7.12.1/ elasticsearch/
[root@node-1 local]# cd elasticsearch/
[root@node-1 elasticsearch]# cp config/elasticsearch.yml config/elasticsearch.bak
2. Edit the configuration file
[root@node-1 elasticsearch]# cat config/elasticsearch.yml
# ======================== Elasticsearch Configuration =========================
#
# NOTE: Elasticsearch comes with reasonable defaults for most settings.
# Before you set out to tweak and tune the configuration, make sure you
# understand what are you trying to accomplish and the consequences.
#
# The primary way of configuring a node is via this file. This template lists
# the most important settings you may want to configure for a production cluster.
#
# Please consult the documentation for further information on configuration options:
# https://www.elastic.co/guide/en/elasticsearch/reference/index.html
#
# ---------------------------------- Cluster -----------------------------------
#
# Use a descriptive name for your cluster:
#
cluster.name: my-application # cluster name
#
# ------------------------------------ Node ------------------------------------
#
# Use a descriptive name for the node:
#
node.name: node-1 # node name
#
# Add custom attributes to the node:
#
#node.attr.rack: r1
#
# ----------------------------------- Paths ------------------------------------
#
# Path to directory where to store the data (separate multiple locations by comma):
#
#path.data: /path/to/data # path where ES stores its data
#
# Path to log files:
#
#path.logs: /path/to/logs # path where ES stores its logs
#
# ----------------------------------- Memory -----------------------------------
#
# Lock the memory on startup:
#
bootstrap.memory_lock: true # lock process memory to keep ES from using swap, which degrades performance
#
# Make sure that the heap size is set to about half the memory available
# on the system and that the owner of the process is allowed to use this
# limit.
#
# Elasticsearch performs poorly when the system is swapping the memory.
#
# ---------------------------------- Network -----------------------------------
#
# By default Elasticsearch is only accessible on localhost. Set a different
# address here to expose this node on the network:
#
network.host: 192.168.10.10 # bind address
#
# By default Elasticsearch listens for HTTP traffic on the first free port it
# finds starting at 9200. Set a specific HTTP port here:
#
http.port: 9200 # custom HTTP port, default 9200
#
# For more information, consult the network module documentation.
#
# --------------------------------- Discovery ----------------------------------
#
# Pass an initial list of hosts to perform discovery when this node is started:
# The default list of hosts is ["127.0.0.1", "[::1]"]
#
#discovery.seed_hosts: ["host1", "host2"] # list of cluster nodes
#
# Bootstrap the cluster using an initial set of master-eligible nodes:
#
#cluster.initial_master_nodes: ["node-1", "node-2"]
cluster.initial_master_nodes: ["node-1"] # initial master-eligible nodes
#
# For more information, consult the discovery and cluster formation module documentation.
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit names when deleting indices:
#
action.destructive_requires_name: true # require explicit index names when deleting or closing indices (no wildcards or _all); recommended true in production
3. Create an elasticsearch user (Elasticsearch cannot be started as root)
[root@node-1 elasticsearch]# groupadd elasticsearch
[root@node-1 elasticsearch]# useradd -g elasticsearch elasticsearch
[root@node-1 elasticsearch]# chown -R elasticsearch:elasticsearch /usr/local/elasticsearch
4. Add a hosts entry
[root@node-1 elasticsearch]# vi /etc/hosts
192.168.10.10 node-1
5. Modify /etc/sysctl.conf
[root@node-1 elasticsearch]# vi /etc/sysctl.conf
vm.max_map_count = 262144
[root@node-1 elasticsearch]# sysctl -p
6. Modify /etc/security/limits.conf to raise the open file and process limits
* soft nofile 100000
* hard nofile 100000
* soft nproc 100000
* hard nproc 100000
7. Modify /etc/systemd/system.conf; the changes below take effect after a system reboot
DefaultLimitNOFILE=65536
DefaultLimitNPROC=32000
DefaultLimitMEMLOCK=infinity
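After the reboot, a quick sanity check can confirm the limits took effect. Run it as the elasticsearch user created above, since the * entries in limits.conf do not apply to root:
[elasticsearch@node-1 ~]$ ulimit -n    # max open files; should reflect the 100000 set above
[elasticsearch@node-1 ~]$ ulimit -u    # max user processes; should reflect the 100000 set above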
8. Start Elasticsearch
[root@node-1 elasticsearch]# su elasticsearch
[elasticsearch@node-1 elasticsearch]$ ./bin/elasticsearch -d # run in the background
[elasticsearch@node-1 elasticsearch]$ ps -ef | grep elasticsearch # check that it started
root 2634 2029 0 15:49 pts/0 00:00:00 su elasticsearch
elastic+ 2826 1 83 15:49 pts/0 00:00:50 /usr/local/elasticsearch/jdk/bin/java -Xshare:auto -Des.networkaddress.cache.ttl=60 -Des.networkaddress.cache.negative.ttl=10 -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -XX:+ShowCodeDetailsInExceptionMessages -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dio.netty.allocator.numDirectArenas=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.locale.providers=SPI,COMPAT --add-opens=java.base/java.io=ALL-UNNAMED -XX:+UseG1GC -Djava.io.tmpdir=/tmp/elasticsearch-1947920176384077306 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=data -XX:ErrorFile=logs/hs_err_pid%p.log -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m -Xms1885m -Xmx1885m -XX:MaxDirectMemorySize=988807168 -XX:G1HeapRegionSize=4m -XX:InitiatingHeapOccupancyPercent=30 -XX:G1ReservePercent=15 -Des.path.home=/usr/local/elasticsearch -Des.path.conf=/usr/local/elasticsearch/config -Des.distribution.flavor=default -Des.distribution.type=tar -Des.bundled_jdk=true -cp /usr/local/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -d
elastic+ 2847 2826 0 15:49 pts/0 00:00:00 /usr/local/elasticsearch/modules/x-pack-ml/platform/linux-x86_64/bin/controller
elastic+ 2893 2635 0 15:50 pts/0 00:00:00 grep --color=auto elasticsearch
9. Verify access in a browser
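Browsing to http://192.168.10.10:9200/ should return the node's JSON banner. The same check can be done from the shell (a quick sketch using the address configured above):
[root@node-1 ~]# curl 'http://192.168.10.10:9200/'                        # node name, version and tagline
[root@node-1 ~]# curl 'http://192.168.10.10:9200/_cluster/health?pretty'  # status should be green or yellow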
II. Installing Logstash
[root@node-1 ~]# wget -P /opt https://artifacts.elastic.co/downloads/logstash/logstash-7.12.1-linux-x86_64.tar.gz
[root@node-1 ~]# tar zxvf /opt/logstash-7.12.1-linux-x86_64.tar.gz -C /usr/local/
[root@node-1 ~]# mv /usr/local/logstash-7.12.1/ /usr/local/logstash
III. Installing Filebeat
1. Download and extract Filebeat
[root@node-1 ~]# wget -P /opt/ https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.12.1-linux-x86_64.tar.gz
[root@node-1 ~]# tar zxf /opt/filebeat-7.12.1-linux-x86_64.tar.gz -C /usr/local/
[root@node-1 ~]# mv /usr/local/filebeat-7.12.1-linux-x86_64/ /usr/local/filebeat
[root@node-1 ~]# cd /usr/local/filebeat
[root@node-1 filebeat]# cp filebeat.yml filebeat.yml.bak
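The edited filebeat.yml is not shown in the original post. A minimal sketch, assuming you want Filebeat to read a local audit log (the /var/log/secure path below is only an example) and ship it to the Logstash Beats input on port 5044; remember to comment out the default output.elasticsearch section so that only one output is enabled:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/secure            # example log to collect; adjust to your own audit sources

output.logstash:                 # send events to Logstash instead of directly to Elasticsearch
  hosts: ["192.168.10.10:5044"]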
2. Start Filebeat
[root@node-1 filebeat]# ./filebeat &
[root@node-1 filebeat]# ps -ef| grep filebeat
root 2959 2029 0 16:34 pts/0 00:00:00 ./filebeat
root 2973 2029 0 16:39 pts/0 00:00:00 grep --color=auto filebeat
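Filebeat also provides test subcommands that are useful once Logstash is running (they will fail while the Beats input on port 5044 is not yet listening):
[root@node-1 filebeat]# ./filebeat test config    # validate filebeat.yml syntax
[root@node-1 filebeat]# ./filebeat test output    # check connectivity to the configured output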
3. Start Logstash
[root@node-1 ~]# cd /usr/local/logstash/bin/
[root@node-1 bin]# cp ../config/logstash-sample.conf ../config/logstash.conf
[root@node-1 bin]# ./logstash -f ../config/logstash.conf &
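The contents of logstash.conf are not shown in the original post. For reference, the stock logstash-sample.conf pipeline listens for Beats connections on port 5044 and forwards events to Elasticsearch; adjusted to the node above, it looks roughly like this (a sketch, assuming the default 7.12 sample layout):
input {
  beats {
    port => 5044
  }
}

output {
  elasticsearch {
    hosts => ["http://192.168.10.10:9200"]
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
}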
4. Verify the service is running
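For example (a quick check, assuming the default ports):
[root@node-1 bin]# ss -lntp | grep -E '5044|9600'        # 5044 = Beats input, 9600 = Logstash monitoring API
[root@node-1 bin]# curl 'http://127.0.0.1:9600/?pretty'  # Logstash node info via the monitoring API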
IV. Installing Kibana
1. Download and extract the package
[root@node-1 ~]# wget -P /opt https://artifacts.elastic.co/downloads/kibana/kibana-7.12.1-linux-x86_64.tar.gz
[root@node-1 ~]# tar zxf /opt/kibana-7.12.1-linux-x86_64.tar.gz -C /usr/local/
[root@node-1 ~]# mv /usr/local/kibana-7.12.1-linux-x86_64/ /usr/local/kibana
[root@node-1 ~]# cd /usr/local/kibana/config
[root@node-1 config]# cp kibana.yml kibana.yml.bak
2. Edit the kibana.yml configuration file
server.port: 5601
server.host: "192.168.10.10"
elasticsearch.hosts: ["http://192.168.10.10:9200"]
3. Start Kibana
[root@node-1 config]# cd /usr/local/kibana/bin/
[root@node-1 bin]# ./kibana --allow-root &
4. Verify startup
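For example (assuming the host and port configured above; Kibana can take a minute to come up):
[root@node-1 bin]# ss -lntp | grep 5601                          # Kibana listening port
[root@node-1 bin]# curl 'http://192.168.10.10:5601/api/status'   # returns Kibana status JSON once ready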
V. View the logs in a browser