CentOS7部署 ELK5.5.1日志平台

2018/01/18 Linux

1、安装环境说明

两台 CentOS-7.4.1708 操作系统的服务器,配置均为 8G 内存。

IP地址 角色
10.100.4.38 elasticsearch01+redis+logstash
10.100.4.39 elasticsearch02+kibana

2、安装 JDK

两台主机上都要安装JDK

# tar xf jdk-8u121-linux-x64.tar.gz -C /usr/local/
    
# vim /etc/profile.d/jdk.sh 
export JAVA_HOME=/usr/local/jdk1.8.0_121
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    
# source /etc/profile.d/jdk.sh

3、安装 Elasticsearch

3.1 下载RPM包

在其中一台下载,完成后复制到另一台服务器,我这里在39上面操作

root@ops-docker-elk02-bjqw:/usr/local/src # wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.5.1.rpm

3.2 安装elasticsearch

在两台服务器上安装

root@ops-docker-elk02-bjqw:/usr/local/src # yum -y install elasticsearch-5.5.1.rpm

3.3 修改centos7系统文件描述符数量

两台服务器都要修改

root@ops-docker-elk01-bjqw:/usr/local/src # vim /etc/security/limits.conf
# 在文件内容最下面添加
elasticsearch soft memlock unlimited
elasticsearch hard memlock unlimited 
*   soft    nofile  65535
*   hard    nofile  65535
*   soft    nproc   65535
*   hard    nproc   65535

3.4 修改elasticsearch的systemd启动脚本

两台服务器都要修改

root@ops-docker-elk01-bjqw:~ # vim /usr/lib/systemd/system/elasticsearch.service
# 取消下面三行注释
LimitNOFILE=65536 #文件描述符打开数量
LimitNPROC=2048        
LimitMEMLOCK=infinity  #内存锁,如果仅更改limits.conf文件锁住内存不会生效

3.5 修改/etc/elasticsearch/jvm.options 开启jmx监控

两台服务器都要修改

root@ops-docker-elk01-bjqw:~ # vim /etc/elasticsearch/jvm.options
# 修改JVM内存大小将默认的2g 改为 4g
-Xms4g
-Xmx4g
# 添加下面内容在 # set to headless, just in case 行下面
-Dcom.sun.management.jmxremote
-Dcom.sun.management.jmxremote.port=9001
-Dcom.sun.management.jmxremote.local.only=false
-Dcom.sun.management.jmxremote.authenticate=false
-Dcom.sun.management.jmxremote.ssl=false 

3.6 修改 elasticsearch 配置文件

修改38服务器

root@ops-docker-elk01-bjqw:~ # cd /etc/elasticsearch/
root@ops-docker-elk01-bjqw:/etc/elasticsearch # cp elasticsearch.yml{,.bak}
root@ops-docker-elk01-bjqw:/etc/elasticsearch # vim elasticsearch.yml
# 删除原有内容,添加下面内容
cluster.name: bdyg-docker-elasticsearch  
node.name: es-10-100-4-38
node.master: true 
node.data: true  
node.attr.rack: test-hot
path.conf: /etc/elasticsearch
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/log
node.max_local_storage_nodes: 1
bootstrap.memory_lock: true
network.host: 0.0.0.0
http.port: 9200
gateway.recover_after_nodes: 1
gateway.recover_after_time: 10m
gateway.expected_nodes: 1
discovery.zen.minimum_master_nodes: 1
discovery.zen.ping_timeout: 30s
discovery.zen.fd.ping_timeout: 120s
discovery.zen.fd.ping_interval: 30s
discovery.zen.fd.ping_retries: 6
discovery.zen.ping.unicast.hosts: ["10.100.4.38","10.100.4.39"]
monitor.jvm.gc.overhead.debug: 1
monitor.jvm.gc.overhead.info: 3
monitor.jvm.gc.overhead.warn: 10

search.default_search_timeout: 150s
indices.fielddata.cache.size: 20%
indices.breaker.fielddata.limit: 40%
indices.breaker.request.limit: 30%
indices.breaker.total.limit: 60%
http.cors.enabled: true
http.cors.allow-origin: "*"
http.max_content_length: 1024mb
transport.type: netty3
http.type: netty3
thread_pool.bulk.queue_size: 4096
thread_pool.index.queue_size: 1024
thread_pool.search.queue_size: 1024
thread_pool.get.queue_size: 1024
action.destructive_requires_name: true 

修改39服务器,只修改node.name即可

cluster.name: bdyg-docker-elasticsearch  
node.name: es-10-100-4-39
node.master: true 
node.data: true  
node.attr.rack: test-hot
path.conf: /etc/elasticsearch
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/log
node.max_local_storage_nodes: 1
bootstrap.memory_lock: true
network.host: 0.0.0.0
http.port: 9200
gateway.recover_after_nodes: 1
gateway.recover_after_time: 10m
gateway.expected_nodes: 1
discovery.zen.minimum_master_nodes: 1
discovery.zen.ping_timeout: 30s
discovery.zen.fd.ping_timeout: 120s
discovery.zen.fd.ping_interval: 30s
discovery.zen.fd.ping_retries: 6
discovery.zen.ping.unicast.hosts: ["10.100.4.38","10.100.4.39"]
monitor.jvm.gc.overhead.debug: 1
monitor.jvm.gc.overhead.info: 3
monitor.jvm.gc.overhead.warn: 10

search.default_search_timeout: 150s
indices.fielddata.cache.size: 20%
indices.breaker.fielddata.limit: 40%
indices.breaker.request.limit: 30%
indices.breaker.total.limit: 60%
http.cors.enabled: true
http.cors.allow-origin: "*"
http.max_content_length: 1024mb
transport.type: netty3
http.type: netty3
thread_pool.bulk.queue_size: 4096
thread_pool.index.queue_size: 1024
thread_pool.search.queue_size: 1024
thread_pool.get.queue_size: 1024
action.destructive_requires_name: true

3.7 创建存储数据和日志的目录

两台服务器上都要创建

root@ops-docker-elk01-bjqw:/etc/elasticsearch # mkdir -pv /data/elasticsearch/{data,log}

root@ops-docker-elk01-bjqw:/etc/elasticsearch # chown -R elasticsearch.elasticsearch /data/elasticsearch/

3.8 修改/etc/sysconfig/elasticsearch

两台服务器都要修改

root@ops-docker-elk01-bjqw:/etc/elasticsearch # vim /etc/sysconfig/elasticsearch 
JAVA_HOME=/usr/local/jdk1.8.0_121/  #取消注释,并指明JDK目录,否则可能启动时出现找不到JAVA_HOME的错误

3.9 启动 elasticsearch 服务

两台服务器上都启动服务

root@ops-docker-elk01-bjqw:~ # systemctl daemon-reload
root@ops-docker-elk01-bjqw:~ # systemctl enable elasticsearch.service
root@ops-docker-elk01-bjqw:~ # systemctl start elasticsearch.service
root@ops-docker-elk01-bjqw:~ # systemctl status elasticsearch.service

3.10 安装elasticsearch-head 插件

仅在其中一台服务器上安装即可。

克隆 elasticsearch-head 插件代码到本地

root@ops-docker-elk01-bjqw:~ # cd /usr/local/src/
root@ops-docker-elk01-bjqw:/usr/local/src # git clone https://github.com/mobz/elasticsearch-head.git

安装需要 node 如没有需要下载安装

下载地址 https://nodejs.org/dist/v5.10.1/node-v5.10.1-linux-x64.tar.gz

root@ops-docker-elk01-bjqw:/usr/local/src # wget https://nodejs.org/dist/v5.10.1/node-v5.10.1-linux-x64.tar.gz
# 解压到当前目录
root@ops-docker-elk01-bjqw:/usr/local/src # tar xf node-v5.10.1-linux-x64.tar.gz 

# 创建软连接
root@ops-docker-elk01-bjqw:/usr/local/src # ln -sv node-v5.10.1-linux-x64/bin/node  /usr/local/bin/node
root@ops-docker-elk01-bjqw:/usr/local/src # ln -sv node-v5.10.1-linux-x64/bin/npm /usr/local/bin/npm

到elasticsearch-head-master目录下,运行命令:

root@ops-docker-elk01-bjqw:/usr/local/src # cd elasticsearch-head/
root@ops-docker-elk01-bjqw:/usr/local/src/elasticsearch-head # npm install
#如果速度较慢或者安装失败,可以先通过国内镜像源安装 cnpm,再用 cnpm install 代替 npm install:
npm install -g cnpm --registry=https://registry.npm.taobao.org
cnpm install

修改Elasticsearch配置文件:

之前在修改配置文件时已经加入了下面两项,如果你没有下面两项参数需要加上。

# 编辑elasticsearch/config/elasticsearch.yml,加入以下内容:
http.cors.enabled: true
http.cors.allow-origin: "*"

打开elasticsearch-head/Gruntfile.js,找到下面的 connect 属性,在 options 里新增 hostname 配置(设置为 '*' 表示监听所有地址,这里设置为本机 IP 10.100.4.38):

root@ops-docker-elk01-bjqw:/usr/local/src/elasticsearch-head # vim Gruntfile.js 
# 找到connect 在options里添加一个hostname
connect: {
    server: {
        options: {
            hostname: '10.100.4.38',
            port: 9100,
            base: '.',
            keepalive: true
        }
    }
}

启动elasticsearch-head插件:

root@ops-docker-elk01-bjqw:/usr/local/src/elasticsearch-head # nohup /usr/local/src/elasticsearch-head/node_modules/grunt/bin/grunt server & exit

root@ops-docker-elk01-bjqw:~ # ss -tnl|grep 9100
LISTEN     0      511    10.100.4.38:9100                     *:* 

在浏览器里访问http://10.100.4.38:9100

4、安装 Redis

按照规划在 10.100.4.38 服务器上面安装 Redis ,使用yum 安装就行。

root@ops-docker-elk01-bjqw:~ # yum -y install redis

# 修改Redis默认端口和监听地址
root@ops-docker-elk01-bjqw:~ # vim /etc/redis.conf 
bind 10.100.4.38
port 6381

# 启动Redis服务
root@ops-docker-elk01-bjqw:~ # systemctl enable redis.service
Created symlink from /etc/systemd/system/multi-user.target.wants/redis.service to /usr/lib/systemd/system/redis.service.
root@ops-docker-elk01-bjqw:~ # systemctl start redis.service
root@ops-docker-elk01-bjqw:~ # ss -tnl|grep 6381
LISTEN     0      511    10.100.4.38:6381                     *:* 

5、安装 Logstash

按照规划在 10.100.4.38 服务器上面安装。 logstash 使用的是二进制安装包

root@ops-docker-elk01-bjqw:/usr/local/src # tar xf logstash-5.5.1.tar.gz -C /usr/local/

root@ops-docker-elk01-bjqw:/usr/local/src # ln -sv /usr/local/logstash-5.5.1/ /usr/local/logstash
root@ops-docker-elk01-bjqw:/usr/local/src # ln -sv /usr/local/logstash/bin/logstash /usr/local/bin/

创建 logstash 配置文件目录

root@ops-docker-elk01-bjqw:/usr/local/src # mkdir /home/server_config/logstash/

6、安装 Kibana

按照规划在 10.100.4.39 服务器上面安装

root@ops-docker-elk02-bjqw:/usr/local/src # yum -y install kibana-5.5.1-x86_64.rpm 

配置 kibana 配置文件

root@ops-docker-elk02-bjqw:~ # vim /etc/kibana/kibana.yml 
server.port: 5601
server.host: "10.100.4.39"
elasticsearch.url: "http://10.100.4.38:9200"

启动 Kibana 服务

root@ops-docker-elk02-bjqw:~ # systemctl enable kibana.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kibana.service to /etc/systemd/system/kibana.service.
root@ops-docker-elk02-bjqw:~ # systemctl start kibana.service
root@ops-docker-elk02-bjqw:~ # ss -tnl|grep 5601
LISTEN     0      511    10.100.4.39:5601                     *:*   

7、配置 logstash 收集 Tomcat 日志

这里是公司测试环境上的一台运行了 Tomcat 的服务器,首先要在这台服务器上安装 logstash,安装方法同上面的安装方法相同

root@k8s-node01-bjqw:/usr/local/src # tar xf logstash-5.5.1.tar.gz -C /usr/local/
root@k8s-node01-bjqw:/usr/local/src # ln -sv /usr/local/logstash-5.5.1/ /usr/local/logstash
root@k8s-node01-bjqw:/usr/local/src # ln -sv /usr/local/logstash/bin/logstash /usr/local/bin/
root@k8s-node01-bjqw:/usr/local/src # mkdir /home/server_config/logstash/
root@k8s-node01-bjqw:/usr/local/src # cd /home/server_config/logstash/

创建用于收集Tomcat日志的logstash文件

root@k8s-node01-bjqw:/home/server_config/logstash # cat file.conf 
input {
  file {
    type => "hsms-catalina"
    path => "/data/docker/app/tomcat/hsms/logs/catalina.out"
    start_position => "beginning"
    codec => multiline {
      pattern => "^%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:?%{MINUTE}(?::?%{SECOND})"
      negate => true
      what => "previous"
    }
  }
}

output {
  redis {
    port => 6381
    host => ["10.100.4.38"]
    data_type => "list"
    key => "%{type}"
  }
}

input 指定了Tomcat 的catalina.out 日志的位置,output 将日志输出到Redis的消息队列中,这里的Redis就是之前在上面步骤中安装的Redis地址。

启动logstash服务:

root@k8s-node01-bjqw:~ # nohup logstash -f /home/server_config/logstash/file.conf &

root@k8s-node01-bjqw:~ # ps -ef|grep logstash

8、配置从Redis读取数据输出到elasticsearch

在上面步骤中已将 logstash 安装到了 10.100.4.38 服务器上,现在到 38 服务器上配置 logstash

root@ops-docker-elk01-bjqw:~ # cd /home/server_config/logstash/
root@ops-docker-elk01-bjqw:/home/server_config/logstash # cat hsms.conf 
input {
    redis {
        type => "hsms-catalina"
        host => "10.100.4.38"
        port => "6381"
        data_type => "list"
        key => "hsms-catalina"
    }
}

output {
    if [type] == "hsms-catalina"{
        elasticsearch {
           action => "index"
           hosts => ["10.100.4.38:9200","10.100.4.39:9200"]
           index => "hsms-catalina-%{+YYYY.MM.dd}"
        }
    }
}

从Redis中读取出数据,而后输出到 elasticsearch 中。这样我们就能在 Kibana 中查看数据了。

启动 logstash 服务:

root@ops-docker-elk01-bjqw:~ # nohup logstash -f /home/server_config/logstash/hsms.conf &
root@ops-docker-elk01-bjqw:~ # ps -ef|grep logstash

9、查看elasticsearch-head 插件中是否有新创建的索引

在浏览器中输入http://10.100.4.38:9100 ,可以使用NGINX 服务对此页面做basic认证。

10、访问 kibana

访问http://10.100.4.39:5601 即可,我这里做了 Nginx 反向代理,所以使用域名访问。

在 Index name or pattern 这里输入elasticsearch 中Tomcat的日志的索引名称。

然后就能看到日志里的字段信息

点击屏幕左上角的 Discover 就可以看到我们的Tomcat日志啦

重要提示

如果 /etc/elasticsearch/elasticsearch.yml 配置文件中使用了 action.auto_create_index,需要将你自定义的 index 加入其中,否则 ES 中不会出现数据。

Search

    Table of Contents