EFK Setup
Contents:
Environment preparation
Environment configuration
Install ZooKeeper: edit the main config file, create the data directory, create the myid file, start the zk service
Install Kafka: edit the main config file, start the service with -daemon (background), test Kafka with a simulated producer and consumer
Install the log collection component: Filebeat
Install Logstash: configs for collecting one log and multiple logs
Install Elasticsearch
Install Kibana
Environment preparation
Three minimal-install CentOS 7 hosts, each with 2 CPU cores and 2 GB of RAM (Kafka runs on all three nodes as well):
192.168.1.9   JDK, ZooKeeper, Filebeat, Kibana
192.168.1.10  JDK, ZooKeeper, Logstash
192.168.1.11  JDK, ZooKeeper, Elasticsearch
Environment configuration
Stop the firewall and put SELinux in permissive mode:
systemctl stop firewalld
setenforce 0
Synchronize time:
yum -y install ntpdate
ntpdate pool.ntp.org
Set the hostname on each node (matching the /etc/hosts entries below):
hostnamectl set-hostname kafka01    # on 192.168.1.9
hostnamectl set-hostname kafka02    # on 192.168.1.10
hostnamectl set-hostname kafka03    # on 192.168.1.11
Edit the /etc/hosts file on all three nodes:
192.168.1.9 kafka01
192.168.1.10 kafka02
192.168.1.11 kafka03
Install the JDK:
rpm -ivh jdk-8u131-linux-x64.rpm
java -version
Install ZooKeeper
tar zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
Edit the main config file
cd /usr/local/zookeeper/conf
mv zoo_sample.cfg zoo.cfg
vim zoo.cfg
server.1=192.168.1.9:2888:3888
server.2=192.168.1.10:2888:3888
server.3=192.168.1.11:2888:3888
Ports: 2181 serves clients (Kafka connects here), 2888 is the intra-cluster communication port, and 3888 is the leader-election port. The full file is shown below for reference.
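The complete zoo.cfg after the edit; everything except the three server lines comes from the defaults in the shipped zoo_sample.cfg:
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/tmp/zookeeper
clientPort=2181
server.1=192.168.1.9:2888:3888
server.2=192.168.1.10:2888:3888
server.3=192.168.1.11:2888:3888
Note that dataDir=/tmp/zookeeper is why the data directory and myid file in the next two steps go under /tmp.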
Create the data directory
mkdir -p /tmp/zookeeper
Create the myid file
On 192.168.1.9:  echo "1" > /tmp/zookeeper/myid
On 192.168.1.10: echo "2" > /tmp/zookeeper/myid
On 192.168.1.11: echo "3" > /tmp/zookeeper/myid
Start the zk service
/usr/local/zookeeper/bin/zkServer.sh start
Check the service status:
/usr/local/zookeeper/bin/zkServer.sh status
One node should report Mode: leader and the other two Mode: follower.
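Typical status output on a follower with ZooKeeper 3.4.x looks like this:
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper/bin/../conf/zoo.cfg
Mode: follower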
ZooKeeper log:
cd /usr/local/zookeeper/conf
tailf zookeeper.out
Install Kafka
Unpack:
tar zxvf kafka_2.11-2.2.0.tgz
mv kafka_2.11-2.2.0 /usr/local/kafka
Edit the main config file
vim /usr/local/kafka/config/server.properties

On 192.168.1.9:
broker.id=0
advertised.listeners=PLAINTEXT://kafka01:9092
zookeeper.connect=192.168.1.9:2181,192.168.1.10:2181,192.168.1.11:2181

On 192.168.1.10:
broker.id=1
advertised.listeners=PLAINTEXT://kafka02:9092
zookeeper.connect=192.168.1.9:2181,192.168.1.10:2181,192.168.1.11:2181

On 192.168.1.11:
broker.id=2
advertised.listeners=PLAINTEXT://kafka03:9092
zookeeper.connect=192.168.1.9:2181,192.168.1.10:2181,192.168.1.11:2181
Start the service with -daemon (run in the background)
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
Verify the broker is listening on port 9092; on kafka03 the netstat output shows:
tcp6       0      0 :::9092                 :::*                    LISTEN      3154/java
View the log:
tailf /usr/local/kafka/logs/server.log
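To confirm that all three brokers registered, you can list the /brokers/ids znode in ZooKeeper; a sketch using the zookeeper-shell tool shipped in Kafka's bin directory (single-command invocation style assumed):
/usr/local/kafka/bin/zookeeper-shell.sh 192.168.1.9:2181 ls /brokers/ids
With all brokers up, the last line of output should be [0, 1, 2].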
Test Kafka
Create a topic named fangzhang01:
/usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.1.9:2181 --replication-factor 2 --partitions 3 --topic fangzhang01
List topics:
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.1.9:2181
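To see where the 3 partitions and their 2 replicas each were placed, the same tool has a describe mode:
/usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper 192.168.1.9:2181 --topic fangzhang01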
Simulate a producer
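The producer command itself didn't survive in the original notes; the console producer that ships with Kafka 2.2 would be invoked like this (the broker address is chosen as an example, any of the three works):
/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.1.9:9092 --topic fangzhang01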
Type a few test messages at the > prompt:
>红烧肉
>宫保鸡丁
Simulate a consumer
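Again the command was lost; the matching console consumer for Kafka 2.2 would be:
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.9:9092 --topic fangzhang01 --from-beginning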
Run the consumer on another node; it prints the messages the producer sent:
红烧肉
宫保鸡丁
Install the log collection component: Filebeat
Configure the yum repo (filebeat.repo):
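The original repo file contents weren't captured; Elastic's documented yum repo definition looks like the following (6.x shown here as an assumption, pin it to whatever stack version you install):
[elastic-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1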
yum -y install filebeat
Edit the config file, filebeat.yml:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
output.kafka:
  enabled: true
  hosts: ["192.168.1.9:9092","192.168.1.10:9092","192.168.1.11:9092"]
  topic: messages
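Filebeat can validate the config syntax and the reachability of the Kafka output before going live; these subcommands exist in Filebeat 6 and later:
filebeat test config
filebeat test output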
Install Logstash
Config for collecting a single log
Create a pipeline file under conf.d:
input {
  kafka {
    bootstrap_servers => ["192.168.1.9:9092,192.168.1.10:9092,192.168.1.11:9092"]
    group_id => "logstash"
    topics => "messages"
    consumer_threads => 5
  }
}
output {
  elasticsearch {
    hosts => "192.168.1.11:9200"
    index => "msg_log-%{+YYYY.MM.dd}"
  }
}
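Before starting, the pipeline can be syntax-checked. A sketch assuming an RPM install of Logstash and a file name of msg_log.conf (the real file name wasn't preserved in the notes):
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/msg_log.conf --config.test_and_exit
systemctl start logstash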
Config for collecting multiple logs
Edit filebeat.yml again, tagging each input with a custom field that is then used as the Kafka topic:
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
  fields:
    log_topics: messages
- type: log
  enabled: true
  paths:
    - /var/log/secure
  fields:
    log_topics: secure
output.kafka:
  enabled: true
  hosts: ["192.168.1.9:9092","192.168.1.10:9092","192.168.1.11:9092"]
  topic: '%{[fields][log_topics]}'
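On the Logstash side you would then subscribe to both topics. A sketch extending the single-log config above; decorate_events and the [@metadata][kafka][topic] field are standard in the kafka input plugin, but the index naming is an assumption:
input {
  kafka {
    bootstrap_servers => "192.168.1.9:9092,192.168.1.10:9092,192.168.1.11:9092"
    group_id => "logstash"
    topics => ["messages","secure"]
    consumer_threads => 5
    decorate_events => true    # adds kafka metadata such as the source topic
  }
}
output {
  elasticsearch {
    hosts => "192.168.1.11:9200"
    index => "%{[@metadata][kafka][topic]}_log-%{+YYYY.MM.dd}"    # one index family per topic
  }
}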
systemctl start filebeat
View the log:
tailf /var/log/filebeat/filebeat
Install Elasticsearch
Edit the config file
Edit elasticsearch.yml on kafka03:
cluster.name: fangzhang01
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.1.11
http.port: 9200
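The notes don't show the start step; with an RPM install Elasticsearch runs as a systemd service, so the assumed commands are:
systemctl daemon-reload
systemctl start elasticsearch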
Verify the port; on kafka03 the netstat output shows:
tcp6       0      0 192.168.1.11:9200       :::*                    LISTEN      6182/java
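A quick functional check is to curl the HTTP endpoint, which returns a JSON banner with the cluster name and version when the node is healthy:
curl http://192.168.1.11:9200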
Install Kibana
Edit kibana.yml on kafka01:
server.port: 5601
server.host: "192.168.1.9"
elasticsearch.hosts: ["http://192.168.1.11:9200"]
Verify the port; on kafka01 the netstat output shows:
tcp        0      0 192.168.1.9:5601        0.0.0.0:*               LISTEN      14558/node