环境: centos7
192.168.32.153: jdk, zookeeper, kafka, filebeat, elasticsearch
192.168.32.154: jdk, zookeeper, kafka, logstash
192.168.32.155: jdk, zookeeper, kafka, kibana
1:时间同步
[root@localhost ~]# ntpdate pool.ntp.org
2:关闭防火墙
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce 0
3:修改主机名
[root@localhost ~]# hostnamectl set-hostname kafka01   # 在192.168.32.153上执行
[root@localhost ~]# hostnamectl set-hostname kafka02   # 在192.168.32.154上执行
[root@localhost ~]# hostnamectl set-hostname kafka03   # 在192.168.32.155上执行
4:修改hosts文件
192.168.32.153 kafka01
192.168.32.154 kafka02
192.168.32.155 kafka03
5:安装jdk
[root@kafka03 src]# rpm -ivh jdk-8u131-linux-x64.rpm
6:安装zookeeper
[root@kafka01 src]# tar xzf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
cd /usr/local/zookeeper/conf/
mv zoo_sample.cfg zoo.cfg
6.1:编辑zoo.cfg
server.1=192.168.32.153:2888:3888
server.2=192.168.32.154:2888:3888
server.3=192.168.32.155:2888:3888
7:创建data目录
mkdir /tmp/zookeeper
8:配置myid
echo "1" > /tmp/zookeeper/myid   # 在kafka01上执行
echo "2" > /tmp/zookeeper/myid   # 在kafka02上执行
echo "3" > /tmp/zookeeper/myid   # 在kafka03上执行
9:运行zk服务
/usr/local/zookeeper/bin/zkServer.sh start
10:查看zk的状态
[root@kafka03 conf]# /usr/local/zookeeper/bin/zkServer.sh status
正常情况下应为一个leader、两个follower
11:安装kafka
tar zxvf kafka_2.11-2.2.0.tgz
mv kafka_2.11-2.2.0 /usr/local/kafka
12:编辑kafka配置文件
vim /usr/local/kafka/config/server.properties
broker.id=0            # 三台机器分别为0、1、2
advertised.listeners=PLAINTEXT://kafka01:9092   # 主机名分别为kafka01、kafka02、kafka03
zookeeper.connect=192.168.32.153:2181,192.168.32.154:2181,192.168.32.155:2181
13:启动kafka
-daemon 表示以后台守护进程方式启动，后面跟要引用的配置文件
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
验证:
netstat -lptnu | grep 9092
tcp6  0  0 :::9092  :::*  LISTEN  9814/java
14:创建一个topic
[root@kafka01 logs]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.32.153:2181 --replication-factor 2 --partitions 3 --topic wg007
Created topic wg007.
15:模拟生产者:
/usr/local/kafka/bin/kafka-console-producer.sh --broker-list 192.168.32.153:9092 --topic wg007
16:模拟消费者:
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.32.153:9092 --topic wg007 --from-beginning
16.2:查看当前的topic
[root@kafka02 bin]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.32.153:2181
__consumer_offsets
msg
wg007
17:安装filebeat(收集日志的)
[root@kafka01 src]# rpm -ivh filebeat-6.8.12-x86_64.rpm
18:编辑filebeat.yml
cd /etc/filebeat
mv filebeat.yml filebeat.yml.bak
vim filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/messages
output.kafka:
  enabled: true
  hosts: ["192.168.32.153:9092","192.168.32.154:9092","192.168.32.155:9092"]
  topic: msg
19:安装logstash
rpm -ivh logstash-6.6.0.rpm
vim /etc/logstash/conf.d/msg.conf
input {
  kafka {
    bootstrap_servers => ["192.168.32.153:9092,192.168.32.154:9092,192.168.32.155:9092"]
    group_id => "logstash"
    topics => "msg"
    consumer_threads => 5
  }
}
output {
  elasticsearch {
    hosts => "192.168.32.153:9200"
    index => "msg-%{+YYYY.MM.dd}"
  }
}
20:安装elasticsearch
rpm -ivh elasticsearch-6.6.2.rpm   # 版本需与kibana/logstash同为6.6.x
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: wg007
node.name: node-1
network.host: 192.168.32.153
http.port: 9200
21:安装kibana
rpm -ivh kibana-6.6.2-x86_64.rpm
vim /etc/kibana/kibana.yml
server.port: 5601
server.host: "192.168.32.155"
elasticsearch.hosts: ["http://192.168.32.153:9200"]