共計 5510 個字符,預計需要花費 14 分鐘才能閱讀完成。
這篇文章主要介紹“kafka-2.11 集群的搭建步驟”,在日常操作中,相信很多人在 kafka-2.11 集群的搭建步驟問題上存在疑惑,丸趣 TV 小編查閱了各式資料,整理出簡單好用的操作方法,希望對大家解答“kafka-2.11 集群的搭建步驟”的疑惑有所幫助!接下來,請跟著丸趣 TV 小編一起來學習吧!
producer: 消息生產者, 向 kafka broker 發消息的客戶端
consumer: 消息消費者, 向 kafka broker 取消息的客戶端
Topic: 發布到 Kafka 集群消息的一個類別
broker: 一臺 kafka 服務器就是一個 broker, 一個集群由多個 broker 組成, 一個 broker 可以容納多個 topic
1. 下載安裝 zookeeper(必須先安裝 zookeeper 和 jdk)
[root@node1 ~]# wget http://mirror.bit.edu.cn/apache/zookeeper/stable/zookeeper-3.4.13.tar.gz
[root@node1 ~]# tar xvf zookeeper-3.4.13.tar.gz -C /opt/
[root@node1 ~]# cd /opt/zookeeper-3.4.13/conf/
[root@node1 conf]# vim zoo.cfg
tickTime=2000
dataDir=/opt/zookeeper-3.4.13/data
clientPort=2181
initLimit=5
syncLimit=2
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
[root@node1 conf]# mkdir /opt/zookeeper-3.4.13/data
[root@node1 conf]# cd /opt/zookeeper-3.4.13/data  # myid 必須要在 data 目錄下面, 否則會報錯
[root@node1 data]# cat myid
1
[root@node1 zookeeper-3.4.13]# cd ..
[root@node1 opt]# scp -r zookeeper-3.4.13 node2:/opt/
[root@node1 opt]# scp -r zookeeper-3.4.13 node3:/opt/
2. 在 node2 修改 myid 文件
[root@node2 opt]# cat /opt/zookeeper-3.4.13/data/myid
2
[root@node2 opt]#
3. 在 node3 修改 myid 文件
[root@node3 ~]# cat /opt/zookeeper-3.4.13/data/myid
3
[root@node3 ~]# zkServer.sh start  # 每個節點都要啟動 zookeeper 服務
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
Starting zookeeper … STARTED
[root@node3 opt]# zkCli.sh  # 使用客戶端登錄
3. 下載安裝 kafka(三個節點一樣)
[root@node1 ~]# wget http://mirror.bit.edu.cn/apache/kafka/2.2.0/kafka_2.11-2.2.0.tgz
[root@node1 ~]# tar xvf kafka_2.11-2.2.0.tgz -C /opt/
[root@node1 ~]# cd /opt/kafka_2.11-2.2.0/
[root@node1 kafka_2.11-2.2.0]# cd config/
[root@node1 config]# vim server.properties
broker.id=0  # 每個節點的 id 不一樣
zookeeper.connect=172.16.8.23:2181,172.16.8.24:2181,172.16.8.178:2181  # zookeeper 集群 IP 地址
[root@node1 config]# cd /opt/
[root@node1 opt]# scp -r kafka_2.11-2.2.0/ node2:/opt/
[root@node1 opt]# scp -r kafka_2.11-2.2.0/ node3:/opt/
[root@node1 opt]# cd kafka_2.11-2.2.0/bin/
[root@node1 bin]# ./kafka-server-start.sh ../config/server.properties  # 三臺 kafka 都要后臺啟動服務
4. 查看 kafka 服務是否啟動正常
[root@node1 bin]# jps
30851 Kafka
3605 HMaster
12728 QuorumPeerMain
12712 DFSZKFailoverController
31656 Jps
3929 DataNode
15707 JournalNode
32188 NameNode
14335 ResourceManager
[root@node1 bin]# netstat -antulp | grep 30851
tcp6 0 0 :::9092 :::* LISTEN 30851/java
tcp6 0 0 :::37161 :::* LISTEN 30851/java
tcp6 0 0 172.16.8.23:40754 172.16.8.178:9092 ESTABLISHED 30851/java
tcp6 0 0 172.16.8.23:9092 172.16.8.23:39704 ESTABLISHED 30851/java
tcp6 0 0 172.16.8.23:45480 172.16.8.24:9092 ESTABLISHED 30851/java
tcp6 0 0 172.16.8.23:45294 172.16.8.178:2181 ESTABLISHED 30851/java
tcp6 0 0 172.16.8.23:39704 172.16.8.23:9092 ESTABLISHED 30851/java
[root@node1 bin]#
5. 使用命令接口
[root@node1 bin]# ./kafka-topics.sh --create --zookeeper node1:2181 --topic tongcheng --replication-factor 3 --partitions 3  # 創建 topic
Created topic tongcheng.
[root@node1 bin]# ./kafka-topics.sh --list --zookeeper node1:2181  # 查看 topic
tongcheng
[root@node1 bin]# ./kafka-topics.sh --delete --zookeeper node1:2181 --topic tongcheng  # 刪除 topic
Topic tongcheng is marked for deletion.
Note: This will have no impact if delete.topic.enable is not set to true.
[root@node1 bin]# ./kafka-topics.sh --list --zookeeper node1:2181
[root@node1 bin]#
6. 發送消息 / 接收消息
[root@node1 bin]# ./kafka-console-producer.sh --broker-list node2:9092 --topic ttt
tongcheng is goods;
tong is goods;
cheng is goods!
——– 接收端 ————-
[root@node2 bin]# ./kafka-console-consumer.sh --topic ttt --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning
tongcheng is goods;
tong is goods;
cheng is goods!
[root@node2 bin]# ./kafka-topics.sh --describe --zookeeper node1:2181 --topic ttt  # 查看分區數和副本數
Topic:ttt	PartitionCount:1	ReplicationFactor:1	Configs:
	Topic: ttt	Partition: 0	Leader: 0	Replicas: 0	Isr: 0
[root@node2 bin]#
7. 查看 zookeeper 數據
[root@node1 bin]# ./zkCli.sh
Connecting to localhost:2181
[zk: localhost:2181(CONNECTED) 0] ls /
[cluster, controller, brokers, zookeeper, hadoop-ha, admin, isr_change_notification, log_dir_event_notification, controller_epoch, consumers, latest_producer_id_block, config, hbase]
[zk: localhost:2181(CONNECTED) 1]
8. 接收組消息 (當生產者發送消息時, 組中只能有一個消費者接收到消息)
[root@node1 bin]# ./kafka-console-producer.sh --broker-list node1:9092 --topic tong  # 在 node1 節點發送消息
—— 啟動兩臺消費者 ———–
[root@node2 bin]# vim ../config/consumer.properties  # 兩臺消費者都要修改
group.id=wuhan
[root@node2 bin]# ./kafka-console-consumer.sh --topic tong --bootstrap-server node1:9092 --consumer.config ../config/consumer.properties
[2019-04-05 20:52:09,152] WARN [Consumer clientId=consumer-1, groupId=wuhan] Error while fetching metadata with correlation id 2 :
9. 在發送端發送消息, 接收端組接收消息
[root@node1 bin]# ./kafka-console-producer.sh --broker-list node1:9092 --topic tong
[2019-04-05 20:51:31,094] INFO [GroupMetadataManager brokerId=0] Removed 0 expired offsets in 0 milliseconds. (kafka.coordinator.group.GroupMetadataManager)
[2019-04-05 20:52:09,114] INFO Creating topic tong with configuration {} and initial partition assignment Map(0 -> ArrayBuffer(2)) (kafka.zk.AdminZkClient)
[2019-04-05 20:52:09,124] INFO [KafkaApi-0] Auto creation of topic tong with 1 partitions and replication factor 1 is successful (kafka.server.KafkaApis)
hello ttt;
———– 接收端 ————–
[root@node2 bin]# ./kafka-console-consumer.sh --topic tong --bootstrap-server node1:9092 --consumer.config ../config/consumer.properties  # 在 node2 節點接收到消息
[2019-04-05 20:52:09,152] WARN [Consumer clientId=consumer-1, groupId=wuhan] Error while fetching metadata with correlation id 2 : {tong=LEADER_NOT_AVAILABLE}
(org.apache.kafka.clients.NetworkClient)
hello ttt;
到此,關于“kafka-2.11 集群的搭建步驟”的學習就結束了,希望能夠解決大家的疑惑。理論與實踐的搭配能更好的幫助大家學習,快去試試吧!若想繼續學習更多相關知識,請繼續關注丸趣 TV 網站,丸趣 TV 小編會繼續努力為大家帶來更多實用的文章!