Hadoop ecosystem setup (3 nodes) - 08. Kafka configuration

Time: 2023-01-08 08:47:09

If jdk and zookeeper were not installed earlier, install them first; if they are already installed, skip straight ahead.

https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html

# ==================================================================Install jdk

mkdir -p /usr/java
tar -zxvf ~/jdk-8u111-linux-x64.tar.gz -C /usr/java
rm -r ~/jdk-8u111-linux-x64.tar.gz
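
# Optional quick check (a sketch, not in the original steps): confirm the unpacked JDK runs
/usr/java/jdk1.8.0_111/bin/java -version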

http://archive.apache.org/dist/zookeeper/

# ==================================================================Install zookeeper
# A zookeeper cluster needs at least 3 servers, and zookeeper must be deployed on every one of them

tar -zxvf ~/zookeeper-3.4.12.tar.gz -C /usr/local
rm -r ~/zookeeper-3.4.12.tar.gz

# https://www.scala-lang.org/download/2.12.4.html
# ==================================================================Install scala

tar -zxvf ~/scala-2.12.4.tgz -C /usr/local
rm -r ~/scala-2.12.4.tgz

# http://archive.apache.org/dist/kafka/0.10.2.1/

# ==================================================================Install kafka

tar -zxvf ~/kafka_2.12-0.10.2.1.tgz -C /usr/local
rm -r ~/kafka_2.12-0.10.2.1.tgz
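
# Optional quick check (a sketch, not in the original steps): confirm both archives unpacked where expected
/usr/local/scala-2.12.4/bin/scala -version
ls /usr/local/kafka_2.12-0.10.2.1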

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $KAFKA_HOME

# ==================================================================node2 node3

mkdir -p /usr/java

# ==================================================================node1

# jdk

# zookeeper

# https://www.cnblogs.com/zcf5522/p/9753539.html
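
# The corresponding copy commands for jdk and zookeeper are not shown here (they are presumably
# covered by the earlier posts in this series); if node2/node3 still lack them, a sketch of the
# equivalent copies would be:
# scp -r $JAVA_HOME node2:/usr/java/
# scp -r $JAVA_HOME node3:/usr/java/
# scp -r $ZOOKEEPER_HOME node2:/usr/local/
# scp -r $ZOOKEEPER_HOME node3:/usr/local/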

# scala
scp -r $SCALA_HOME node2:/usr/local/
scp -r $SCALA_HOME node3:/usr/local/

# ==================================================================Configure kafka

mkdir $KAFKA_HOME/logs

# Configure kafka server.properties
vi $KAFKA_HOME/config/server.properties

broker.id=1
delete.topic.enable=true
auto.create.topics.enable=true
listeners=PLAINTEXT://node1:9092
#listeners=SASL_PLAINTEXT://node1:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka

# ACL authorizer entry class
#authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# This example uses SASL_PLAINTEXT
#security.inter.broker.protocol=SASL_PLAINTEXT
#sasl.mechanism.inter.broker.protocol=PLAIN
#sasl.enabled.mechanisms=PLAIN
# Make admin a super user in this example
#super.users=User:admin;User:kafka

scp -r $KAFKA_HOME node2:/usr/local/
scp -r $KAFKA_HOME node3:/usr/local/
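
# Optional sketch (an alternative to the hand edits on node2/node3 below): apply the per-node
# overrides with sed after copying, assuming the node1 values shown above
# ssh node2 "sed -i 's/^broker.id=1/broker.id=2/; s/node1:9092/node2:9092/' /usr/local/kafka_2.12-0.10.2.1/config/server.properties"
# ssh node3 "sed -i 's/^broker.id=1/broker.id=3/; s/node1:9092/node3:9092/' /usr/local/kafka_2.12-0.10.2.1/config/server.properties"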

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile

# Check the result
echo $KAFKA_HOME

# ==================================================================node2

vi $KAFKA_HOME/config/server.properties

broker.id=2
listeners=PLAINTEXT://node2:9092
#listeners=SASL_PLAINTEXT://node2:9092

# ==================================================================node3

vi $KAFKA_HOME/config/server.properties

broker.id=3
listeners=PLAINTEXT://node3:9092
#listeners=SASL_PLAINTEXT://node3:9092
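
# Optional quick check (a sketch, not in the original steps): on each node, confirm the per-node values differ as intended
grep -E '^(broker.id|listeners)' $KAFKA_HOME/config/server.properties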

# Start the kafka cluster

# ==================================================================node1 node2 node3
# Start zookeeper
zkServer.sh start
zkServer.sh status
# zkServer.sh stop

# ==================================================================node1
# The /kafka path has to be created in ZooKeeper by hand; connect to any one of the ZooKeeper servers with the following command
zkCli.sh create /kafka ''

# ==================================================================node1 node2 node3
# Start kafka
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
# kafka-server-start.sh $KAFKA_HOME/config/server.properties &

# Verify the processes started
jps
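
# Optional check (a sketch, not in the original steps): confirm all three brokers registered under
# the /kafka chroot in ZooKeeper
# zkCli.sh -server node1:2181
# ls /kafka/brokers/ids    # should list [1, 2, 3]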

# ==================================================================kafka cluster test 1

# kafka-topics.sh --delete --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic1
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 1 --topic clustertopic1

# List topics
kafka-topics.sh --list --zookeeper node1:2181,node2:2181,node3:2181/kafka

# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic1

# Produce messages
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic clustertopic1
> This is a message
> This is another message

# Consume messages
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning --topic clustertopic1

# Stop
kafka-server-stop.sh
zkServer.sh stop

# ==================================================================kafka cluster test 2
# kafka-topics.sh --delete --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic2
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic clustertopic2

# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka

# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic2

# Produce messages: start a producer on one server
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic clustertopic2
> This is a message
> This is another message

# Consume messages: start a consumer process on another node
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning --topic clustertopic2
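
# Optional check (a sketch, not in the original steps): with replication-factor 3 the topic survives
# one broker going down; stop kafka on one node, re-run the describe command, and watch the Isr list shrink
# kafka-server-stop.sh                                                      # on node3, for example
# kafka-topics.sh --describe --zookeeper node1:2181/kafka --topic clustertopic2
# kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties        # restart it afterwards
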
# Stop
kafka-server-stop.sh
zkServer.sh stop

shutdown -h now
# Snapshot: kafka

# The settings below are optional and can be skipped for now

# ==================================================================Using SASL/PLAIN authentication

# ==================================================================node1
# Configure the kafka server side (every broker)
vi $KAFKA_HOME/config/server.properties

broker.id=1
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node1:9092
listeners=SASL_PLAINTEXT://node1:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka

# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN

# Authorizer class that enforces the ACLs
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL (access control list) entry is found, allow every operation
allow.everyone.if.no.acl.found=true

# Make admin and kafka super users
super.users=User:admin;User:kafka

# ==================================================================node2
vi $KAFKA_HOME/config/server.properties

broker.id=2
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node2:9092
listeners=SASL_PLAINTEXT://node2:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka

# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN

# Authorizer class that enforces the ACLs
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL (access control list) entry is found, allow every operation
allow.everyone.if.no.acl.found=true

# Make admin and kafka super users
super.users=User:admin;User:kafka

# ==================================================================node3
vi $KAFKA_HOME/config/server.properties

broker.id=3
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node3:9092
listeners=SASL_PLAINTEXT://node3:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka

# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN

# Authorizer class that enforces the ACLs
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL (access control list) entry is found, allow every operation
allow.everyone.if.no.acl.found=true

# Make admin and kafka super users
super.users=User:admin;User:kafka

# ==================================================================node1
# The user_<userName> entries must include the admin user
vi $KAFKA_HOME/config/kafka_server_jaas.conf

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="Admin*123456"
    user_admin="Admin*123456"
    user_kafka="Kafka*123456"
    user_producer="Producer*123456"
    user_consumer="Consumer*123456";
};

# Besides admin, this file defines three more users (kafka, producer and consumer). In the KafkaServer
# section, username and password are the credentials a broker uses to initiate connections to the
# other brokers; with the file above, the admin user handles inter-broker communication. The
# user_<userName> entries define the passwords for every client that connects to the broker,
# including the other brokers. user_<userName> must include the admin user, or startup fails
# (whether user_<userName> must also include the kafka user has not been verified).

# The JAAS file is passed to each broker as a JVM parameter; add the following to the kafka-server-start.sh script
vi $KAFKA_HOME/bin/kafka-server-start.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_server_jaas.conf"
fi
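
# Alternatively (a sketch, not in the original steps): since the start scripts honor a pre-set
# KAFKA_OPTS, the same flag can be exported in the shell instead of editing the script
# export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_server_jaas.conf"
# kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties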

# Configure the kafka client side: how producers/consumers produce/consume under the PLAIN mechanism
# Instead of a single kafka_client_jaas.conf you can create separate files such as producer_jaas.conf and consumer_jaas.conf
vi $KAFKA_HOME/config/kafka_client_jaas.conf

KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="kafka"
    password="Kafka*123456";
};

# vi $KAFKA_HOME/config/producer_jaas.conf
#KafkaClient {
#    org.apache.kafka.common.security.plain.PlainLoginModule required
#    username="producer"
#    password="Producer*123456";
#};

# vi $KAFKA_HOME/config/consumer_jaas.conf
#KafkaClient {
#    org.apache.kafka.common.security.plain.PlainLoginModule required
#    username="consumer"
#    password="Consumer*123456";
#};

# In the KafkaClient section, username and password are the credentials the client uses to
# connect to the broker; in this example the client connects to the broker as the kafka user.
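
# Note (an assumption beyond the original steps): since Kafka 0.10.2 a client can also carry the
# JAAS settings inline via the sasl.jaas.config property instead of a separate JAAS file, e.g. in producer.properties:
# sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="Kafka*123456";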
# Modify the producer and consumer configuration files (watch the spacing in these settings, otherwise startup fails)
vi $KAFKA_HOME/config/producer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN

vi $KAFKA_HOME/config/consumer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN

# When starting the producer and consumer, add the JVM parameter to each
vi $KAFKA_HOME/bin/kafka-console-consumer.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_client_jaas.conf"
fi

vi $KAFKA_HOME/bin/kafka-console-producer.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_client_jaas.conf"
fi

# Alternatively, when using separate JAAS files, add the JVM parameter to each as follows
#vi $KAFKA_HOME/bin/kafka-console-producer.sh
#if [ "x$KAFKA_OPTS" = "x" ]; then
#    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/producer_jaas.conf"
#fi

#vi $KAFKA_HOME/bin/kafka-console-consumer.sh
#if [ "x$KAFKA_OPTS" = "x" ]; then
#    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/consumer_jaas.conf"
#fi

# ==================================================================node1
scp -r $KAFKA_HOME/config/kafka_server_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_client_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/producer.properties node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/consumer.properties node2:$KAFKA_HOME/config/

scp -r $KAFKA_HOME/bin/kafka-server-start.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-consumer.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-producer.sh node2:$KAFKA_HOME/bin/

scp -r $KAFKA_HOME/config/kafka_server_jaas.conf node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_client_jaas.conf node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/producer.properties node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/consumer.properties node3:$KAFKA_HOME/config/

scp -r $KAFKA_HOME/bin/kafka-server-start.sh node3:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-consumer.sh node3:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-producer.sh node3:$KAFKA_HOME/bin/

# Start
# ==================================================================node1 node2 node3
zkServer.sh start
zkServer.sh status

kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ==================================================================node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic1
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181/kafka --replication-factor 1 --partitions 1 --topic sasltopic1

# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka

# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181/kafka --topic sasltopic1

# Grant the producer Write permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic1

# Producer
kafka-console-producer.sh --broker-list node1:9092 --topic sasltopic1 --producer.config $KAFKA_HOME/config/producer.properties
> This is a message
> This is another message

# Grant the consumer Read permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic1

# Grant the consumer Read permission on the group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic1group1

# Consumer
kafka-console-consumer.sh --bootstrap-server node2:9092 --topic sasltopic1 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties
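
# Optional check (a sketch, not in the original steps): list the ACLs that were just added
# kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --list --topic sasltopic1
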
# Stop
kafka-server-stop.sh
zkServer.sh stop

# Start
# ==================================================================node1 node2 node3
zkServer.sh start
zkServer.sh status

kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ==================================================================node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic2
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic sasltopic2

# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka

# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic sasltopic2

# Grant the producer Write permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic2

# Producer
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic sasltopic2 --producer.config $KAFKA_HOME/config/producer.properties
> This is a message
> This is another message

# Grant the consumer Read permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic2

# Grant the consumer Read permission on the group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic2group2

# Consumer
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --topic sasltopic2 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties

# Stop
kafka-server-stop.sh
zkServer.sh stop

shutdown -h now
# Snapshot: kafka_saslplain

# ==================================================================SASL authentication configuration with a multi-node zookeeper ensemble

# ==================================================================node1
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ==================================================================node2
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ==================================================================node3
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ==================================================================node1
vi $KAFKA_HOME/config/kafka_zoo_jaas.conf

ZKServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="kafka"
    password="Kafka*123456"
    user_kafka="Kafka*123456";
};

vi $KAFKA_HOME/bin/zookeeper-server-start.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
    export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_zoo_jaas.conf -Dzookeeper.sasl.serverconfig=ZKServer"
fi

scp -r $KAFKA_HOME/config/kafka_zoo_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_zoo_jaas.conf node3:$KAFKA_HOME/config/

scp -r $KAFKA_HOME/bin/zookeeper-server-start.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/zookeeper-server-start.sh node3:$KAFKA_HOME/bin/
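
# Note (an assumption beyond the original steps): the JVM flag above only takes effect when ZooKeeper
# is launched through $KAFKA_HOME/bin/zookeeper-server-start.sh; when the ensemble is started with
# zkServer.sh as below, the equivalent setting can be placed in $ZOOKEEPER_HOME/conf/java.env:
# export SERVER_JVMFLAGS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_zoo_jaas.conf -Dzookeeper.sasl.serverconfig=ZKServer"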

# Start
# ==================================================================node1 node2 node3
zkServer.sh start
zkServer.sh status

kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ==================================================================node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic3
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic sasltopic3

# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka

# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic sasltopic3

# Grant the producer Write permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic3

# Producer
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic sasltopic3 --producer.config $KAFKA_HOME/config/producer.properties
> This is a message
> This is another message

# Grant the consumer Read permission on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic3

# Grant the consumer Read permission on the group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic3group3

# Consumer
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --topic sasltopic3 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties

# Stop
kafka-server-stop.sh
zkServer.sh stop

shutdown -h now
# Snapshot: kafka_morezookeeper_saslplain