前一篇文章介绍了如何通过yum安装简单的kubernetes集群,其中etcd是单点部署。本篇我们来搭建etcd集群,方便日后搭建kubernetes HA集群架构。
1,环境配置说明
etcd1 192.168.20.71
etcd2 192.168.20.72
etcd3 192.168.20.73
2,etcd版本
[root@master1 ~]# etcdctl --version
etcdctl version: 3.2.22
API version: 2
3,开始安装
etcd每台机器的安装步骤都一样,etcd2、etcd3参照etcd1的步骤执行即可
安装只需一条命令即可:
yum -y install etcd
4,查看并修改etcd配置文件
#修改etcd配置文件
[root@master1 ~]# ls /etc/etcd/
etcd.conf etcd.conf.back
[root@master1 ~]# grep -v '^#' /etc/etcd/etcd.conf
ETCD_NAME=etcd1
ETCD_DATA_DIR="/var/lib/etcd/etcd1"
ETCD_LISTEN_PEER_URLS="http://192.168.20.71:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.71:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.71:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-test"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.71:2379"
5,启动etcd服务
#etcd启动配置文件为:
[root@master1 ~]# cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" \
--listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \
--listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
--advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \
--initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \
--initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" \
--initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
#(注:原文 LimitNOFILE 的数值缺失,65536 为 yum 安装 etcd 时自带 unit 文件的默认值,请以实际文件为准)

#启动etcd服务
systemctl start etcd
#查看启动状态
systemctl status etcd
6,etcd2、etcd3配置文件
#etcd2配置文件
[root@master2 ~]# cat /etc/etcd/etcd.conf
ETCD_NAME=etcd2
ETCD_DATA_DIR="/var/lib/etcd/etcd2"
ETCD_LISTEN_PEER_URLS="http://192.168.20.72:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.72:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.72:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-test"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.72:2379"

#etcd3 配置文件
[root@master ~]# cat /etc/etcd/etcd.conf
ETCD_NAME=etcd3
ETCD_DATA_DIR="/var/lib/etcd/etcd3"
ETCD_LISTEN_PEER_URLS="http://192.168.20.73:2380"
ETCD_LISTEN_CLIENT_URLS="http://127.0.0.1:2379,http://192.168.20.73:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.73:2380"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.20.71:2380,etcd2=http://192.168.20.72:2380,etcd3=http://192.168.20.73:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-test"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.73:2379"

#分别在etcd2、etcd3上启动etcd服务
systemctl start etcd
#查看cluster状态
[root@master1 ~]# etcdctl cluster-health
member 85b5f1a0537e385d is healthy: got healthy result from http://192.168.20.71:2379
member 9f304c9e0feb949d is healthy: got healthy result from http://192.168.20.72:2379
member ec71f609370df393 is healthy: got healthy result from http://192.168.20.73:2379
cluster is healthy

#列出etcd服务状态
[root@master1 ~]# etcdctl member list
85b5f1a0537e385d: name=etcd1 peerURLs=http://192.168.20.71:2380 clientURLs=http://192.168.20.71:2379 isLeader=false
9f304c9e0feb949d: name=etcd2 peerURLs=http://192.168.20.72:2380 clientURLs=http://192.168.20.72:2379 isLeader=false
ec71f609370df393: name=etcd3 peerURLs=http://192.168.20.73:2380 clientURLs=http://192.168.20.73:2379 isLeader=true

#从列出信息可以看出,目前是etcd3为主节点。
#查看etcd服务启动日志,可通过 tail -f /var/log/message 动态查看
7,至此,etcd集群已配置完成。接下来可以对kubernetes集群apiserver配置文件进行修改,使其指向etcd集群
#修改master节点,apiserver配置文件
[root@master ~]# cat /etc/kubernetes/apiserver
###
## kubernetes system config
KUBE_API_ADDRESS="--address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.20.71:2379,http://192.168.20.72:2379,http://192.168.20.73:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_API_ARGS="--service_account_key_file=/etc/kubernetes/serviceaccount.key"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
#注意:原文此处还有一行重复的 KUBE_API_ARGS="",它会覆盖上面带 --service_account_key_file 的配置,应当删除该重复行,只保留一处 KUBE_API_ARGS 定义

#k8s集群做任何调整后,都需要重启服务
#重启master各组件,可连起来写
systemctl restart kube-apiserver kube-controller-manager kube-scheduler
#重启node1、node2各组件
systemctl restart kubelet kube-proxy

#再次在master节点查看etcd、node集群状态
#测试,可关闭一台etcd服务,创建一个pod,无异常
#通过测试可以得出,etcd集群至少需要2个etcd节点才可以正常工作。
8,如何创建pod,参见下一篇文章。