在公司,使用dns切换,可能会比keepalived+haproxy,更精简的易维护。
毕竟,高可用只是偶尔切换,不是时时切换。
且dns解析在自己可控时,更不会影响k8s线上使用了。
(部分代码,由于担心太冗长,已使用xxx代替,这些xxx完全可以自己生成的)
使用步骤如下:
先在每个master上安装etcd,运行如下命令:
sh k8s.sh k8s.xxx.com.cn etcd
等每个master上的etcd安装好之后,运行如下命令:
sh k8s.sh k8s.xxx.com.cn master
最后,在每个node节点上,运行如下命令:
sh k8s.sh k8s.xxx.com.cn node
#! /usr/bin/env bash set -e set -u set -x #使用系统的PATH环境 export PATH=$(echo $PATH) #接收及判断参数合法性 #sh k8s.sh k8s.xxx.com.cn etcd #sh k8s.sh k8s.xxx.com.cn master #sh k8s.sh k8s.xxx.com.cn node K8S_DOMAIN=$1 OP_STEP=$2 #判断参数 if [ $# -ne 2 ]; then echo "wrong args." echo "usage `basename $0` [K8S_DOMAIN] [init|cert|etcd|master|node]" exit 110 fi #每一个新集群,此处必须修改 #指定使用集群ip的网络端口,THIS_HOST必是HOST_1,HOST_2,HOST_3中的一个 ETH=eth0 HOST_1=1.2.3.4 HOST_2=1.2.3.5 HOST_3=1.2.3.6 THIS_HOST=$(ip addr show ${ETH} |grep inet|grep -v inet6|awk '{print $2}'|awk -F '/' '{print $1}') LOCAL_HOST=$(hostname) LOCAL_HOST_L=${LOCAL_HOST,,} #定义常量,如有必要,可有更多常量加入 #安装k8s版本 K8S_VER=v1.12.6 #用于判断是否安装了kubelet等软件 KUBE_VER=${K8S_VER/v/} #apiserver端口 K8S_API_PORT=6443 #k8s加入的token K8S_JOIN_TOKEN=xxxxxx.xxxxxxxxxabcdef #可以操作kubectl的普通用户 General_user=k8s #生成cert证书的cfssl软件目录 cs="./cfssl" csj="./cfssljson" #cert证书在k8s中的安装目录 pki_dir="/etc/kubernetes/pki" #所有docker的来源仓库,不再需要外网 REGISTRY=harbor.demo.cn/3rd_part/k8s.gcr.io #etcd集群版本(最好和k8s要求的版本一致3.2.24) ETCD_VERSION=3.2.18 #etcd服务端口及其它集群状态(peer之间,未用https) ETCD_CLI_PORT=2379 ETCD_CLU_PORT=2380 TOKEN=pa-k8s-etcd-token CLUSTER_STATE=new CLUSTER=${HOST_1}=http://${HOST_1}:${ETCD_CLU_PORT},${HOST_2}=http://${HOST_2}:${ETCD_CLU_PORT},${HOST_3}=http://${HOST_3}:${ETCD_CLU_PORT} etcd_data_dir=/etcd/etcd-data #定义当前目录 CUR_DIR=$(cd $(dirname $0); pwd) #判断eth0 IP是否在集群内 function ip_in_cluster() { if [[ ${THIS_HOST} != ${HOST_1} && ${THIS_HOST} != ${HOST_2} && ${THIS_HOST} != ${HOST_3} ]]; then echo "ip not in the etcd cluster host." exit 110 fi } #每次全新生成文件 function if_file_exist_del() { if [ -e $1 ]; then rm -f $1 fi } #重置iptables function iptables_clear() { iptables -P INPUT ACCEPT iptables -P FORWARD ACCEPT iptables -P OUTPUT ACCEPT iptables -t nat -P PREROUTING ACCEPT iptables -t nat -P POSTROUTING ACCEPT iptables -t nat -P OUTPUT ACCEPT iptables -t mangle -P PREROUTING ACCEPT iptables -t mangle -P OUTPUT ACCEPT iptables -F iptables -t nat -F iptables -t mangle -F iptables -X iptables -t nat -X iptables -t mangle -X } #服务器初化,尽量标准化 function system_init() { #停止firewall防火墙,并禁止开机自启动 systemctl stop firewalld.service systemctl disable firewalld.service #重置iptables iptables_clear #禁止selinux sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config set +e setenforce 0 set -e #关闭swap交换内存,K8S强制的,不然安装会报错 swapoff -a sed -i 's/.*swap.*/#&/' /etc/fstab #配置k8s内核参数 k8s_kernel_conf=/etc/sysctl.d/k8s.conf if_file_exist_del $k8s_kernel_conf cat<<EOF>$k8s_kernel_conf net.ipv4.ip_forward = 1 net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 fs.may_detach_mounts = 1 vm.overcommit_memory=1 vm.panic_on_oom=0 fs.inotify.max_user_watches=89100 fs.file-max=52706963 fs.nr_open=52706963 net.netfilter.nf_conntrack_max=2310720 vm.swappiness=0 EOF #使配置生效 sysctl -p sysctl --system # 安装工具及加载br_netfilter, ipvs yum install bridge-utils ipset ipvsadm sysstat libseccomp conntrack conntrack-tools socat -y modprobe br_netfilter modprobe -- ip_vs modprobe -- ipip modprobe -- tun modprobe -- ip_vs_rr modprobe -- ip_vs_wrr modprobe -- ip_vs_sh modprobe -- nf_conntrack_ipv4 modprobe -- nf_conntrack_ipv6 ipvs_no=$(cat /etc/rc.local|grep ip_vs|wc -l) if [ $ipvs_no -eq 0 ]; then echo "modprobe br_netfilter" >> /etc/rc.local echo "modprobe -- ip_vs" >> /etc/rc.local echo "modprobe -- ipip" >> /etc/rc.local echo "modprobe -- tun" >> /etc/rc.local echo "modprobe -- ip_vs_rr" >> /etc/rc.local echo "modprobe -- ip_vs_wrr" >> /etc/rc.local echo "modprobe -- ip_vs_sh" >> /etc/rc.local echo "modprobe -- nf_conntrack_ipv4" >> /etc/rc.local echo "modprobe -- nf_conntrack_ipv6" >> /etc/rc.local fi # 为etcd数据和k8s证书生成目录 mkdir -p ${pki_dir} && mkdir -p ${etcd_data_dir} rm -rf ${pki_dir}/* && mkdir -p ${etcd_data_dir}/* chown -R docker.docker ${pki_dir} ${etcd_data_dir} chmod -R 755 ${pki_dir} ${etcd_data_dir} #安装必须软件,将其cp到指定运行目录 kube_version=$(rpm -qa|grep kubelet) kube_version=${kube_version:=None} if [[ ${kube_version} =~ ${KUBE_VER} ]]; then kubeadm reset -f else set +e yum remove kubeadm -y yum remove kubectl -y yum remove kubelet -y set -e yum localinstall *.rpm -y tar xf cni-plugins-amd64-v0.7.5.tgz -C /opt/cni/bin /bin/cp /usr/bin/kube* /usr/local/bin/ fi #配置docker用户可以命令行的相关命令 k8s_sudoers_conf=/etc/sudoers.d/k8s_sudoers if_file_exist_del $k8s_sudoers_conf cat<<EOF>$k8s_sudoers_conf docker ALL = (root) NOPASSWD:/bin/systemctl restart docker docker ALL = (root) NOPASSWD:/bin/systemctl reload docker docker ALL = (root) NOPASSWD:/bin/systemctl daemon-reload docker ALL = (root) NOPASSWD:/bin/systemctl start kubelet docker ALL = (root) NOPASSWD:/bin/systemctl stop kubelet' docker ALL = (root) NOPASSWD:/bin/systemctl restart kubelet' docker ALL = (root) NOPASSWD:/bin/systemctl status kubelet' docker ALL = (root) NOPASSWD:/usr/local/bin/kubeadm' docker ALL = (root) NOPASSWD:/usr/local/bin/kubectl' EOF #如果以前安装过k8s,清除相关的虚拟网络接口 ifconfig -a|grep -vE '(^[[:space:]]|^$)'|grep -E '(veth|flannel|kube|cni|dummy)'|awk -F ":" '{print $1}'|awk '{for(i=1;i<=NF;i++){print "ip link set " $i " down";}}'|sh ifconfig -a|grep -vE '(^[[:space:]]|^$)'|grep -E '(veth|flannel|kube|cni|dummy)'|awk -F ":" '{print $1}'|awk '{for(i=1;i<=NF;i++){print "ip link delete " $i;}}'|sh modprobe -r ipip modprobe -r ip_gre modprobe ipip modprobe ip_gre #自定义pause的image仓库,摆脱对外上网的依赖 kubelet_sysconfig=/etc/sysconfig/kubelet if_file_exist_del $kubelet_sysconfig cat >$kubelet_sysconfig<<EOF KUBELET_EXTRA_ARGS="--pod-infra-container-image=harbor.paic.com.cn/3rd_part/k8s.gcr.io/pause:3.1" EOF # 启动 systemctl daemon-reload systemctl enable kubelet && systemctl restart kubelet echo "=================================" echo "k8s init system success" } #将相应的文件写入shell,摆脱对太多yaml文件或key文件的依赖 function saPub() { cat<<EOF>saen.pub xxx EOF } function saKey() { cat<<EOF>saen.key xxx EOF } function caCrt() { cat<<EOF>caen.crt xxx EOF } function caKey() { cat<<EOF>caen.key xxx EOF } function frontProxyCrt() { cat<<EOF>front-proxy-ca.crt xxx -----END CERTIFICATE----- EOF } function frontProxyKey() { cat<<EOF>front-proxy-ca.key xxx EOF } function caCsr() { cat<<EOF>ca-csr.json { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "ca": { "expiry": "438000h" } } EOF } function caConfig() { cat<<EOF>ca-config.json { "signing": { "default": { "expiry": "438000h" }, "profiles": { "server": { "expiry": "438000h", "usages": [ "signing", "key encipherment", "server auth" ] }, "client": { "expiry": "438000h", "usages": [ "signing", "key encipherment", "client auth" ] }, "peer": { "expiry": "438000h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } } EOF } function etcdServer() { cat<<EOF>etcd-server.json { "CN": "etcdServer", "hosts": [ "127.0.0.1", "localhost", "${LOCAL_HOST_L}", "${K8S_DOMAIN}", "${HOST_1}", "${HOST_2}", "${HOST_3}" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "O": "etcd", "OU": "etcd Security", "C": "CN", "L": "ShangHai", "ST": "ShangHai" } ] } EOF } function etcdPeer() { cat<<EOF>etcd-peer.json { "CN": "etcdPeer", "hosts": [ "127.0.0.1", "localhost", "${LOCAL_HOST_L}", "${K8S_DOMAIN}", "${HOST_1}", "${HOST_2}", "${HOST_3}" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "O": "etcd", "OU": "etcd Security", "C": "CN", "L": "ShangHai", "ST": "ShangHai" } ] } EOF } function etcdClient() { cat<<EOF>etcd-client.json { "CN": "etcdClient", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "O": "etcd", "OU": "etcd Security", "C": "CN", "L": "ShangHai", "ST": "ShangHai" } ] } EOF } function frontProxyClient() { cat<<EOF>front-proxy-client.json { "CN": "front-proxy-client", "key": { "algo": "rsa", "size": 2048 } } EOF } function apiServer() { cat<<EOF>apiserver.json { "CN": "kube-apiserver", "hosts": [ "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster.local", "10.96.0.1", "${LOCAL_HOST_L}", "${K8S_DOMAIN}", "${HOST_1}", "${HOST_2}", "${HOST_3}" ], "key": { "algo": "rsa", "size": 2048 } } EOF } function apiKubClient() { cat<<EOF>apiserver-kubelet-client.json { "CN": "kube-apiserver-kubelet-client", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "O": "system:masters" } ] } EOF } ######################################### ######################################### #生成kubeadm安装文件 kubeadm_conf=kubeadm-config.yaml if_file_exist_del $kubeadm_conf cat << EOF >$kubeadm_conf apiVersion: kubeadm.k8s.io/v1alpha3 kind: InitConfiguration apiEndpoint: advertiseAddress: ${THIS_HOST} bindPort: ${K8S_API_PORT} bootstrapTokens: - groups: - system:bootstrappers:kubeadm:default-node-token token: ${K8S_JOIN_TOKEN} ttl: 0s usages: - signing - authentication --- apiVersion: kubeadm.k8s.io/v1alpha3 kind: ClusterConfiguration etcd: external: endpoints: - https://${HOST_1}:${ETCD_CLI_PORT} - https://${HOST_2}:${ETCD_CLI_PORT} - https://${HOST_3}:${ETCD_CLI_PORT} caFile: ${pki_dir}/etcd/ca.crt certFile: ${pki_dir}/apiserver-etcd-client.crt keyFile: ${pki_dir}/apiserver-etcd-client.key imageRepository: ${REGISTRY} kubernetesVersion: ${K8S_VER} #controlPlaneEndpoint: ${K8S_DOMAIN}:${K8S_API_PORT} networking: podSubnet: 10.244.0.0/16 serviceSubnet: 10.96.0.0/12 --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration mode: "ipvs" EOF #生成node节点自动续期证书 auto_cet_server=node-auto-cert-server.yaml if_file_exist_del $auto_cet_server cat << EOF >$auto_cet_server apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: annotations: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver rules: - apiGroups: - certificates.k8s.io resources: - certificatesigningrequests/selfnodeserver verbs: - create --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: kubeadm:node-autoapprove-certificate-server roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: system:nodes EOF #生成flannel 网络插件 flannel=flannel.yaml if_file_exist_del $flannel #将相应的文件写入shell,摆脱对太多yaml文件或key文件的依赖 cat << EOF >$flannel xxx EOF #初始化证书,记得延长证书过期时间,及api的ca一定不能和front proxy的ca相同。 function cert_init() { # caCsr set +x rm *.csr&&rm *.json&&rm *.crt&&rm *.key&&rm *.pem&&rm *.pub set -x saPub saKey caCrt caKey frontProxyCrt frontProxyKey caConfig etcdServer etcdPeer etcdClient frontProxyClient apiServer apiKubClient # $cs gencert -initca ca-csr.json |$csj -bare ca # mv ca.pem ca.crt&&mv ca-key.pem ca.key $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=server etcd-server.json|$csj -bare server $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=client etcd-client.json|$csj -bare client $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=peer etcd-peer.json|$csj -bare peer $cs gencert -ca=front-proxy-ca.crt -ca-key=front-proxy-ca.key -config=ca-config.json -profile=client front-proxy-client.json|$csj -bare front-proxy-client $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=server apiserver.json|$csj -bare apiserver $cs gencert -ca=ca.crt -ca-key=ca.key -config=ca-config.json -profile=client apiserver-kubelet-client.json|$csj -bare apiserver-kubelet-client mkdir -p $pki_dir/etcd cp server.pem $pki_dir/etcd/server.crt&cp server-key.pem $pki_dir/etcd/server.key cp client.pem $pki_dir/etcd/healthcheck-client.crt&&cp client-key.pem $pki_dir/etcd/healthcheck-client.key cp client.pem $pki_dir/apiserver-etcd-client.crt&&cp client-key.pem $pki_dir/apiserver-etcd-client.key cp peer.pem $pki_dir/etcd/peer.crt&&cp peer-key.pem $pki_dir/etcd/peer.key cp ca.crt $pki_dir/etcd/ca.crt&&cp ca.key $pki_dir/etcd/ca.key cp front-proxy-ca.crt $pki_dir/front-proxy-ca.crt&&cp front-proxy-ca.key $pki_dir/front-proxy-ca.key cp front-proxy-client.pem $pki_dir/front-proxy-client.crt&&cp front-proxy-client-key.pem $pki_dir/front-proxy-client.key cp ca.crt $pki_dir/ca.crt&&cp ca.key $pki_dir/ca.key cp apiserver.pem $pki_dir/apiserver.crt&cp apiserver-key.pem $pki_dir/apiserver.key cp apiserver-kubelet-client.pem $pki_dir/apiserver-kubelet-client.crt&&cp apiserver-kubelet-client-key.pem $pki_dir/apiserver-kubelet-client.key cp sa.pub $pki_dir/sa.pub&&cp sa.key $pki_dir/sa.key rm *.csr&&rm *.json&&rm *.crt&&rm *.key&&rm *.pem&&rm *.pub echo "=================================" echo "all k8s cert(include etcd) create success" } # etcd容器化安装,最好需要在所有master节点上先安装好etcd function etcd_install() { # 如果有以前数据,先清除 set +e docker stop etcd && docker rm etcd rm -rf ${etcd_data_dir}/* systemctl restart docker set -e # 运行docker docker run \ -d \ -p ${ETCD_CLI_PORT}:${ETCD_CLI_PORT} \ -p ${ETCD_CLU_PORT}:${ETCD_CLU_PORT} \ --volume=${etcd_data_dir}:${etcd_data_dir} \ --volume=${pki_dir}:${pki_dir} \ --name etcd ${REGISTRY}/etcd-amd64:${ETCD_VERSION} \ /usr/local/bin/etcd \ --data-dir=/etcd-data --name ${THIS_HOST} \ --initial-advertise-peer-urls http://${THIS_HOST}:${ETCD_CLU_PORT} \ --listen-peer-urls http://0.0.0.0:${ETCD_CLU_PORT} \ --advertise-client-urls https://${THIS_HOST}:${ETCD_CLI_PORT} \ --listen-client-urls https://0.0.0.0:${ETCD_CLI_PORT} \ --initial-cluster ${CLUSTER} \ --initial-cluster-state ${CLUSTER_STATE} \ --initial-cluster-token ${TOKEN} \ --cert-file=${pki_dir}/etcd/server.crt \ --key-file=${pki_dir}/etcd/server.key \ --trusted-ca-file=${pki_dir}/etcd/ca.crt echo "=================================" echo "etcd start success" } function etcd_reset() { set +e docker stop etcd rm -rf ${etcd_data_dir}/* docker rm etcd set -e } #master安装及附带操作 function master_install(){ ipvsadm -C kubeadm init --config $kubeadm_conf mkdir -p $HOME/.kube \cp -f /etc/kubernetes/admin.conf $HOME/.kube/config chown $(id -u):$(id -g) $HOME/.kube/config #为docker生成config文件 General_user_HOME=`cat /etc/passwd |grep -e ^${General_user} |awk -F: '{print $6}'` mkdir -p ${General_user_HOME}/.kube \cp -f /etc/kubernetes/admin.conf ${General_user_HOME}/.kube/config chown -R $(id -u ${General_user}):$(id -g ${General_user}) ${General_user_HOME}/.kube chown $(id -u ${General_user}):$(id -g ${General_user}) ${General_user_HOME}/.kube/config #应用flannel及节点证书自动验证 kubectl apply -f $auto_cet_server kubectl apply -f $flannel rm $kubeadm_conf rm $auto_cet_server rm $flannel echo "=================================" echo "master install success(include flannel and node cert auto approv)" } #node节点加入命令,由于设置了token为0s,所以token可以固定下来。 function node_join(){ system_init kubeadm join ${K8S_DOMAIN}:${K8S_API_PORT} --token ${K8S_JOIN_TOKEN} --discovery-token-unsafe-skip-ca-verification echo "=================================" echo "node join success" } #命令shell选项,产生分叉 case ${OP_STEP} in "etcd") ip_in_cluster system_init cert_init etcd_install ;; "etcd_reset") ip_in_cluster etcd_reset ;; "master") ip_in_cluster master_install ;; "node") node_join ;; *) echo "usage `basename $0` [K8S_DOMAIN] [etcd|master|node]" ;; esac