GlusterFS is an open-source distributed file system with strong scale-out capability: it can grow to several petabytes of storage and serve thousands of clients. It aggregates the disk storage resources of multiple servers into a single global namespace to provide shared file storage.
GlusterFS is highly scalable, highly available, and high-performance, and because its design has no metadata server, the service has no single point of failure.
- GlusterFS installation requirements
- Three nodes (k8s-master03, k8s-node01, k8s-node02)
- Each node must have at least one raw block device attached (e.g. an empty local disk) for heketi to use. These devices must contain no data, because heketi will format and partition them. In short, each node needs an empty local disk; if a disk was used before, wipe it first (see the sketch below).
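If a candidate disk carries leftover partition tables or LVM signatures, heketi's device add will fail. A minimal, destructive cleanup sketch (assumes the spare disk is /dev/sdd on every node; verify the device name with lsblk before running):
lsblk /dev/sdd        # confirm this really is the empty 50G disk
wipefs -a /dev/sdd    # removes filesystem/RAID/LVM signatures; destroys any data on the disk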
Deploying GlusterFS
- Add a 50G disk to each of k8s-master03, k8s-node01 and k8s-node02. All three machines now have an extra disk, /dev/sdd:
[root@k8s-master03 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sdd 8:48 0 50G 0 disk
- Install on all three nodes:
yum -y install centos-release-gluster7.noarch
yum -y install glusterfs-server glusterfs-cli glusterfs-geo-replication
systemctl enable glusterd && systemctl start glusterd && systemctl status glusterd
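Before continuing, it is worth confirming that glusterd really is active on all three nodes; a small sketch, assuming root SSH access to the hosts:
for host in 192.168.102.73 192.168.102.74 192.168.102.75; do
  ssh root@$host 'systemctl is-active glusterd'    # expect "active" three times
done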
- Install heketi on the master01 node
modprobe dm_snapshot && modprobe dm_mirror && modprobe dm_thin_pool
yum install -y centos-release-gluster
yum install -y heketi heketi-client
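Note that modprobe only loads dm_snapshot, dm_mirror and dm_thin_pool for the current boot. To make them persist across reboots you can declare them in a modules-load.d file; a sketch (the file name heketi.conf is arbitrary):
cat > /etc/modules-load.d/heketi.conf <<EOF
dm_snapshot
dm_mirror
dm_thin_pool
EOF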
- Configure heketi.json
[root@k8s-master01 heketi]# cat heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": false,
  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin@com"
    },
    "_user": "User only has access to volumes endpoint",
    "user": {
      "key": "user@admin"
    }
  },
  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",
    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },
    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },
    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",
    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}
[root@k8s-master01 heketi]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
[root@k8s-master01 heketi]# chown heketi:heketi /etc/heketi/heketi_key
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.102.73
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.102.73'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.102.74
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.102.74'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@192.168.102.75
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/etc/heketi/heketi_key.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@192.168.102.75'"
and check to make sure that only the key(s) you wanted were added.
[root@k8s-master01 heketi]# systemctl enable heketi
[root@k8s-master01 heketi]# systemctl start heketi
[root@k8s-master01 heketi]# systemctl status heketi
[root@k8s-master01 heketi]# curl http://127.0.0.1:8080/hello
Hello from Heketi
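The /hello endpoint does not exercise authentication, so it is also worth checking that the JWT admin key from heketi.json is accepted; a sketch using heketi-cli:
heketi-cli --server http://127.0.0.1:8080 --user admin --secret admin@com cluster list
# prints an empty "Clusters:" list for now, since no topology has been loaded yet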
- Set up the GlusterFS cluster
Create the topology.json file:
# vim topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.102.73"
              ],
              "storage": [
                "192.168.102.73"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdd"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.102.74"
              ],
              "storage": [
                "192.168.102.74"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdd"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "192.168.102.75"
              ],
              "storage": [
                "192.168.102.75"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdd"
          ]
        }
      ]
    }
  ]
}
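A syntax error in this file only surfaces as a confusing error at load time, so it is cheap to validate the JSON first; a sketch assuming jq is installed:
jq -e . /root/topology.json >/dev/null && echo "topology.json is valid JSON"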
- Initialize heketi (load the topology)
[root@k8s-master01 ~]# heketi-cli --server http://127.0.0.1:8080 --user admin --secret admin@com topology load --json=/root/topology.json
Creating cluster ... ID: 449e2bf90d0035fb773adc14e8fa6c58
Allowing file volumes on cluster.
Allowing block volumes on cluster.
Creating node 192.168.102.73 ... ID: 4a49fa88df44baab4459e81cc99cb9da
    Adding device /dev/sdd ... OK
Creating node 192.168.102.74 ... ID: 0aa3789e12d2894284051035e675f0b5
    Adding device /dev/sdd ... OK
Creating node 192.168.102.75 ... ID: 2c609ca994b4b5a1ee925b7184138eb4
    Adding device /dev/sdd ... OK
[root@k8s-master01 ~]# heketi-cli --user admin --secret admin@com topology info --server http://localhost:8080
Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
    File: true
    Block: true
    Volumes:
    Nodes:
        Node Id: 0aa3789e12d2894284051035e675f0b5
        State: online
        Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
        Zone: 1
        Management Hostnames: 192.168.102.74
        Storage Hostnames: 192.168.102.74
        Devices:
            Id:d399d7d2446c2ed0237011387c2f7391 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
                Bricks:
        Node Id: 2c609ca994b4b5a1ee925b7184138eb4
        State: online
        Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
        Zone: 1
        Management Hostnames: 192.168.102.75
        Storage Hostnames: 192.168.102.75
        Devices:
            Id:51b4e5810f2ac199404377132c805666 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
                Bricks:
        Node Id: 4a49fa88df44baab4459e81cc99cb9da
        State: online
        Cluster Id: 449e2bf90d0035fb773adc14e8fa6c58
        Zone: 1
        Management Hostnames: 192.168.102.73
        Storage Hostnames: 192.168.102.73
        Devices:
            Id:cc6ab4e8472db465cf59dff58a626015 Name:/dev/sdd State:online Size (GiB):49 Used (GiB):0 Free (GiB):49
                Bricks:
[root@k8s-master01 ~]#
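Before wiring this into Kubernetes, you can smoke-test end-to-end provisioning by hand; the sketch below creates a 1 GiB replica-3 volume and deletes it again (substitute the volume ID printed by the create command):
heketi-cli --server http://127.0.0.1:8080 --user admin --secret admin@com volume create --size=1 --replica=3
heketi-cli --server http://127.0.0.1:8080 --user admin --secret admin@com volume delete <VOLUME_ID>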
- Create the Secret and StorageClass
# Generate the Secret resource; the "key" value must be base64-encoded
[root@k8s-master01 ~]# echo -n "admin@com"|base64
YWRtaW5AY29t
[root@k8s-master01 ~]# vim heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  # base64 encoded password. E.g.: echo -n "mypassword" | base64
  key: YWRtaW5AY29t
type: kubernetes.io/glusterfs
[root@k8s-master01 ~]# kubectl create -f heketi-secret.yaml
secret/heketi-secret created
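To confirm the Secret round-trips to the admin key heketi expects, decode it back; a one-line check:
kubectl get secret heketi-secret -o jsonpath='{.data.key}' | base64 -d    # should print admin@com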
# Configure the StorageClass
[root@k8s-master01 ~]# vim gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
parameters:
  resturl: "http://192.168.102.71:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
  volumetype: "replicate:3"
[root@k8s-master01 ~]# kubectl create -f gluster-heketi-storageclass.yaml
[root@k8s-master01 ~]# kubectl describe storageclass gluster-heketi-storageclass
Name: gluster-heketi-storageclass
IsDefaultClass: No
Annotations: <none>
Provisioner: kubernetes.io/glusterfs
Parameters: restauthenabled=true,resturl=http://192.168.102.71:8080,restuser=admin,secretName=heketi-secret,secretNamespace=default,volumetype=replicate:3
AllowVolumeExpansion: <unset>
MountOptions: <none>
ReclaimPolicy: Delete
VolumeBindingMode: Immediate
Events: <none>
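Optionally, if you want PVCs that omit storageClassName to land on this class, mark it as the cluster default with the standard annotation; a sketch:
kubectl patch storageclass gluster-heketi-storageclass \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'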
Parameter explanations:
- provisioner: the storage provisioner; it varies with the backend storage.
- reclaimPolicy: defaults to "Delete", meaning that deleting the PVC also deletes the corresponding PV and the backend volume and bricks (LVM); set it to "Retain" to keep the data, which must then be cleaned up manually.
- resturl: the URL of the heketi API service.
- restauthenabled: optional, defaults to "false"; must be set to "true" when the heketi service has authentication enabled.
- restuser: optional; the username to use when authentication is enabled.
- secretNamespace: optional; when authentication is enabled, the namespace of the Secret (here, the namespace that uses the persistent storage).
- secretName: optional; when authentication is enabled, the heketi password must be stored in a Secret resource.
- volumetype: optional; sets the volume type and its parameters. If no volume type is given, the provisioner decides. For example, "volumetype: replicate:3" is a replicate volume with 3 replicas; "volumetype: disperse:4:2" is a disperse volume with 4 data bricks and 2 redundancy bricks; "volumetype: none" is a distribute volume. (A PVC sketch to exercise the class follows below.)
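A minimal PVC sketch to smoke-test dynamic provisioning (the name test-gluster-pvc is made up for illustration; remember reclaimPolicy is Delete, so removing the PVC also removes the backend volume):
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-gluster-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: gluster-heketi-storageclass
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc test-gluster-pvc    # STATUS should become Bound once heketi provisions the volume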
MongoDB in practice on the gluster-storageclass
- Create the storage class
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-storageclass
provisioner: kubernetes.io/glusterfs
parameters:
  resturl: "http://192.168.102.71:8080"
  restuser: admin
  secretName: heketi-secret
  secretNamespace: default
  volumetype: replicate:3
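Apply it and confirm the class is registered (the file name gluster-storageclass.yaml is assumed):
kubectl apply -f gluster-storageclass.yaml
kubectl get storageclass    # should now list gluster-storageclass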
- Deploy the MongoDB replica set
---
# Source: mongodb/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mongodb-replicaset
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
secrets:
  - name: mongodb-replicaset
automountServiceAccountToken: true
---
# Source: mongodb/templates/common-scripts-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-replicaset-common-scripts
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: mongodb
data:
  startup-probe.sh: |
    #!/bin/bash
    mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
  readiness-probe.sh: |
    #!/bin/bash
    # Run the proper check depending on the version
    [[ $(mongod -version | grep "db version") =~ ([0-9]+\.[0-9]+\.[0-9]+) ]] && VERSION=${BASH_REMATCH[1]}
    . /opt/bitnami/scripts/libversion.sh
    VERSION_MAJOR="$(get_sematic_version "$VERSION" 1)"
    VERSION_MINOR="$(get_sematic_version "$VERSION" 2)"
    VERSION_PATCH="$(get_sematic_version "$VERSION" 3)"
    if [[ ( "$VERSION_MAJOR" -ge 5 ) || ( "$VERSION_MAJOR" -ge 4 && "$VERSION_MINOR" -ge 4 && "$VERSION_PATCH" -ge 2 ) ]]; then
        mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.hello().isWritablePrimary || db.hello().secondary' | grep -q 'true'
    else
        mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval 'db.isMaster().ismaster || db.isMaster().secondary' | grep -q 'true'
    fi
  ping-mongodb.sh: |
    #!/bin/bash
    mongosh $TLS_OPTIONS --port $MONGODB_PORT_NUMBER --eval "db.adminCommand('ping')"
---
# Source: mongodb/templates/replicaset/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-replicaset-scripts
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: mongodb
data:
  setup.sh: |-
    #!/bin/bash
    . /opt/bitnami/scripts/mongodb-env.sh
    . /opt/bitnami/scripts/libfs.sh
    . /opt/bitnami/scripts/liblog.sh
    . /opt/bitnami/scripts/libvalidations.sh
    if is_empty_value "$MONGODB_ADVERTISED_PORT_NUMBER"; then
        export MONGODB_ADVERTISED_PORT_NUMBER="$MONGODB_PORT_NUMBER"
    fi
    info "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
    info "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER"
    # Check for existing replica set in case there is no data in the PVC
    # This is for cases where the PVC is lost or for MongoDB caches without
    # persistence
    current_primary=""
    if is_dir_empty "${MONGODB_DATA_DIR}/db"; then
        info "Data dir empty, checking if the replica set already exists"
        current_primary=$(mongosh admin --host "mongodb-replicaset-0.mongodb-replicaset-headless.default.svc.cluster.local:27017,mongodb-replicaset-1.mongodb-replicaset-headless.default.svc.cluster.local:27017" --eval 'db.runCommand("ismaster")' | awk -F\' '/primary/ {print $2}')
        if ! is_empty_value "$current_primary"; then
            info "Detected existing primary: ${current_primary}"
        fi
    fi
    if ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" == "$current_primary" ]]; then
        info "Advertised name matches current primary, configuring node as a primary"
        export MONGODB_REPLICA_SET_MODE="primary"
    elif ! is_empty_value "$current_primary" && [[ "$MONGODB_ADVERTISED_HOSTNAME:$MONGODB_ADVERTISED_PORT_NUMBER" != "$current_primary" ]]; then
        info "Current primary is different from this node. Configuring the node as replica of ${current_primary}"
        export MONGODB_REPLICA_SET_MODE="secondary"
        export MONGODB_INITIAL_PRIMARY_HOST="${current_primary%:*}"
        export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="${current_primary#*:}"
        export MONGODB_SET_SECONDARY_OK="yes"
    elif [[ "$MY_POD_NAME" = "mongodb-replicaset-0" ]]; then
        info "Pod name matches initial primary pod name, configuring node as a primary"
        export MONGODB_REPLICA_SET_MODE="primary"
    else
        info "Pod name doesn't match initial primary pod name, configuring node as a secondary"
        export MONGODB_REPLICA_SET_MODE="secondary"
        export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
    fi
    if [[ "$MONGODB_REPLICA_SET_MODE" == "secondary" ]]; then
        export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
        export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
        export MONGODB_ROOT_PASSWORD=""
        export MONGODB_EXTRA_USERNAMES=""
        export MONGODB_EXTRA_DATABASES=""
        export MONGODB_EXTRA_PASSWORDS=""
        export MONGODB_ROOT_PASSWORD_FILE=""
        export MONGODB_EXTRA_USERNAMES_FILE=""
        export MONGODB_EXTRA_DATABASES_FILE=""
        export MONGODB_EXTRA_PASSWORDS_FILE=""
    fi
    exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
  setup-hidden.sh: |-
    #!/bin/bash
    . /opt/bitnami/scripts/mongodb-env.sh
    echo "Advertised Hostname: $MONGODB_ADVERTISED_HOSTNAME"
    echo "Advertised Port: $MONGODB_ADVERTISED_PORT_NUMBER"
    echo "Configuring node as a hidden node"
    export MONGODB_REPLICA_SET_MODE="hidden"
    export MONGODB_INITIAL_PRIMARY_ROOT_USER="$MONGODB_ROOT_USER"
    export MONGODB_INITIAL_PRIMARY_ROOT_PASSWORD="$MONGODB_ROOT_PASSWORD"
    export MONGODB_INITIAL_PRIMARY_PORT_NUMBER="$MONGODB_PORT_NUMBER"
    export MONGODB_ROOT_PASSWORD=""
    export MONGODB_EXTRA_USERNAMES=""
    export MONGODB_EXTRA_DATABASES=""
    export MONGODB_EXTRA_PASSWORDS=""
    export MONGODB_ROOT_PASSWORD_FILE=""
    export MONGODB_EXTRA_USERNAMES_FILE=""
    export MONGODB_EXTRA_DATABASES_FILE=""
    export MONGODB_EXTRA_PASSWORDS_FILE=""
    exec /opt/bitnami/scripts/mongodb/entrypoint.sh /opt/bitnami/scripts/mongodb/run.sh
---
# Source: mongodb/templates/arbiter/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mongodb-replicaset-arbiter-headless
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: arbiter
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: tcp-mongodb
      port: 27017
      targetPort: mongodb
  selector:
    app.kubernetes.io/name: mongodb
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/component: arbiter
---
# Source: mongodb/templates/replicaset/headless-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mongodb-replicaset-headless
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: mongodb
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: "mongodb"
      port: 27017
      targetPort: mongodb
  selector:
    app.kubernetes.io/name: mongodb
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/component: mongodb
---
# Source: mongodb/templates/arbiter/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongodb-replicaset-arbiter
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: arbiter
spec:
  serviceName: mongodb-replicaset-arbiter-headless
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: mongodb
      app.kubernetes.io/instance: mongodb-replicaset
      app.kubernetes.io/component: arbiter
  template:
    metadata:
      labels:
        app.kubernetes.io/name: mongodb
        helm.sh/chart: mongodb-13.0.1
        app.kubernetes.io/instance: mongodb-replicaset
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: arbiter
    spec:
      serviceAccountName: mongodb-replicaset
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: mongodb
                    app.kubernetes.io/instance: mongodb-replicaset
                    app.kubernetes.io/component: arbiter
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 1001
        sysctls: []
      initContainers:
      containers:
        - name: mongodb-arbiter
          image: hub.kce.ksyun.com/leoxinyuan/mongodb:6.0.1-debian-11-r1
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: K8S_SERVICE_NAME
              value: "mongodb-replicaset-arbiter-headless"
            - name: MONGODB_REPLICA_SET_MODE
              value: "arbiter"
            - name: MONGODB_INITIAL_PRIMARY_HOST
              value: mongodb-replicaset-0.mongodb-replicaset-headless.$(MY_POD_NAMESPACE).svc.cluster.local
            - name: MONGODB_REPLICA_SET_NAME
              value: "rs0"
            - name: MONGODB_ADVERTISED_HOSTNAME
              value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
            - name: MONGODB_PORT_NUMBER
              value: "27017"
            - name: ALLOW_EMPTY_PASSWORD
              value: "yes"
          ports:
            - containerPort: 27017
              name: mongodb
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 10
            tcpSocket:
              port: mongodb
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 10
            tcpSocket:
              port: mongodb
          resources:
            limits: {}
            requests: {}
---
# Source: mongodb/templates/replicaset/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongodb-replicaset
  namespace: "default"
  labels:
    app.kubernetes.io/name: mongodb
    helm.sh/chart: mongodb-13.0.1
    app.kubernetes.io/instance: mongodb-replicaset
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: mongodb
spec:
  serviceName: mongodb-replicaset-headless
  podManagementPolicy: OrderedReady
  replicas: 2
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: mongodb
      app.kubernetes.io/instance: mongodb-replicaset
      app.kubernetes.io/component: mongodb
  template:
    metadata:
      labels:
        app.kubernetes.io/name: mongodb
        helm.sh/chart: mongodb-13.0.1
        app.kubernetes.io/instance: mongodb-replicaset
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: mongodb
    spec:
      serviceAccountName: mongodb-replicaset
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: mongodb
                    app.kubernetes.io/instance: mongodb-replicaset
                    app.kubernetes.io/component: mongodb
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 1001
        sysctls: []
      containers:
        - name: mongodb
          image: hub.kce.ksyun.com/leoxinyuan/mongodb:6.0.1-debian-11-r1
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          command:
            - /scripts/setup.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: MY_POD_HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: K8S_SERVICE_NAME
              value: "mongodb-replicaset-headless"
            - name: MONGODB_INITIAL_PRIMARY_HOST
              value: mongodb-replicaset-0.$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
            - name: MONGODB_REPLICA_SET_NAME
              value: "rs0"
            - name: MONGODB_ADVERTISED_HOSTNAME
              value: "$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local"
            - name: ALLOW_EMPTY_PASSWORD
              value: "yes"
            - name: MONGODB_SYSTEM_LOG_VERBOSITY
              value: "0"
            - name: MONGODB_DISABLE_SYSTEM_LOG
              value: "no"
            - name: MONGODB_DISABLE_JAVASCRIPT
              value: "no"
            - name: MONGODB_ENABLE_JOURNAL
              value: "yes"
            - name: MONGODB_PORT_NUMBER
              value: "27017"
            - name: MONGODB_ENABLE_IPV6
              value: "no"
            - name: MONGODB_ENABLE_DIRECTORY_PER_DB
              value: "no"
          ports:
            - name: mongodb
              containerPort: 27017
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 10
            exec:
              command:
                - /bitnami/scripts/ping-mongodb.sh
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bitnami/scripts/readiness-probe.sh
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - name: datadir
              mountPath: /bitnami/mongodb
              subPath:
            - name: common-scripts
              mountPath: /bitnami/scripts
            - name: scripts
              mountPath: /scripts/setup.sh
              subPath: setup.sh
      volumes:
        - name: common-scripts
          configMap:
            name: mongodb-replicaset-common-scripts
            defaultMode: 0550
        - name: scripts
          configMap:
            name: mongodb-replicaset-scripts
            defaultMode: 0755
  volumeClaimTemplates:
    - metadata:
        name: datadir
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"
        storageClassName: gluster-storageclass
volumeClaimTemplates: a template for a class of PVCs. The system creates as many PVCs as the StatefulSet's replicas setting specifies; apart from their names, these PVCs are identical.
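The generated claims follow the fixed naming scheme <template-name>-<statefulset-name>-<ordinal>, so provisioning can be watched with kubectl (the output shown is illustrative):
kubectl get pvc
# NAME                           STATUS   STORAGECLASS
# datadir-mongodb-replicaset-0   Bound    gluster-storageclass
# datadir-mongodb-replicaset-1   Bound    gluster-storageclass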
StatefulSet characteristics:
- Ordered deployment: pods are created or scaled up strictly in order (0 to N-1); each pod starts only after all pods before it are Running and Ready.
- Ordered scale-down: pods are deleted or scaled down in reverse order (N-1 to 0); each pod is terminated only after all pods after it have been fully shut down.
- Stable network identity: a pod keeps its PodName and HostName after being rescheduled.
- Stable persistent storage: backed by PVCs, a pod still reaches the same persistent data after being rescheduled.
1. Create and verify
[root@k8s-master01 ~]# kubectl apply -f mongo-rs.yaml
# Check: two replica-set members plus one arbiter
[root@k8s-master01 helm]# kubectl get po | grep mongodb-replicaset
mongodb-replicaset-0 1/1 Running 0 3m42s
mongodb-replicaset-1 1/1 Running 0 2m57s
mongodb-replicaset-arbiter-0 1/1 Running 0 3m41s
# Log in to MongoDB and inspect the replica set
[root@k8s-master01 helm]# kubectl run --namespace default mongodb-replicaset-client --rm --tty -i --restart='Never' --env="MONGODB_ROOT_PASSWORD=$MONGODB_ROOT_PASSWORD" --image hub.kce.ksyun.com/leoxinyuan/mongodb:6.0.1-debian-11-r1 --command -- bash
If you don't see a command prompt, try pressing enter.
I have no name!@mongodb-replicaset-client:/$
I have no name!@mongodb-replicaset-client:/$ mongosh admin --host "mongodb-replicaset-0.mongodb-replicaset-headless.default.svc.cluster.local:27017,mongodb-replicaset-1.mongodb-replicaset-headless.default.svc.cluster.local:27017"
Current Mongosh Log ID: 63086040b50e326074e07ca8
Connecting to: mongodb://mongodb-replicaset-0.mongodb-replicaset-headless.default.svc.cluster.local:27017,mongodb-replicaset-1.mongodb-replicaset-headless.default.svc.cluster.local:27017/admin?appName=mongosh+1.5.4
Using MongoDB: 6.0.1
Using Mongosh: 1.5.4
For mongosh info see: https://docs.mongodb.com/mongodb-shell/
To help improve our products, anonymous usage data is collected and sent to MongoDB periodically (https://www.mongodb.com/legal/privacy-policy).
You can opt-out by running the disableTelemetry() command.
------
The server generated these startup warnings when booting
2022-08-26T05:54:04.279+00:00: /sys/kernel/mm/transparent_hugepage/enabled is 'always'. We suggest setting it to 'never'
2022-08-26T05:54:04.279+00:00: vm.max_map_count is too low
------
------
Enable MongoDB's free cloud-based monitoring service, which will then receive and display
metrics about your deployment (disk utilization, CPU, operation statistics, etc).
The monitoring data will be available on a MongoDB website with a unique URL accessible to you
and anyone you share the URL with. MongoDB may use this information to make product
improvements and to suggest MongoDB products and deployment options to you.
To enable free monitoring, run the following command: db.enableFreeMonitoring()
To permanently disable this reminder, run the following command: db.disableFreeMonitoring()
------
rs0 [primary] admin> rs.status();
{
  set: 'rs0',
  date: ISODate("2022-08-26T05:55:20.483Z"),
  myState: 1,
  term: Long("2"),
  syncSourceHost: '',
  syncSourceId: -1,
  heartbeatIntervalMillis: Long("2000"),
  majorityVoteCount: 1,
  writeMajorityCount: 1,
  votingMembersCount: 1,
  writableVotingMembersCount: 1,
  optimes: {
    lastCommittedOpTime: { ts: Timestamp({ t: 1661493315, i: 1 }), t: Long("2") },
    lastCommittedWallTime: ISODate("2022-08-26T05:55:15.265Z"),
    readConcernMajorityOpTime: { ts: Timestamp({ t: 1661493315, i: 1 }), t: Long("2") },
    appliedOpTime: { ts: Timestamp({ t: 1661493315, i: 1 }), t: Long("2") },
    durableOpTime: { ts: Timestamp({ t: 1661493315, i: 1 }), t: Long("2") },
    lastAppliedWallTime: ISODate("2022-08-26T05:55:15.265Z"),
    lastDurableWallTime: ISODate("2022-08-26T05:55:15.265Z")
  },
  lastStableRecoveryTimestamp: Timestamp({ t: 1661493295, i: 1 }),
  electionCandidateMetrics: {
    lastElectionReason: 'electionTimeout',
    lastElectionDate: ISODate("2022-08-26T05:54:05.257Z"),
    electionTerm: Long("2"),
    lastCommittedOpTimeAtElection: { ts: Timestamp({ t: 0, i: 0 }), t: Long("-1") },
    lastSeenOpTimeAtElection: { ts: Timestamp({ t: 1661493240, i: 14 }), t: Long("1") },
    numVotesNeeded: 1,
    priorityAtElection: 5,
    electionTimeoutMillis: Long("10000"),
    newTermStartDate: ISODate("2022-08-26T05:54:05.262Z"),
    wMajorityWriteAvailabilityDate: ISODate("2022-08-26T05:54:05.265Z")
  },
  members: [
    {
      _id: 0,
      name: 'mongodb-replicaset-0.mongodb-replicaset-headless.default.svc.cluster.local:27017',
      health: 1,
      state: 1,
      stateStr: 'PRIMARY',
      uptime: 79,
      optime: { ts: Timestamp({ t: 1661493315, i: 1 }), t: Long("2") },
      optimeDate: ISODate("2022-08-26T05:55:15.000Z"),
      lastAppliedWallTime: ISODate("2022-08-26T05:55:15.265Z"),
      lastDurableWallTime: ISODate("2022-08-26T05:55:15.265Z"),
      syncSourceHost: '',
      syncSourceId: -1,
      infoMessage: 'Could not find member to sync from',
      electionTime: Timestamp({ t: 1661493245, i: 1 }),
      electionDate: ISODate("2022-08-26T05:54:05.000Z"),
      configVersion: 1,
      configTerm: 2,
      self: true,
      lastHeartbeatMessage: ''
    }
  ],
  ok: 1,
  '$clusterTime': {
    clusterTime: Timestamp({ t: 1661493315, i: 1 }),
    signature: {
      hash: Binary(Buffer.from("0000000000000000000000000000000000000000", "hex"), 0),
      keyId: Long("0")
    }
  },
  operationTime: Timestamp({ t: 1661493315, i: 1 })
}
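As a last check that writes really land on the GlusterFS-backed volume, a small write-then-read sketch run against the primary (the smoke collection name is made up for illustration):
kubectl exec -it mongodb-replicaset-0 -- mongosh admin --eval 'db.smoke.insertOne({ping: "pong"})'
kubectl exec -it mongodb-replicaset-0 -- mongosh admin --eval 'db.smoke.findOne()'    # returns the document just written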
At this point, using a GlusterFS storage class in Kubernetes is complete, and we have walked through creating MongoDB with a StatefulSet backed by the GlusterFS dynamic storage class.