Deploying Ceph 13.2.5 Mimic with ceph-deploy

Date: 2021-10-30 12:44:13

I. Ceph Introduction

  1. The operating system needs kernel 3.10 or newer, i.e. CentOS 7 or later.
  2. The ceph-deploy tool simplifies the deployment process; this article uses ceph-deploy 1.5.39.
  3. Prepare at least six machines: one ceph-admin management node, three mon/mgr/mds nodes, and two OSD nodes (a layout summary follows this list).
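
For reference, the host names used throughout this article map to these roles (the sdb/sdc/sdd disk layout is shown later for node1 and assumed identical on node2):

# shyt-ceph-admin         ceph-deploy management node
# shyt-ceph-mon1..3       mon + mgr + mds
# shyt-ceph-osd-node1..2  OSD nodes (data disks sdb/sdc/sdd)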

II. Ceph Installation

1. Deploying the ceph-admin node

  • a) Set the hostname and configure the hosts file (a name-resolution check follows the listing).
shell> hostnamectl --static set-hostname shyt-ceph-admin
shell> cat /etc/hosts
10.52.0.181 shyt-ceph-mon1
10.52.0.182 shyt-ceph-mon2
10.52.0.183 shyt-ceph-mon3
10.52.0.201 shyt-ceph-osd-node1
10.52.0.202 shyt-ceph-osd-node2
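
A quick sanity check that each entry resolves as expected (repeat for the other hosts):

shell> getent hosts shyt-ceph-mon1
10.52.0.181     shyt-ceph-mon1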
  • b) Generate an SSH key and copy it to each node (a verification loop follows the commands)
shell> ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:TvZDQwvZpIKFAeSyh8Y1QhEOG9EzKaHaNN1rMl8kxfI root@shyt-ceph-admin
The key's randomart image is:
+---[RSA 2048]----+
|=O=o.o... .      |
|*+=..+...=       |
|+++=o +o= o      |
|o*o..  =Eo .     |
|+oo o o S +      |
|..  = = o .      |
|      . . o      |
|          .      |
|                 |
+----[SHA256]-----+

shell> ssh-copy-id shyt-ceph-mon1
shell> ssh-copy-id shyt-ceph-mon2
shell> ssh-copy-id shyt-ceph-mon3
shell> ssh-copy-id shyt-ceph-osd-node1
shell> ssh-copy-id shyt-ceph-osd-node2
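
To confirm passwordless SSH works before handing control to ceph-deploy, a quick loop over all five nodes:

shell> for h in shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3 shyt-ceph-osd-node1 shyt-ceph-osd-node2; do ssh $h hostname; done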
  • c) Install ceph-deploy
# Switch to the Aliyun yum mirror
shell> wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
shell> yum clean all
shell> yum makecache

shell> yum -y install https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/ceph-deploy-1.5.39-0.noarch.rpm
shell> ceph-deploy --version
1.5.39
  • d) Create the deployment directory; run all subsequent ceph-deploy commands from here, since ceph-deploy reads and writes ceph.conf and the keyrings in the current working directory.
shell> mkdir deploy_ceph_cluster && cd deploy_ceph_cluster

2. Deploying the mon/mgr/mds nodes

  • a) Set the hostname
shell> hostnamectl --static set-hostname shyt-ceph-mon1
  • b) Configure the yum mirrors
shell> wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
shell> wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
shell> yum clean all
shell> yum makecache
  • c) Create the Ceph monitor nodes (run on ceph-admin)
# Generates the ceph config file, the monitor keyring, and a deployment log in the current directory.
shell> ceph-deploy new shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3
  • d) Add the following to the ceph.conf generated in the deployment directory (see the attachment for a fully annotated version); a sketch for pushing later edits to the nodes follows the listing.
shell> cat ceph.conf
[global]
    osd pool default size = 3
    osd pool default min size = 1
    public network = 10.52.0.0/24
    cluster network = 10.52.0.0/24
    cephx require signatures = true
    cephx cluster require signatures = true
    cephx service require signatures = true
    cephx sign messages = true

[mon]
    mon data size warn = 15*1024*1024*1024
    mon data avail warn = 30
    mon data avail crit = 10
    # The cluster contains heterogeneous PCs whose clock drift regularly exceeds the default 0.05s, so the allowed drift is raised to 2s to suppress spurious warnings.
    mon clock drift allowed = 2
    mon clock drift warn backoff = 30
    mon allow pool delete = true
    mon osd allow primary affinity = true

[osd]
    osd journal size = 10000
    osd mkfs type = xfs
    osd max write size = 512
    osd client message size cap = 2147483648
    osd deep scrub stride = 131072
    osd op threads = 16
    osd disk threads = 4
    osd map cache size = 1024
    osd map cache bl size = 128
    #osd mount options xfs = "rw,noexec,nodev,noatime,nodiratime,nobarrier"
    osd recovery op priority = 5
    osd recovery max active = 10
    osd max backfills = 4
    osd min pg log entries = 30000
    osd max pg log entries = 100000
    osd mon heartbeat interval = 40
    ms dispatch throttle bytes = 148576000
    objecter inflight ops = 819200
    osd op log threshold = 50
    osd crush chooseleaf type = 0
    filestore xattr use omap = true
    filestore min sync interval = 10
    filestore max sync interval = 15
    filestore queue max ops = 25000
    filestore queue max bytes = 1048576000
    filestore queue committing max ops = 50000
    filestore queue committing max bytes = 10485760000
    filestore split multiple = 8
    filestore merge threshold = 40
    filestore fd cache size = 1024
    filestore op threads = 32
    journal max write bytes = 1073741824
    journal max write entries = 10000
    journal queue max ops = 50000
    journal queue max bytes = 10485760000
    
[mds]
    debug ms = 1/5

[client]
    rbd cache = true
    rbd cache size = 335544320
    rbd cache max dirty = 134217728
    rbd cache max dirty age = 30
    rbd cache writethrough until flush = false
    rbd cache max dirty object = 2
    rbd cache target dirty = 235544320
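
If ceph.conf needs further edits once the cluster is up, the updated file can be pushed from the deployment directory instead of being copied by hand; a minimal sketch using this article's hostnames (`--overwrite-conf` is required once the file already exists on the targets):

shell> ceph-deploy --overwrite-conf config push shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3 shyt-ceph-osd-node1 shyt-ceph-osd-node2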
  • e) Install the Ceph packages (a version check follows the command)
shell> ceph-deploy install shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3 \
--release mimic \
--repo-url http://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/ \
--gpg-url http://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
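
A quick check that the intended release landed on each node:

shell> ssh shyt-ceph-mon1 ceph --version
# expect something like: ceph version 13.2.5 (...) mimic (stable)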
  • f) Create the initial monitors and gather all keys (the gathered keyrings are listed below)
shell> ceph-deploy mon create-initial
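
On success, ceph-deploy writes the gathered keyrings into the deployment directory; exact file names vary slightly between ceph-deploy versions, but they can be listed with:

shell> ls -1 *.keyring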
  • g) Distribute the config files
# ceph-deploy copies the config file and admin keyring to the listed nodes, so the cluster can be managed from them without specifying mon addresses or user credentials.
shell> ceph-deploy admin shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3
  • h) Configure mgr
# Running ceph health at this point prints:
# HEALTH_WARN no active mgr
# Since Ceph 12 (Luminous), a manager daemon is mandatory; each machine running a monitor should also get a mgr, otherwise the cluster stays in WARN state.
shell> ceph-deploy mgr create shyt-ceph-mon1:cephsvr-16101 shyt-ceph-mon2:cephsvr-16102 shyt-ceph-mon3:cephsvr-16103

# Note: a failed ceph-mgr effectively puts the whole cluster in serious trouble,
# so create an independent ceph-mgr on every mon node (at least 3 ceph mon nodes) using the method above, giving each mgr its own unique name.

# To stop a ceph-mgr instance:
shell> systemctl stop ceph-mgr@cephsvr-16101
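
Which mgr is active and whether standbys are available can be read from the mgr map; a hedged check:

shell> ceph mgr dump | grep -E '"active_name"|"available"'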

3. Deploying the OSD nodes

  • a) Set the hostname
shell> hostnamectl --static set-hostname shyt-ceph-osd-node1
  • b) Configure the yum mirrors
shell> wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
shell> wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
shell> yum clean all
shell> yum makecache
  • c) Install the Ceph packages
shell> ceph-deploy install shyt-ceph-osd-node1 shyt-ceph-osd-node2 \
--release mimic \
--repo-url http://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/ \
--gpg-url http://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
  • d) Configure the OSD nodes (node1 shown here; node2 is sketched right after)
shell> ceph-deploy disk zap shyt-ceph-osd-node1:sdb shyt-ceph-osd-node1:sdc shyt-ceph-osd-node1:sdd
shell> ceph-deploy osd create shyt-ceph-osd-node1:sdb shyt-ceph-osd-node1:sdc shyt-ceph-osd-node1:sdd
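
The same zap/create steps apply to the second OSD node; this sketch assumes shyt-ceph-osd-node2 carries the same sdb/sdc/sdd disk layout:

shell> ceph-deploy disk zap shyt-ceph-osd-node2:sdb shyt-ceph-osd-node2:sdc shyt-ceph-osd-node2:sdd
shell> ceph-deploy osd create shyt-ceph-osd-node2:sdb shyt-ceph-osd-node2:sdc shyt-ceph-osd-node2:sdd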
  • e) Distribute the config files
shell> ceph-deploy admin shyt-ceph-osd-node1 shyt-ceph-osd-node2

# Check cluster and OSD status
shell> ceph -s
shell> ceph osd tree

III. Enabling the Dashboard

  • Run on any node to enable dashboard support
# Enable the dashboard module
shell> ceph mgr module enable dashboard
# Generate a self-signed certificate
shell> ceph dashboard create-self-signed-cert
Self-signed certificate created
# Set the dashboard listening port
shell> ceph config set mgr mgr/dashboard/server_port 8080
# Set the dashboard login credentials
shell> ceph dashboard set-login-credentials root 123456
Username and password updated
# Disable SSL and serve the dashboard over plain HTTP
shell> ceph config set mgr mgr/dashboard/ssl false
# Restart ceph-mgr on every mon node so the settings take effect
shell> systemctl restart ceph-mgr.target
# Browse to http://10.52.0.181:8080

# List the ceph-mgr services
shell> ceph mgr services
{
    "dashboard": "http://shyt-ceph-mon1:8080/"
}

IV. Creating the Ceph MDS Role

1. Install ceph-mds

# Deploy multiple MDS nodes to avoid a single point of failure
shell> ceph-deploy mds create shyt-ceph-mon1 shyt-ceph-mon2 shyt-ceph-mon3

2. Manually create the data and metadata pools

shell> ceph osd pool create data 128 128
shell> ceph osd pool create metadata 128 128
shell> ceph fs new cephfs metadata data
shell> ceph mds stat
cephfs-1/1/1 up {0=shyt-ceph-mon3=up:active}, 2 up:standby
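
A hedged verification that the filesystem is backed by the intended pools; the ceph fs ls output should resemble the line below:

shell> ceph fs ls
name: cephfs, metadata pool: metadata, data pools: [data ]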

3. Mount the CephFS filesystem

shell> wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
shell> wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# the quoted 'EOF' keeps the shell from expanding $basearch, leaving it for yum
shell> cat > /etc/yum.repos.d/ceph.repo << 'EOF'
[ceph]
name=Ceph packages for $basearch
baseurl=http://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=http://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc

[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
priority=1
type=rpm-md
gpgkey=http://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://mirror.tuna.tsinghua.edu.cn/ceph/rpm-mimic/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=http://mirror.tuna.tsinghua.edu.cn/ceph/keys/release.asc
priority=1

EOF

shell> yum clean all
shell> yum makecache
shell> yum -y install https://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/ceph-fuse-13.2.5-0.el7.x86_64.rpm
# Create the /etc/ceph directory and copy ceph.client.admin.keyring and ceph.conf into it.
shell> mkdir /etc/ceph/
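# A hedged example of fetching the two files, assuming root SSH access to shyt-ceph-mon1:
shell> scp shyt-ceph-mon1:/etc/ceph/ceph.conf /etc/ceph/
shell> scp shyt-ceph-mon1:/etc/ceph/ceph.client.admin.keyring /etc/ceph/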
# Create the mount point
shell> mkdir /storage
shell> ceph-fuse /storage
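# ceph-fuse takes the monitor addresses from /etc/ceph/ceph.conf; they can also
# be passed explicitly (monitor address from this article, default mon port 6789):
shell> ceph-fuse -m 10.52.0.181:6789 /storage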
# Add the mount to the boot sequence
shell> echo "ceph-fuse /storage" >> /etc/rc.d/rc.local