Manually Installing a Two-Node Ceph Cluster Without ceph-deploy

Date: 2022-09-09 12:34:52


Installing Ceph with ceph-deploy is convenient, but it hides what is actually going on underneath. To understand things a bit more deeply, I installed Ceph manually on two servers; the process is recorded below:


#Setup: two servers, with hostnames controller and network. Note that the disks and partition layouts of the two servers differ from each other.

#Operating system: CentOS 7.0

#Before installing, set up hostname resolution between the nodes and make sure the servers' clocks are accurately synchronized (see the sketch below)

#ceph mon is installed on both servers
#ceph osd is installed on both servers
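
#A quick sketch of the two prerequisites above, run on both nodes (the IPs are taken from the mon host addresses used later in this post; pool.ntp.org is a placeholder, substitute your own NTP server):
cat >> /etc/hosts <<EOF
10.10.10.155 controller
10.10.10.157 network
EOF
yum install -y ntp
ntpdate pool.ntp.org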


#GET PACKAGES, run on all Ceph nodes

#ADD KEYS
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'

#ADD CEPH EXTRAS
vim /etc/yum.repos.d/ceph-extras.repo
================================================================================
[ceph-extras]
name=Ceph Extras Packages
baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc


[ceph-extras-noarch]
name=Ceph Extras noarch
baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc


[ceph-extras-source]
name=Ceph Extras Sources
baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6/SRPMS
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
================================================================================


#ADD CEPH
vim /etc/yum.repos.d/ceph.repo
================================================================================
[ceph]
name=Ceph packages for $basearch
baseurl=http://ceph.com/rpm-giant/el7/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc


[ceph-noarch]
name=Ceph noarch packages
baseurl=http://ceph.com/rpm-giant/el7/noarch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc


[ceph-source]
name=Ceph source packages
baseurl=http://ceph.com/rpm-giant/el7/SRPMS
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc
================================================================================




yum clean all
yum makecache
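
#Optional: verify the new repos are visible before installing anything:
yum repolist | grep -i ceph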






#INSTALL CEPH STORAGE CLUSTER, run on all Ceph nodes


#INSTALLING WITH RPM
yum install -y yum-plugin-priorities


vim /etc/yum/pluginconf.d/priorities.conf
================================================================================
[main]
enabled = 1
================================================================================
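
#Optional sanity check that the priorities plugin is present and enabled:
rpm -q yum-plugin-priorities
grep enabled /etc/yum/pluginconf.d/priorities.conf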


yum install -y snappy leveldb gdisk python-argparse gperftools-libs
yum install -y ceph


ll /etc/ceph
ll /var/lib/ceph
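
#The listings above should show the packaged config and data directories; on this release the ceph package typically creates subdirectories such as bootstrap-osd, mon, osd and tmp under /var/lib/ceph (the exact layout may vary by version).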


#MANUAL DEPLOYMENT


#Run on controller, and note down the generated UUID
uuidgen
e5277097-ce93-4a45-95e2-ff5d81b39001
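
#To avoid copy/paste mistakes, the UUID can also be captured in a shell variable and reused in the commands below (a convenience sketch; the rest of this post writes the UUID out literally):
FSID=$(uuidgen)
echo $FSID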


vim /etc/ceph/ceph.conf
================================================================================
[global]
fsid = e5277097-ce93-4a45-95e2-ff5d81b39001
mon initial members = controller
mon host = 10.10.10.155
public network = 10.10.10.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 128
osd pool default pgp num = 128
osd crush chooseleaf type = 1
================================================================================
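
#Why 128 PGs: the Ceph docs' rule of thumb is roughly (number of OSDs * 100) / pool size, rounded up to the nearest power of two; with the 2 OSDs and size 2 used here:
echo $(( 2 * 100 / 2 ))    #100, rounded up to the next power of two = 128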


#Create a keyring for your cluster and generate a monitor secret key
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
monmaptool --create --add controller 10.10.10.155 --fsid e5277097-ce93-4a45-95e2-ff5d81b39001 /tmp/monmap
mkdir -p /var/lib/ceph/mon/ceph-controller
ceph-mon --mkfs -i controller --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
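#the 'done' file marks the mon's bootstrap as finished; 'sysvinit' tells the init script that it manages this daemon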
touch /var/lib/ceph/mon/ceph-controller/done
touch /var/lib/ceph/mon/ceph-controller/sysvinit
/etc/init.d/ceph start mon.controller
ceph quorum_status --format json-pretty
ceph osd lspools
ceph -s
ceph auth list
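
#Optional: inspect the monmap that was just created (monmaptool --print and ceph mon stat are standard tools on this release):
monmaptool --print /tmp/monmap
ceph mon stat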


#Copy the relevant files under /etc/ceph on controller to the network node
scp /etc/ceph/ceph* network:/etc/ceph/


#Install the mon on network
mkdir -p /var/lib/ceph/mon/ceph-network
ceph auth get mon. -o /tmp/ceph.mon.keyring
ceph mon getmap -o /tmp/monmap
ceph-mon --mkfs -i network --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
touch /var/lib/ceph/mon/ceph-network/done
touch /var/lib/ceph/mon/ceph-network/sysvinit


#Run the following on controller
sed -i "/^mon initial members/c mon initial members = controller, network" /etc/ceph/ceph.conf
sed -i "/^mon host/c            mon host = 10.10.10.155, 10.10.10.157"     /etc/ceph/ceph.conf
/etc/init.d/ceph restart mon.controller
/etc/init.d/ceph status  mon.controller


#Run the following on network
sed -i "/^mon initial members/c mon initial members = controller, network" /etc/ceph/ceph.conf
sed -i "/^mon host/c            mon host = 10.10.10.155, 10.10.10.157"     /etc/ceph/ceph.conf
/etc/init.d/ceph start  mon.network
/etc/init.d/ceph status mon.network


#Run the following on both servers to check the status of the cluster services
ceph quorum_status --format json-pretty
ceph -s
ceph auth list
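
#Both controller and network should now appear under quorum_names in the quorum_status output; ceph mon stat gives a one-line summary:
ceph mon stat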


#ADDING OSDS


#Run the following on controller (/dev/sdb1 is the OSD data partition, /dev/sdb2 the journal partition)
mkdir -p /var/lib/ceph/osd
ceph-disk prepare --cluster ceph --cluster-uuid e5277097-ce93-4a45-95e2-ff5d81b39001 --fs-type xfs /dev/sdb1 /dev/sdb2
ceph-disk activate /dev/sdb1
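
#Optional: confirm the new OSD came up (ceph-disk list shows how the partitions were tagged; the mount point under /var/lib/ceph/osd depends on the OSD id that was assigned):
ceph-disk list
mount | grep ceph
ceph osd tree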


#Copy /var/lib/ceph/bootstrap-osd/ceph.keyring from controller to network (ceph-disk activate needs this bootstrap key to register the new OSD with the cluster)
scp /var/lib/ceph/bootstrap-osd/ceph.keyring network:/var/lib/ceph/bootstrap-osd/


#Run the following on network (here /dev/sda1 is the data partition and /dev/sda2 the journal, since its disks differ from controller's)
mkdir -p /var/lib/ceph/osd
ceph-disk prepare --cluster ceph --cluster-uuid e5277097-ce93-4a45-95e2-ff5d81b39001 --fs-type xfs /dev/sda1 /dev/sda2
ceph-disk activate /dev/sda1


#Run the following on both servers to check the status of the cluster services
ceph quorum_status --format json-pretty
ceph -s
ceph df
ceph osd tree
ceph auth list


#Start or stop an OSD
/etc/init.d/ceph start osd.{osd-num}
/etc/init.d/ceph stop  osd.{osd-num}
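
#Per the add-or-rm-osds document referenced below: before stopping an OSD permanently, mark it out first and let the cluster rebalance (a sketch; {osd-num} as above):
ceph osd out {osd-num}
#wait for rebalancing to finish (watch progress with ceph -w), then:
/etc/init.d/ceph stop osd.{osd-num}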


#The official Ceph documentation referenced:

http://ceph.com/docs/giant/install/

http://ceph.com/docs/giant/install/get-packages/
http://ceph.com/docs/giant/install/install-storage-cluster/
http://ceph.com/docs/giant/install/manual-deployment/
http://ceph.com/docs/giant/rados/operations/add-or-rm-mons/
http://ceph.com/docs/giant/rados/operations/add-or-rm-osds/

#Ceph package site
http://ceph.com/packages/



--------------------------------------------------------------------------------------------------


Automated deployment of an OpenStack cluster with HA

Supported networking components:

  nova-network (FlatDHCPManager, VlanManager)

  neutron (gre, vxlan)

Ceph can be installed automatically as the back-end storage for OpenStack
Includes an uninstall feature that can completely remove an OpenStack/Ceph cluster
URL: http://prostack.lightcloud.cc (note: it ends in .cc, not .cn)