A note before we begin: if a reader happens to spot an error in this article, corrections are very welcome. If anything is unclear, I would be glad to discuss it.
Topology of the example setup
Notes:
The ansible host's main role is to configure and provision the two corosync+pacemaker+drbd+mysql nodes.
Configuring the ansible host
Install ansible
# yum -y install ansible
Edit the /etc/ansible/hosts file
# vim /etc/ansible/hosts
[hbhosts]
172.16.115.60
172.16.115.61
Set up SSH key-based authentication
# ssh-keygen -t rsa -P ''
# ansible hbhosts --module-name=copy --args='src=/root/.ssh/id_rsa.pub dest=/root/.ssh/authorized_keys owner=root group=root mode=600' --ask-pass
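Once the key is distributed, passwordless access can be verified with an ad-hoc ping (a quick sanity check added here, not part of the original procedure):
# ansible hbhosts -m ping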
Create the directory layout for the roles
# mkdir -pv corosync/roles/{common,ha,crmsh,drbd}
# mkdir corosync/roles/common/{files,tasks}
# mkdir corosync/roles/ha/{files,handlers,tasks}
# mkdir corosync/roles/crmsh/{files,tasks}
# mkdir corosync/roles/drbd/{files,tasks}
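The resulting skeleton, reconstructed from the mkdir commands above, looks like this:
corosync/
└── roles
    ├── common
    │   ├── files
    │   └── tasks
    ├── crmsh
    │   ├── files
    │   └── tasks
    ├── drbd
    │   ├── files
    │   └── tasks
    └── ha
        ├── files
        ├── handlers
        └── tasks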
Provide the configuration files for each role directory
# vim corosync/roles/common/files/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.0.1 server.magelinux.com server
172.16.115.60 kpa1.muluhe.com kpa1
172.16.115.61 kpa2.muluhe.com kpa2
# vim corosync/roles/common/tasks/main.yml
- name: hosts file
  copy: src=hosts dest=/etc/hosts
- name: sync time
  cron: name="sync time" minute="*/3" job="/usr/sbin/ntpdate 172.16.0.1 &> /dev/null"
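Because the cron entry syncs both nodes against 172.16.0.1 every three minutes, a quick ad-hoc check (an assumed extra step) confirms the clocks agree after the playbook runs:
# ansible hbhosts -m command -a 'date'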
# vim corosync/roles/ha/files/corosync.conf
compatibility: whitetank

totem {
    version: 2
    secauth: on
    threads: 0
    interface {
        ringnumber: 0
        bindnetaddr: 172.16.0.0
        mcastaddr: 226.194.115.15
        mcastport: 5405
        ttl: 1
    }
}

logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: no
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}

amf {
    mode: disabled
}

service {
    ver: 0
    name: pacemaker
}

aisexec {
    user: root
    group: root
}
Generate the authkey file and copy it into the /root/corosync/roles/ha/files directory, alongside the corosync.conf created above (this requires that corosync is already installed on the ansible host itself):
# corosync-keygen
Corosync Cluster Engine Authentication key generator.
Gathering 1024 bits for key from /dev/random.
Press keys on your keyboard to generate entropy.
Writing corosync key to /etc/corosync/authkey.
# cp /etc/corosync/authkey /root/corosync/roles/ha/files/
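A hedged aside: corosync-keygen reads from /dev/random and can block for a long time on an idle machine. One common workaround (an assumption, not part of the original procedure) is to generate disk activity in a second terminal while it waits:
# find / -type f -exec md5sum {} \; &> /dev/null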
# vim corosync/roles/ha/tasks/main.yml
- name: install corosync and pacemaker
  yum: name={{ item }} state=present
  with_items:
    - corosync
    - pacemaker
  tags: inst
- name: auth key file
  copy: src=authkey dest=/etc/corosync/authkey owner=root group=root mode=0400
  tags: authkey
- name: configuration file
  copy: src=corosync.conf dest=/etc/corosync/corosync.conf
  tags: conf
  notify:
    - restart corosync
- name: start corosync
  service: name=corosync state=started enabled=no
  tags: start
# vim corosync/roles/ha/handlers/main.yml
- name: restart corosync
  service: name=corosync state=restarted
Place the crmsh-1.2.6-4.el6.x86_64.rpm and pssh-2.3.1-2.el6.x86_64.rpm packages in the /root/corosync/roles/crmsh/files directory.
# vim corosync/roles/crmsh/tasks/main.yml
- name: copy pssh and crmsh
  copy: src={{ item }} dest=/tmp/
  with_items:
    - pssh-2.3.1-2.el6.x86_64.rpm
    - crmsh-1.2.6-4.el6.x86_64.rpm
- name: install pssh and crmsh
  yum: name={{ item }} state=present
  with_items:
    - /tmp/pssh-2.3.1-2.el6.x86_64.rpm
    - /tmp/crmsh-1.2.6-4.el6.x86_64.rpm
Place the drbd-8.4.3-33.el6.x86_64.rpm and drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm packages in the /root/corosync/roles/drbd/files directory.
# vim corosync/roles/drbd/tasks/main.yml
- name: copy drbd and drbd-kmdl
  copy: src={{ item }} dest=/tmp/
  with_items:
    - drbd-8.4.3-33.el6.x86_64.rpm
    - drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm
- name: install drbd and drbd-kmdl
  yum: name={{ item }} state=present
  with_items:
    - /tmp/drbd-8.4.3-33.el6.x86_64.rpm
    - /tmp/drbd-kmdl-2.6.32-431.el6-8.4.3-33.el6.x86_64.rpm
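Once this role has run, it is worth confirming that the drbd kernel module matches the running kernel on each node (an assumed verification step, run on the nodes themselves or via an ad-hoc ansible command):
# modprobe drbd
# lsmod | grep drbd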
# cd /root/corosync
# vim ha.yml
- name: install and config corosync
  remote_user: root
  hosts: hbhosts
  roles:
    - common
    - ha
    - crmsh
    - drbd
Run the playbook to apply all the tasks defined above
# ansible-playbook ha.yml
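If the run finishes with no failed tasks, corosync should be up on both nodes. A hedged spot-check from the ansible host (these commands are an addition, not shown in the original):
# ansible hbhosts -m command -a 'service corosync status'
# ansible hbhosts -m command -a 'crm_mon -1'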
Configuring the two drbd hosts
Prerequisite: a spare partition is available on each of the two nodes (/dev/sda5 in this example).
Provide identical drbd configuration files on both drbd hosts
# vim global_common.conf
global {
    usage-count no;
    # minor-count dialog-refresh disable-ip-verification
}

common {
    protocol C;

    handlers {
        # pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        # local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
    }

    startup {
        #wfc-timeout 120;
        #degr-wfc-timeout 120;
    }

    disk {
        on-io-error detach;
        #fencing resource-only;
    }

    net {
        cram-hmac-alg "sha1";
        shared-secret "mydrbdlab";
    }

    syncer {
        rate 1000M;
    }
}
# vim mydata.res
resource mydata {
    on kpa1.muluhe.com {
        device /dev/drbd1;
        disk /dev/sda5;
        address 172.16.115.60:7790;
        meta-disk internal;
    }
    on kpa2.muluhe.com {
        device /dev/drbd1;
        disk /dev/sda5;
        address 172.16.115.61:7790;
        meta-disk internal;
    }
}
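Before initializing the resource, the two files can be sanity-checked with drbdadm dump, which prints the parsed configuration or reports syntax errors (an assumed extra step, not in the original text):
# drbdadm dump mydata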
Initialize the defined resource on both nodes and start the service
# drbdadm create-md mydata
# /etc/init.d/drbd start
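Both nodes should now be connected Secondaries; inspecting /proc/drbd (an assumed verification step) should show cs:Connected and ro:Secondary/Secondary:
# cat /proc/drbd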
Next, promote the host at 172.16.115.60 to primary and install mysql on it.
# drbdadm primary --force mydata
# mke2fs -t ext4 -L DRBD /dev/drbd1
# mkdir /mysql
# mount /dev/drbd1 /mysql
# cd /mysql
# mkdir data
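drbd-overview, shipped with drbd-utils and used here as an assumed check, should now report Primary/Secondary on this node with /dev/drbd1 mounted on /mysql:
# drbd-overview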
Install mysql
# groupadd -r -g 336 mysql
# useradd -g mysql -r -u 3306 mysql
# tar xf mariadb-5.5.36-linux-x86_64.tar.gz -C /usr/local
# cd /usr/local
# ln -sv mariadb-5.5.36-linux-x86_64/ mysql
# cd mysql
# chown -R root:mysql ./*
# mkdir /etc/mysql
# cp support-files/my-large.cnf /etc/mysql/my.cnf
# vim /etc/mysql/my.cnf
    datadir = /mysql/data
# cp support-files/mysql.server /etc/rc.d/init.d/mysqld
# chmod +x /etc/rc.d/init.d/mysqld
# chkconfig --add mysqld
# chkconfig mysqld off
# scripts/mysql_install_db --user=mysql --datadir=/mysql/data
# vim /etc/profile.d/mysql.sh
    export PATH=/usr/local/mysql/bin:$PATH
# source /etc/profile.d/mysql.sh
# /etc/init.d/mysqld start
# mysql
MariaDB [(none)]> GRANT ALL ON *.* TO 'root'@'172.16.%.%' IDENTIFIED BY 'password';
MariaDB [(none)]> FLUSH PRIVILEGES;
Next, demote the host at 172.16.115.60 to secondary, promote the host at 172.16.115.61 to primary, and then install mysql there as well (see the sketch below).
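A minimal sketch of that hand-over, assuming mysqld is stopped and the filesystem unmounted first (these exact commands are not spelled out in the original):
On kpa1 (172.16.115.60):
# /etc/init.d/mysqld stop
# umount /mysql
# drbdadm secondary mydata
On kpa2 (172.16.115.61):
# drbdadm primary mydata
# mkdir /mysql
# mount /dev/drbd1 /mysql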
Configuring mysql high availability with the crm tool
Perform the configuration on the host at 172.16.115.60
# crm
crm(live)# configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# primitive mydrbd ocf:linbit:drbd params drbd_resource=mydata op monitor role=Master interval=20s timeout=20s op monitor role=Slave interval=30s timeout=20s op start timeout=240s op stop timeout=100s
crm(live)configure# primitive myfs ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/mysql fstype=ext4 op monitor interval=20s timeout=40s op start timeout=60s op stop timeout=60s
crm(live)configure# primitive myip ocf:heartbeat:IPaddr params ip=172.16.115.100 op monitor interval=30s timeout=20s
crm(live)configure# primitive myserver lsb:mysqld op monitor interval=30s timeout=20s
crm(live)configure# group myservice myip myfs myserver
crm(live)configure# ms ms_mydrbd mydrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# colocation myfs_with_ms_mydrbd_master inf: myfs ms_mydrbd:Master
crm(live)configure# colocation myserver_with_myfs inf: myserver myfs
crm(live)configure# order myfs_after_ms_mydrbd_master inf: ms_mydrbd:promote myfs:start
crm(live)configure# commit
crm(live)configure# cd
crm(live)# status
Last updated: Wed Oct 8 18:27:32 2014
Last change: Wed Oct 8 18:27:06 2014 via cibadmin on kpa1.muluhe.com
Stack: classic openais (with plugin)
Current DC: kpa2.muluhe.com - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
5 Resources configured

Online: [ kpa1.muluhe.com kpa2.muluhe.com ]

 Resource Group: myservice
     myip       (ocf::heartbeat:IPaddr):        Started kpa1.muluhe.com
     myfs       (ocf::heartbeat:Filesystem):    Started kpa1.muluhe.com
     myserver   (lsb:mysqld):                   Started kpa1.muluhe.com
 Master/Slave Set: ms_mydrbd [mydrbd]
     Masters: [ kpa1.muluhe.com ]
     Slaves:  [ kpa2.muluhe.com ]
crm(live)#
Testing mysql high availability
Run the test from the host at 172.16.15.1:
# mysql -u root -h 172.16.115.100 -p
Put the host at 172.16.115.60 into standby mode
# crm node standby
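All resources should fail over to kpa2.muluhe.com, which can be watched with crm status. Once the test passes, the node can be brought back online (a follow-up step assumed here, not in the original):
# crm status
# crm node online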
Test again from the host at 172.16.15.1:
# mysql -u root -h 172.16.115.100 -p
The test results show that the two drbd hosts provide a highly available mysql service.