如何搭建 & 扩容 LVM
PV（物理卷）：由硬盘分区或整块硬盘经 pvcreate 初始化而来
VG（卷组）：由一个或多个 PV 组成的存储池
LV（逻辑卷）：从 VG 的空闲空间中划分出来、可格式化挂载使用的部分
[root@sky dev]# pvcreate /dev/sdd1 /dev/sdd4 /dev/sdd5 /dev/sdd6 /dev/sdd7
WARNING: ext4 signature detected on /dev/sdd1 at offset 1080. Wipe it? [y/n] y
  Wiping ext4 signature on /dev/sdd1.
  Physical volume "/dev/sdd1" successfully created
  Physical volume /dev/sdd4 not found
  Device /dev/sdd4 not found (or ignored by filtering).
WARNING: xfs signature detected on /dev/sdd5 at offset 0. Wipe it? [y/n] y
  Wiping xfs signature on /dev/sdd5.
  Physical volume "/dev/sdd5" successfully created
WARNING: ext3 signature detected on /dev/sdd6 at offset 1080. Wipe it? [y/n] y
  Wiping ext3 signature on /dev/sdd6.
  Physical volume "/dev/sdd6" successfully created
  Physical volume "/dev/sdd7" successfully created
[root@sky dev]# vgcreate vg0 /dev/sdd1 /dev/sdd5 /dev/sdd6 /dev/sdd7
  Volume group "vg0" successfully created
[root@sky dev]# lvcreate -L 1G -n lv0 vg0
  Logical volume "lv0" created
[root@sky dev]# vgscan
  Reading all physical volumes. This may take a while...
  Found volume group "rhel" using metadata type lvm2
  Found volume group "vg0" using metadata type lvm2
[root@sky dev]#
[root@sky dev]# vgdisplay vg0
--- Volume group ---
VG Name vg0
System ID
Format lvm2
Metadata Areas 4
Metadata Sequence No 2
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 1
Open LV 0
Max PV 0
Cur PV 4
Act PV 4
VG Size 3.98 GiB
PE Size 4.00 MiB
Total PE 1020
Alloc PE / Size 256 / 1.00 GiB
Free PE / Size 764 / 2.98 GiB
VG UUID WuV3Xm-u4IM-QD2A-lqMJ-qHf5-XbkU-uUSBqE
[root@sky dev]#
[root@sky dev]# lvdisplay /dev/vg0/lv0
--- Logical volume ---
LV Path /dev/vg0/lv0
LV Name lv0
VG Name vg0
LV UUID 3U5Ijl-lpsv-n3EK-IXrZ-6HjH-SYr2-V0TwGW
LV Write Access read/write
LV Creation host, time sky.com, 2015-11-29 17:49:59 +0800
LV Status available
# open 0
LV Size 1.00 GiB
Current LE 256
Segments 2
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:3
[root@sky dev]# mkfs -t xfs /dev/vg0/lv0
meta-data=/dev/vg0/lv0 isize=256 agcount=4, agsize=65536 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@sky dev]# mkdir /home/lvtest
[root@sky dev]# mount /dev/vg0/lv0 /home/lvtest/
[root@sky dev]# df -hT
文件系统 类型 容量 已用 可用 已用% 挂载点
/dev/mapper/rhel-root xfs 17G 6.7G 10G 41% /
devtmpfs devtmpfs 487M 0 487M 0% /dev
tmpfs tmpfs 496M 80K 496M 1% /dev/shm
tmpfs tmpfs 496M 7.2M 489M 2% /run
tmpfs tmpfs 496M 0 496M 0% /sys/fs/cgroup
/dev/sda1 xfs 497M 102M 395M 21% /boot
/dev/mapper/rhel-var xfs 997M 243M 755M 25% /var
/dev/mapper/vg0-lv0 xfs 1014M 33M 982M 4% /home/lvtest
[root@sky dev]#
LV扩容:
[root@sky dev]# umount /dev/vg0/lv0
[root@sky dev]# lvextend -L 2G /dev/vg0/lv0
  Extending logical volume lv0 to 2.00 GiB
  Logical volume lv0 successfully resized
[root@sky dev]# resize2fs -f /dev/vg0/lv0
resize2fs 1.42.9 (28-Dec-2013)
The filesystem is already 524288 blocks long. Nothing to do!
[root@sky dev]# mount /dev/vg0/lv0 /home/lvtest/
[root@sky dev]# df -hT
文件系统 类型 容量 已用 可用 已用% 挂载点
/dev/mapper/rhel-root xfs 17G 6.7G 10G 41% /
devtmpfs devtmpfs 487M 0 487M 0% /dev
tmpfs tmpfs 496M 80K 496M 1% /dev/shm
tmpfs tmpfs 496M 7.2M 489M 2% /run
tmpfs tmpfs 496M 0 496M 0% /sys/fs/cgroup
/dev/sda1 xfs 497M 102M 395M 21% /boot
/dev/mapper/rhel-var xfs 997M 243M 755M 25% /var
/dev/mapper/vg0-lv0 ext4 2.0G 6.0M 1.8G 1% /home/lvtest
[root@sky dev]#
注意：resize2fs 只适用于 ext2/3/4 文件系统；若 LV 上是 XFS 文件系统，扩容后应在挂载状态下执行 xfs_growfs 挂载点 来扩展文件系统。
vi /etc/fstab
umount /dev/vg0/lv0
cp /etc/*.conf /home/lvtest/
ls ./ | wc -l
硬RAID、软RAID
RAID0 条带化，读写加速，无冗余（任一盘坏则数据全丢）
RAID1 镜像，实时冗余备份，容量利用率 1/2
RAID5 条带化 + 分布式奇偶校验，兼顾 RAID0 的速度与 RAID1 的冗余，可容忍 1 块盘故障，存储效率 (n-1)/n，较高
RAID10 先做 RAID1 镜像，再做 RAID0 条带
RAID01 先做 RAID0 条带，再做 RAID1 镜像
[root@sky ~]# mdadm -Cv /dev/md0 -a yes -n 3 -l 5 /dev/sdb /dev/sdc /dev/sdd
mdadm: layout defaults to left-symmetric
mdadm: layout defaults to left-symmetric
mdadm: chunk size defaults to 512K
mdadm: /dev/sdb appears to be part of a raid array:
    level=raid0 devices=0 ctime=Thu Jan 1 08:00:00 1970
mdadm: partition table exists on /dev/sdb but will be lost or meaningless after creating array
mdadm: size set to 2095616K
mdadm: largest drive (/dev/sdb) exceeds size (2095616K) by more than 1%
Continue creating array?
Continue creating array? (y/n) y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@sky ~]# mdadm -Q /dev/md0
/dev/md0: 3.100GiB raid5 3 devices, 0 spares. Use mdadm --detail for more detail.
[root@sky ~]# mdadm --detail /dev/md0
/dev/md0:
        Version : 1.2
  Creation Time : Sun Nov 29 18:59:42 2015
     Raid Level : raid5
     Array Size : 4191232 (4.00 GiB 4.29 GB)
  Used Dev Size : 2095616 (2046.84 MiB 2145.91 MB)
   Raid Devices : 3
  Total Devices : 3
    Persistence : Superblock is persistent
    Update Time : Sun Nov 29 18:59:56 2015
          State : clean
 Active Devices : 3
Working Devices : 3
 Failed Devices : 0
  Spare Devices : 0
         Layout : left-symmetric
     Chunk Size : 512K
           Name : sky.com:0 (local to host sky.com)
           UUID : 1e38c834:6e11129b:ec1e8abf:d9439688
         Events : 18
    Number Major Minor RaidDevice State
       0     8    16        0      active sync /dev/sdb
       1     8    32        1      active sync /dev/sdc
       3     8    48        2      active sync /dev/sdd
[root@sky ~]# mdadm /dev/md0 -f /dev/sdb
mdadm: set /dev/sdb faulty in /dev/md0
[root@sky ~]# mdadm -D /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sun Nov 29 18:59:42 2015
Raid Level : raid5
Array Size : 4191232 (4.00 GiB 4.29 GB)
Used Dev Size : 2095616 (2046.84 MiB 2145.91 MB)
Raid Devices : 3
Total Devices : 3
Persistence : Superblock is persistent
Update Time : Sun Nov 29 19:05:13 2015
State : clean, degraded
Active Devices : 2
Working Devices : 2
Failed Devices : 1
Spare Devices : 0
Layout : left-symmetric
Chunk Size : 512K
Name : sky.com:0 (local to host sky.com)
UUID : 1e38c834:6e11129b:ec1e8abf:d9439688
Events : 20
Number Major Minor RaidDevice State
0 0 0 0 removed
1 8 32 1 active sync /dev/sdc
3 8 48 2 active sync /dev/sdd
0 8 16 - faulty /dev/sdb
[root@sky ~]#
[root@sky ~]# mdadm -S /dev/md0
mdadm: stopped /dev/md0
[root@sky ~]#