

RAID Research

2023-08-10 06:14:56

A Redundant Array of Independent Disks (RAID), commonly called a disk array, uses storage virtualization to combine multiple physical disks into one or more logical array groups, with the goal of improving performance, providing data redundancy, or both.

RAID 0

RAID 0 combines two or more disks into one large volume. Data is split into chunks and striped across the member disks, and because reads and writes proceed in parallel on all members, RAID 0 is the fastest of all RAID levels. It offers neither redundancy nor fault tolerance, however: if any one physical disk fails, all data on the array is lost. The sketch below illustrates the striping arithmetic.
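
A quick illustration of how striping maps a byte offset to a member disk (a minimal sketch, not part of the original walkthrough; the 64 KiB chunk size matches the array created below):

# RAID 0 striping: which member disk and chunk hold a given byte offset?
offset=$((300 * 1024))   # example: byte offset 300 KiB into the array
chunk=$((64 * 1024))     # 64 KiB chunk size
disks=2                  # two member disks
chunk_index=$((offset / chunk))
disk=$((chunk_index % disks))
echo "offset ${offset} -> member disk ${disk}, chunk ${chunk_index}"
# prints: offset 307200 -> member disk 0, chunk 4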

Creating RAID 0

Use the mdadm tool to build the sdc and sdd disks into a RAID 0 array.

[ecx@evm-ci0603f1ccmlom5ne6e0 ~]$ lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   80G  0 disk 
└─sda1   8:1    0   80G  0 part /
sdb      8:16   0 1000G  0 disk /data
sdc      8:32   0  100G  0 disk 
sdd      8:48   0  100G  0 disk 
sr0     11:0    1    1M  0 rom
[ecx@evm-ci0603f1ccmlom5ne6e0 ~]$ mdadm -Cv -l0 -c64 -n2 /dev/md0 /dev/sd{c,d}
-bash: mdadm: command not found

mdadm is not installed, so install it first:

[ecx@evm-ci0603f1ccmlom5ne6e0 ~]$ sudo yum -y install mdadm

Create the RAID 0 array (the remaining commands run as root):

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm -Cv -l0 -c64 -n2 /dev/md0 /dev/sd{c,d}
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@evm-ci0603f1ccmlom5ne6e0 ~]# cat /proc/mdstat
Personalities : [raid0] 
md0 : active raid0 sdd[1] sdc[0]
      209582080 blocks super 1.2 64k chunks
      
unused devices: <none>
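
To make the array reassemble with a stable name on reboot, it is worth persisting its definition; this is a standard mdadm step not shown in the original transcript (on this yum-based system the config file is /etc/mdadm.conf):

# Append the array definition so it is reassembled as /dev/md0 at boot
mdadm --detail --scan >> /etc/mdadm.conf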

Create the filesystem and mount point. Note in the mkfs.xfs output that XFS picks up the array's stripe geometry: sunit=16 blocks × 4 KiB = 64 KiB matches the chunk size, and swidth=32 blocks spans both members.

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mkfs.xfs /dev/md0
meta-data=/dev/md0               isize=512    agcount=16, agsize=3274704 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=52395264, imaxpct=25
         =                       sunit=16     swidth=32 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=25584, version=2
         =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mkdir -p /raid0
[root@evm-ci0603f1ccmlom5ne6e0 ~]# mount /dev/md0 /raid0
[root@evm-ci0603f1ccmlom5ne6e0 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        80G  1.9G   79G   3% /
devtmpfs         16G     0   16G   0% /dev
tmpfs            16G     0   16G   0% /dev/shm
tmpfs            16G   17M   16G   1% /run
tmpfs            16G     0   16G   0% /sys/fs/cgroup
/dev/sdb       1000G  134M 1000G   1% /data
tmpfs           3.2G     0  3.2G   0% /run/user/1001
/dev/md0        200G   33M  200G   1% /raid0

Add an fstab entry so the mount persists across reboots:

[root@evm-ci0603f1ccmlom5ne6e0 ~]# vi /etc/fstab

#
# /etc/fstab
# Created by anaconda on Thu Aug  8 12:37:33 2019
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=3ef2b806-efd7-4eef-aaa2-2584909365ff /                       xfs     defaults        0 0
/dev/md0                                  /raid0                  xfs     defaults        0 0 
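
Referencing /dev/md0 directly in fstab works only as long as the array keeps that name; if reassembly is not pinned in mdadm.conf it may come up as /dev/md127 and the boot can drop to emergency mode over the missing mount. A more robust variant (sketched here, not from the original) references the filesystem UUID and adds nofail:

# Find the filesystem UUID of the array
blkid /dev/md0
# Then use it in /etc/fstab (UUID below is a placeholder):
# UUID=<uuid-from-blkid>  /raid0  xfs  defaults,nofail  0 0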

Read/write tests

Write test

[root@evm-ci0603f1ccmlom5ne6e0 ~]# time dd if=/dev/zero of=/raid0/10GWrite bs=1k count=10000000
10000000+0 records in
10000000+0 records out
10240000000 bytes (10 GB) copied, 31.0588 s, 330 MB/s

real    0m31.061s
user    0m1.110s
sys     0m20.009s
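
This dd run goes through the page cache, so the 330 MB/s figure includes buffered writes that may not have reached the disks yet. For a number closer to raw array throughput, flush or bypass the cache with standard dd flags (a sketch, not from the original run):

# Include a final fdatasync in the timing so cached data is flushed
dd if=/dev/zero of=/raid0/10GWrite bs=1M count=10240 conv=fdatasync
# Or bypass the page cache entirely with direct I/O
dd if=/dev/zero of=/raid0/10GWrite bs=1M count=10240 oflag=direct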

Read test

[root@evm-ci0603f1ccmlom5ne6e0 ~]# time dd if=/raid0/10GWrite of=/dev/null bs=1k
10000000+0 records in
10000000+0 records out
10240000000 bytes (10 GB) copied, 5.83972 s, 1.8 GB/s

real    0m5.841s
user    0m0.777s
sys     0m5.064s
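
The 1.8 GB/s read figure is likewise inflated by the page cache, since the file was just written. Dropping the caches first gives a cold-cache number (standard kernel interface; a sketch, not from the original run):

# Flush dirty pages, then drop clean page/dentry/inode caches
sync
echo 3 > /proc/sys/vm/drop_caches
# Re-run the read test against a cold cache
dd if=/raid0/10GWrite of=/dev/null bs=1M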

Extending RAID 0

Check the current array configuration:

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Thu Jun  8 10:28:41 2023
        Raid Level : raid0
        Array Size : 209582080 (199.87 GiB 214.61 GB)
      Raid Devices : 2
     Total Devices : 2
       Persistence : Superblock is persistent
...

Add a new disk (this first attempt fails because sdb is still mounted at /data):

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm --add /dev/md0 /dev/sdb
mdadm: Cannot open /dev/sdb: Device or resource busy

Unmount sdb and try again (the "/data: not mounted" message appears because the first umount argument already detached the filesystem):

[root@evm-ci0603f1ccmlom5ne6e0 ~]# umount /dev/sdb /data
umount: /data: not mounted
[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm --add /dev/md0 /dev/sdb
mdadm: add new device failed for /dev/sdb as 2: Invalid argument

--add alone fails because RAID 0 has no redundancy and therefore no concept of a spare device; a member can only be added through a reshape with --grow. During the reshape, mdadm temporarily converts the array to RAID 4, and it returns to RAID 0 once the reshape completes (since --level=0 was requested):

[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm --grow /dev/md0 --level=0 --raid-devices=3 --backup-file=/data1/backup --add /dev/sdb
mdadm: level of /dev/md0 changed to raid4
mdadm: added /dev/sdb
[root@evm-ci0603f1ccmlom5ne6e0 ~]# mdadm --detail /dev/md0
/dev/md0:
           Version : 1.2
     Creation Time : Thu Jun  8 10:28:41 2023
        Raid Level : raid4
        Array Size : 209582080 (199.87 GiB 214.61 GB)
     Used Dev Size : 104791040 (99.94 GiB 107.31 GB)
      Raid Devices : 4
     Total Devices : 3
       Persistence : Superblock is persistent

       Update Time : Fri Jun  9 03:15:52 2023
             State : clean, FAILED, reshaping 
    Active Devices : 2
   Working Devices : 3
    Failed Devices : 0
     Spare Devices : 1

At this point the filesystem is still 200G; the extra capacity only becomes usable after the reshape finishes and the filesystem is grown (see the note after the status output below).

[root@evm-ci0603f1ccmlom5ne6e0 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda1        80G  2.0G   79G   3% /
devtmpfs         16G     0   16G   0% /dev
tmpfs            16G     0   16G   0% /dev/shm
tmpfs            16G   25M   16G   1% /run
tmpfs            16G     0   16G   0% /sys/fs/cgroup
/dev/md0        200G   33M  200G   1% /raid0
tmpfs           3.2G     0  3.2G   0% /run/user/1001
tmpfs           3.2G     0  3.2G   0% /run/user/0

Checking /proc/mdstat (the reshape progress also appears in mdadm --detail output) shows the array is still mid-reshape:

[root@evm-ci0603f1ccmlom5ne6e0 ~]# cat /proc/mdstat 
Personalities : [raid0] [raid6] [raid5] [raid4] 
md0 : active raid4 sdb[3] sdd[1] sdc[0]
      209582080 blocks super 1.2 level 4, 64k chunk, algorithm 5 [4/3] [UU__]
      [==>..................]  reshape = 12.4% (13032960/104791040) finish=1151.1min speed=1328K/sec
      
unused devices: <none>
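
Once the reshape completes and the array has returned to RAID 0, the extra capacity still has to be exposed to the filesystem. A plausible follow-up, sketched here rather than taken from the original transcript:

# Confirm the level is back to raid0 and the array size has grown
mdadm --detail /dev/md0
# Grow the mounted XFS filesystem to fill the enlarged array
xfs_growfs /raid0
# Optionally raise md's rebuild/reshape speed limits (KiB/s) if the
# reshape is crawling, as in the 1328K/sec figure above
sysctl -w dev.raid.speed_limit_min=100000
sysctl -w dev.raid.speed_limit_max=500000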