Keepalived High Availability and Deploying Ceph Distributed Storage

day03
Understanding how the application stores its data
Verification
Installing an additional load balancer
Configuring Keepalived on the load balancers
Verifying high availability
Configuring Ceph
Configuring CephFS
Migrating data from NFS to CephFS

Understanding how the application stores its data

  • When the application saves text data, it stores it in the database
  • When the application saves non-text data (such as images, videos, or archives), it stores it in the corresponding file directory

Verification

  • Publish a post that contains both text and an image
  • Check the image on the NFS server

[root@nfs 01]# ls /web_share/html/wp-content/uploads/
2022
[root@nfs 01]# ls /web_share/html/wp-content/uploads/2022/01/
html_css.jpeg

  • Check the text data on the database server

[root@database ~]# mysql
MariaDB [(none)]> use wordpress;
MariaDB [wordpress]> select * from wp_posts\G
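
If the full table dump is too verbose, a narrower query can be used instead; a minimal sketch (the column names are from the standard WordPress wp_posts schema):

MariaDB [wordpress]> SELECT ID, post_title, post_status, post_date FROM wp_posts WHERE post_status='publish'\G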

Installing an additional load balancer

  • proxy2: eth0 -> 192.168.88.6/24; eth1 -> 192.168.99.6/24
  • Prepare the environment

[root@zzgrhel8 ~]# vm clone proxy2

[root@zzgrhel8 ~]# virsh console proxy2
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: a

# Run the following commands to initialize
hostnamectl set-hostname proxy2
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.99.6/24
nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.88.6/24
nmcli connection down eth1
nmcli connection up eth1
nmcli connection down eth0
nmcli connection up eth0
echo a | passwd --stdin root

[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
proxy2 login: # press Ctrl+] to exit the console

  • Configure the HAProxy server

[root@proxy2 ~]# yum install -y haproxy
# Copy the configuration file from the first HAProxy server to proxy2
[root@proxy ~]# scp /etc/haproxy/haproxy.cfg 192.168.99.6:/etc/haproxy/
# Start the HAProxy service on proxy2
[root@proxy2 ~]# systemctl enable haproxy.service --now
[root@proxy2 ~]# ss -tlnp | grep :80
LISTEN 0 128 *:80
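
Before adding Keepalived, it is worth confirming that proxy2 can serve requests on its own; a quick check from the client host (assuming the HAProxy frontend listens on port 80, as on proxy1):

[root@zzgrhel8 ~]# curl -I http://192.168.88.6/    # expect an HTTP response served by the backend web servers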

Configuring Keepalived on the load balancers

  • Install Keepalived and modify the configuration file

[root@proxy ~]# yum install -y keepalived.x86_64
[root@proxy2 ~]# yum install -y keepalived.x86_64
[root@proxy ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id proxy1                  # change this: unique router id per node
   vrrp_iptables                     # add this line: keeps keepalived from adding iptables rules that block the VIP
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0                    # note: use the correct NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.88.80                 # VIP address
    }
}
[root@proxy ~]# systemctl enable keepalived.service --now
[root@proxy ~]# ip a s eth0 | grep '88\.80'
inet 192.168.88.80/32 scope global eth0
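
Optionally, watch the VRRP advertisements the MASTER multicasts every second (advert_int 1); a minimal sketch:

# Advertisements go to multicast 224.0.0.18; each line should show "VRRPv2, Advertisement, vrid 51, prio 100"
[root@proxy2 ~]# tcpdump -i eth0 -nn -c 3 'ip proto vrrp'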

# Modify the configuration on proxy2, then start the service
[root@proxy2 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id proxy2                  # change the router id
   vrrp_iptables                     # add this line
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP                      # change the state
    interface eth0                    # note: use the correct NIC name
    virtual_router_id 51
    priority 80                       # lower priority than the MASTER
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.88.80                 # VIP address
    }
}
[root@proxy2 ~]# systemctl enable keepalived.service --now
[root@proxy2 ~]# ip a s eth0 | grep '88\.80'    # returns nothing: the VIP is not here

[root@zzgrhel8 ~]# vim /etc/hosts
... ...
192.168.88.80 www.lab.com

Verifying high availability

  • On the host running the browser, check the address that www.lab.com resolves to

[root@zzgrhel8 ~]# ping -c2 www.lab.com
PING www.lab.com (192.168.88.80) 56(84) bytes of data.

  • Verify the VIP

# The VIP is currently on proxy
[root@proxy ~]# ip a s eth0 | grep '88\.80'
inet 192.168.88.80/32 scope global eth0
[root@proxy2 ~]# ip a s eth0 | grep '88\.80'    # no VIP here

# Simulate a failure of proxy by shutting it down
[root@proxy ~]# shutdown -h now
# Check whether the VIP has appeared on proxy2
[root@proxy2 ~]# ip a s eth0 | grep '88\.80'
inet 192.168.88.80/32 scope global eth0         # the VIP has appeared
# Keep visiting http://www.lab.com in the browser; the service remains available

# Power proxy back on; the VIP fails back to it (the MASTER has the higher priority)
[root@proxy ~]# ip a s eth0 | grep '88\.80'
inet 192.168.88.80/32 scope global eth0
[root@proxy2 ~]# ip a s eth0 | grep '88\.80'    # the VIP is gone from proxy2
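
The failover and failback can also be followed in the Keepalived log; a minimal sketch (look for "Entering MASTER STATE" / "Entering BACKUP STATE" messages around each transition):

[root@proxy2 ~]# journalctl -u keepalived --since "10 minutes ago" | grep -i state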

Configuring Ceph

Role          Hostname   IP address
Ceph node 1   node1      192.168.99.41/24
Ceph node 2   node2      192.168.99.42/24
Ceph node 3   node3      192.168.99.43/24

  • Each machine also needs two additional 20 GB disks

[root@zzgrhel8 ~]# vm clone node{1..3}

[root@zzgrhel8 ~]# virsh console node1
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: a

# Run the following commands to initialize
hostnamectl set-hostname node1
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.99.41/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root

[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: # press Ctrl+] to exit the console

[root@zzgrhel8 ~]# virsh console node2
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: a

# Run the following commands to initialize
hostnamectl set-hostname node2
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.99.42/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root

[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: # press Ctrl+] to exit the console

[root@zzgrhel8 ~]# virsh console node3
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: root
Password: a

# Run the following commands to initialize
hostnamectl set-hostname node3
nmcli connection modify eth1 ipv4.method manual ipv4.addresses 192.168.99.43/24
nmcli connection down eth1
nmcli connection up eth1
echo a | passwd --stdin root

[root@localhost ~]# logout
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
localhost login: # press Ctrl+] to exit the console

# Check the disks on the three machines
[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 30G 0 disk
└─vda1 253:1 0 30G 0 part /
vdb 253:16 0 20G 0 disk
vdc 253:32 0 20G 0 disk

[root@node2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 30G 0 disk
└─vda1 253:1 0 30G 0 part /
vdb 253:16 0 20G 0 disk
vdc 253:32 0 20G 0 disk

[root@node3 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 30G 0 disk
└─vda1 253:1 0 30G 0 part /
vdb 253:16 0 20G 0 disk
vdc 253:32 0 20G 0 disk

  • Configure the Ceph yum repositories

# Configure yum on nodes node1-3
[root@node1 ~]# cat /etc/yum.repos.d/local.repo
[local_repo]
name=CentOS-$releasever - Base
baseurl=ftp://192.168.99.240/dvd
enabled=1
gpgcheck=0

[root@node1 ~]# vim /etc/yum.repos.d/ceph.repo
[osd]
name=ceph osd
baseurl=ftp://192.168.99.240/ceph/OSD
enabled=1
gpgcheck=0

[mon]
name=ceph mon
baseurl=ftp://192.168.99.240/ceph/MON
enabled=1
gpgcheck=0

[tools]
name=ceph tools
baseurl=ftp://192.168.99.240/ceph/Tools
enabled=1
gpgcheck=0

[root@node1 ~]# yum repolist
... ...
repolist: 10,013

[root@node1 ~]# scp /etc/yum.repos.d/ceph.repo 192.168.99.42:/etc/yum.repos.d/
[root@node1 ~]# ^42^43    # history quick substitution: rerun the previous command with 42 replaced by 43

  • Make sure SELinux and the firewall are disabled on every node (see the sketch after this list)
  • Preparation before installing the cluster
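
A minimal sketch of disabling SELinux and firewalld, to be run on node1-3 (assuming firewalld is installed as the firewall):

setenforce 0                                                   # switch SELinux to permissive for the current boot
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config   # make the change permanent
systemctl disable firewalld --now                              # stop and disable the firewall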

# Ceph provides a ceph-deploy tool that lets one node operate on all the nodes
# Use node1 as the deployment node; all later operations run on node1, so node1 needs passwordless SSH access to the other hosts
[root@node1 ~]# ssh-keygen # generate a key pair
[root@node1 ~]# for i in {41..43}
> do
> ssh-copy-id 192.168.99.$i
> done

# Configure name resolution on all hosts. Note: the resolved name must match each machine's hostname
[root@node1 ~]# for i in {1..3}
> do
> echo -e "192.168.99.4$i\tnode$i" >> /etc/hosts
> done
[root@node1 ~]# cat /etc/hosts
... ...
192.168.99.41 node1
192.168.99.42 node2
192.168.99.43 node3

[root@node2 ~]# for i in {1..3}; do echo -e "192.168.99.4$i\tnode$i" >> /etc/hosts; done
[root@node3 ~]# for i in {1..3}; do echo -e "192.168.99.4$i\tnode$i" >> /etc/hosts; done

  • Install the cluster

# Install the packages on all three nodes
[root@node1 ~]# for i in node{1..3}
> do
> ssh $i yum install -y ceph-mon ceph-osd ceph-mds ceph-radosgw
> done


# Configure pubserver as the NTP server
[root@pubserver ~]# yum install -y chrony
[root@pubserver ~]# vim /etc/chrony.conf
allow 192.168.99.0/24    # allow hosts in 192.168.99.0/24 to synchronize time from this server
local stratum 10         # serve time to clients even when not synchronized to any upstream source
[root@pubserver ~]# systemctl restart chronyd

# Configure node1-3 as NTP clients of pubserver
[root@node1 ~]# for i in node{1..3}
> do
> ssh $i yum install -y chrony
> done
[root@node1 ~]# vim /etc/chrony.conf
# comment out all existing server lines and add the following
server 192.168.99.240 iburst
[root@node1 ~]# scp /etc/chrony.conf node2:/etc/
[root@node1 ~]# scp /etc/chrony.conf node3:/etc/
[root@node1 ~]# for i in node{1..3}
> do
> ssh $i systemctl restart chronyd
> done

# Verify that time is synchronized; ^* in front of the source means synchronization succeeded
[root@node1 ~]# chronyc sources -v
... ...
^* pubserver 10 6 17 40 -4385ns[-1241us] +/- 162us

# Install the ceph-deploy tool on node1
[root@node1 ~]# yum install -y ceph-deploy
# View the usage help
[root@node1 ~]# ceph-deploy --help
[root@node1 ~]# ceph-deploy mon --help # view help for the mon subcommand

# Create a working directory for ceph-deploy
[root@node1 ~]# mkdir ceph-cluster
[root@node1 ~]# cd ceph-cluster

# Create a new cluster.
[root@node1 ceph-cluster]# ceph-deploy new node{1..3}
[root@node1 ceph-cluster]# ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
[root@node1 ceph-cluster]# tree .
.
├── ceph.conf # cluster configuration file
├── ceph-deploy-ceph.log # log file
└── ceph.mon.keyring # shared key
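
For reference, the generated ceph.conf typically contains entries like the following (the fsid is randomly generated and will differ):

[root@node1 ceph-cluster]# cat ceph.conf
[global]
fsid = <randomly generated uuid>
mon_initial_members = node1, node2, node3
mon_host = 192.168.99.41,192.168.99.42,192.168.99.43
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx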

# Enable the layering feature (needed for copy-on-write snapshots/clones).
[root@node1 ceph-cluster]# vim ceph.conf # append the following line at the end
rbd_default_features = 1 # feature bit 1 = layering

# Initialize the monitors
[root@node1 ceph-cluster]# ceph-deploy mon create-initial
[root@node1 ceph-cluster]# systemctl status ceph-mon*
● ceph-mon@node1.service .. ..
[root@node2 ~]# systemctl status ceph*
● ceph-mon@node2.service ... ...
[root@node3 ~]# systemctl status ceph*
● ceph-mon@node3.service ... ...
# Note: these services can only be started 3 times within 30 minutes; further attempts fail with an error.

# Check the cluster status
[root@node1 ceph-cluster]# ceph -s
health HEALTH_ERR # the status is HEALTH_ERR because no OSD disks have been added yet


# Create OSDs
[root@node1 ceph-cluster]# ceph-deploy disk --help
# Initialize (zap) the disks on each host. On VMware the disks would be sdb and sdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node1:vdb node1:vdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node2:vdb node2:vdc
[root@node1 ceph-cluster]# ceph-deploy disk zap node3:vdb node3:vdc

# Create the storage space. Ceph splits each disk into two partitions: one of about 5 GB for Ceph's internal use, and one with all the remaining space for data
[root@node1 ceph-cluster]# ceph-deploy osd --help
[root@node1 ceph-cluster]# ceph-deploy osd create node1:vd{b,c}
[root@node1 ceph-cluster]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 253:0 0 30G 0 disk
└─vda1 253:1 0 30G 0 part /
vdb 253:16 0 20G 0 disk
├─vdb1 253:17 0 15G 0 part /var/lib/ceph/osd/ceph-0
└─vdb2 253:18 0 5G 0 part
vdc 253:32 0 20G 0 disk
├─vdc1 253:33 0 15G 0 part /var/lib/ceph/osd/ceph-1
└─vdc2 253:34 0 5G 0 part
# Two ceph-osd processes will appear, because two disks on this node are used for Ceph
[root@node1 ceph-cluster]# systemctl status ceph-osd*

# Continue creating OSDs on the other nodes
[root@node1 ceph-cluster]# ceph-deploy osd create node2:vd{b,c}
[root@node1 ceph-cluster]# ceph-deploy osd create node3:vd{b,c}
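
With all six OSDs created, they should all be reported as up; a quick check:

# Expect 6 OSDs (two per node), each with status "up"
[root@node1 ceph-cluster]# ceph osd tree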

# Check the cluster status
[root@node1 ceph-cluster]# ceph -s
health HEALTH_OK # HEALTH_OK means everything is working

Configuring CephFS

  • Install and enable the MDS

# Deploy the MDS from node1 (the MDS daemon itself runs on node3)
[root@node1 ~]# cd ceph-cluster/
[root@node1 ceph-cluster]# ceph-deploy mds create node3
[root@node3 ~]# systemctl status ceph-mds*

# 1. Create a pool named data1 for storing file data, with 100 placement groups (PGs)
[root@node1 ceph-cluster]# ceph osd pool create data1 100

# 2. Create a pool named metadata1 for storing metadata, also with 100 PGs
[root@node1 ceph-cluster]# ceph osd pool create metadata1 100

# 3. Create a CephFS named myfs1; data is stored in data1 and metadata in metadata1
[root@node1 ceph-cluster]# ceph fs new myfs1 metadata1 data1

# View the pools
[root@node1 ceph-cluster]# ceph osd lspools
0 rbd,1 data1,2 metadata1,
[root@node1 ceph-cluster]# ceph df
GLOBAL:
    SIZE       AVAIL      RAW USED     %RAW USED
    92093M     91574M         519M          0.56
POOLS:
    NAME          ID     USED       %USED     MAX AVAIL     OBJECTS
    rbd           0      86469k      0.28        30488M        2606
    data1         1           0         0        30488M           0
    metadata1     2        2068         0        30488M          20

# View the created file system
[root@node1 ceph-cluster]# ceph fs ls
name: myfs1, metadata pool: metadata1, data pools: [data1 ]
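
You can also confirm that the MDS has gone active for the new file system; output similar to the following is expected (the epoch number will differ):

[root@node1 ceph-cluster]# ceph mds stat
e5: 1/1/1 up {0=node3=up:active}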

Migrating data from NFS to CephFS

  • Stop using the NFS share

# Stop the nginx service
[root@web1 ~]# systemctl stop nginx
[root@web2 ~]# systemctl stop nginx
[root@web3 ~]# systemctl stop nginx

# Unmount the NFS directory
[root@web1 ~]# umount /usr/local/nginx/html/
[root@web2 ~]# umount /usr/local/nginx/html/
[root@web3 ~]# umount /usr/local/nginx/html/

# Remove the NFS automount entry (this deletes the last line of /etc/fstab, so it assumes the NFS entry is the last line; see the safer alternative below)
[root@web1 ~]# sed -i '$d' /etc/fstab
[root@web2 ~]# sed -i '$d' /etc/fstab
[root@web3 ~]# sed -i '$d' /etc/fstab
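
Deleting by pattern is safer than deleting the last line; a sketch (assuming the NFS entry in /etc/fstab references the /web_share export):

[root@web1 ~]# sed -i '/web_share/d' /etc/fstab    # remove any fstab line that mentions the NFS export path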

  • Configure the web servers to use CephFS

# 1. Configure yum on each web server and install the Ceph client software
[root@node1 ~]# scp /etc/yum.repos.d/ceph.repo 192.168.99.11:/etc/yum.repos.d/
[root@node1 ~]# ^11^12    # rerun the previous scp with 11 replaced by 12
[root@node1 ~]# ^12^13    # and again with 12 replaced by 13

[root@web1 ~]# yum install -y ceph-common libcephfs1
[root@web2 ~]# yum install -y ceph-common libcephfs1
[root@web3 ~]# yum install -y ceph-common libcephfs1

# 2. Look up the user name and key used to connect to Ceph
[root@node1 ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ==

# 3. Mount CephFS
[root@web1 ~]# vim /etc/fstab # note: the following entry is a single line
192.168.99.41:6789,192.168.99.42:6789,192.168.99.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web1 ~]# mount -a
[root@web1 ~]# df -h

[root@web2 ~]# vim /etc/fstab # note: the following entry is a single line
192.168.99.41:6789,192.168.99.42:6789,192.168.99.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web2 ~]# mount -a
[root@web2 ~]# df -h

[root@web3 ~]# vim /etc/fstab # note: the following entry is a single line
192.168.99.41:6789,192.168.99.42:6789,192.168.99.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secret=AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ== 0 0
[root@web3 ~]# mount -a
[root@web3 ~]# df -h
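
Putting the key directly in /etc/fstab leaves it readable to anyone who can read that file. mount.ceph also accepts a secretfile= option; a sketch (the file path /etc/ceph/admin.secret is an arbitrary choice):

# Store the key in a root-only file instead of embedding it in /etc/fstab
[root@web1 ~]# mkdir -p /etc/ceph
[root@web1 ~]# echo 'AQAah95hfWQFOhAAg3hcQ2FtFuCYB1lRKJMCLQ==' > /etc/ceph/admin.secret
[root@web1 ~]# chmod 600 /etc/ceph/admin.secret
# The fstab entry then becomes (still a single line):
# 192.168.99.41:6789,192.168.99.42:6789,192.168.99.43:6789:/ /usr/local/nginx/html ceph _netdev,name=admin,secretfile=/etc/ceph/admin.secret 0 0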

  • Copy the web site files from the NFS server to Ceph

[root@nfs ~]# cd /web_share/html/
[root@nfs html]# tar czpf /root/web.tar.gz ./*
[root@nfs html]# scp /root/web.tar.gz 192.168.99.11:/root/

[root@web1 ~]# tar xf web.tar.gz -C /usr/local/nginx/html/
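
Because all three web servers mount the same CephFS, extracting the archive once on web1 is enough; the files are immediately visible on web2 and web3:

[root@web2 ~]# ls /usr/local/nginx/html/    # same content as on web1
[root@web3 ~]# ls /usr/local/nginx/html/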

  • Start nginx on all web servers

[root@web1 ~]# systemctl start nginx
[root@web2 ~]# systemctl start nginx
[root@web3 ~]# systemctl start nginx
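
Finally, the site should be reachable through the VIP again; a quick check from the client host:

[root@zzgrhel8 ~]# curl -I http://www.lab.com/    # expect an HTTP response from one of the web servers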
