StorageClass+k8s版mysql主从(副)

 

1、StorageClass

StorageClass    存储类。
pv             存储对象。

StorageClass 是按照pvc的要求,动态的生成pv,以后yaml只要配置StorageClass参数就可以了,不用再写pv了

Helm --> StorageClass,用Helm下载StorageClass

1.1、安装Helm

前提先把nfs装了
Helm就类似于py的pip,linux的yum,它是k8s的下载工具
Github:https://github.com/helm/helm/releases
下载链接:wget https://get.helm.sh/helm-v3.7.2-linux-amd64.tar.gz
# 解压,把启动文件移动/usr/local/bin/目录下
[root@k8s-master-01 ~]# tar -xf helm-v3.7.2-linux-amd64.tar.gz
[root@k8s-master-01 ~]# mv linux-amd64/helm /usr/local/bin/

yum   --->   yum源
helm  --->   helm源

1、添加helm源
官方的
[root@k8s-master-01 ~]# helm repo add moikot https://moikot.github.io/helm-charts
"moikot" has been added to your repositories
阿里的
[root@k8s-master-01 ~]# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
"aliyun" has been added to your repositories
微软的
[root@k8s-master-01 ~]# helm repo add stable http://mirror.azure.cn/kubernetes/charts/
"stable" has been added to your repositories

# 搜索nfs-client包
[root@k8s-master-01 ~]# helm search repo nfs-client
NAME                         CHART VERSION APP VERSION DESCRIPTION                                      
moikot/nfs-client-provisioner 1.3.0       3.1.0     nfs-client is an automatic provisioner that use...
stable/nfs-client-provisioner 1.2.11       3.1.0     DEPRECATED - nfs-client is an automatic provisi...
# 从stable源进行下载
[root@k8s-master-01 ~]# helm pull stable/nfs-client-provisioner
# 解压
[root@k8s-master-01 ~]# tar -xf nfs-client-provisioner-1.2.11.tgz
# 进入解压文件内
[root@k8s-master-01 ~]# cd nfs-client-provisioner/
# 修改values.yaml
[root@k8s-master-01 nfs-client-provisioner]# vim values.yaml
nfs:
server: 192.168.11.101                            =======>修改nfs-client服务的挂载点ip
path: /nfs/v3 =======>修改nfs-client服务的挂载目录
# 安装
[root@k8s-master-01 nfs-client-provisioner]# helm install nfs ./ -n kube-system
# 查看sc
[root@k8s-master-01 nfs-client-provisioner]# kubectl get sc
NAME         PROVISIONER                               RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-nfs-client-provisioner   Delete         Immediate           true                   28m

1.2、测试SC

案例:测试下sc是否能自动生成pv,注意事项:nfs的权限
---
# Test that the StorageClass can dynamically provision a PV:
# an nginx Deployment mounts a PVC backed by the nfs-client StorageClass.
# NOTE: the nfs export must be writable by the provisioner (see note above).
kind: Deployment
apiVersion: apps/v1
metadata:
  name: sc-test
spec:
  selector:
    matchLabels:
      app: sc-test
  template:
    metadata:
      labels:
        app: sc-test
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - mountPath: /usr/share/nginx/html
              name: html
      volumes:
        - name: html
          persistentVolumeClaim:   # volume type: PVC
            claimName: nfs-pvc     # must match the PVC name below
---
apiVersion: v1
kind: PersistentVolumeClaim       # the claim the StorageClass satisfies
metadata:
  name: nfs-pvc                   # referenced by the Deployment volume above
  namespace: default
spec:
  storageClassName: nfs-client    # name from `kubectl get sc`
  accessModes:                    # access policy
    - "ReadWriteMany"
  resources:
    requests:
      storage: "10Gi"             # requested PV size


[root@k8s-master-01 k8s]# kubectl apply -f sc-test.yaml
deployment.apps/nfs created
persistentvolumeclaim/nfs created
[root@k8s-master-01 k8s]# kubectl get pvc
NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
nfs-pvc   Bound   pvc-7063818c-2ed9-4e55-8e43-b42ffee66560   10Gi       RWX           nfs-client     12m
[root@k8s-master-01 k8s]# kubectl get pods -o wide
NAME                       READY   STATUS   RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
sc-test-5d94bf9556-vjvs6   1/1     Running   0         12m   10.241.48.3   k8s-node-02   <none>           <none>
# 来到之前挂载的/nfs/v3的目录,会生成一个存储卷pvc-7063818c-2ed9-4e55-8e43-b42ffee66560
# echo `pwd` > index.html
# curl 10.241.48.3
# 如果不是403,那么就表示挂载成功了

2、MySQL一主多从

2.1、二进制安装2台MySQL

官网:https://www.mysql.com/
# 首先必须先卸载mariadb
[root@localhost ~]# rpm -qa | grep mysql
[root@localhost ~]# rpm -qa | grep mariadb
mariadb-libs-5.5.68-1.el7.x86_64
[root@localhost ~]# yum -y remove mariadb-libs.x86_64

1、下载安装包
[root@mysql-master ~]# wget https://dev.mysql.com/get/Downloads/MySQL-5.7/mysql-5.7.36-linux-glibc2.12-x86_64.tar.gz

2、安装
[root@mysql-master ~]# tar -xf mysql-5.7.36-linux-glibc2.12-x86_64.tar.gz -C /usr/local/
[root@mysql-master local]# ln -s /usr/local/mysql-5.7.36-linux-glibc2.12-x86_64 /usr/local/mysql

3、统一用户
[root@mysql-master ~]# useradd -M -s /sbin/nologin -r mysql

4、依赖包
[root@mysql-master ~]# yum install -y ncurses-devel libaio-devel gcc gcc-c++ numactl libaio glibc cmake autoconf

5、创建一个数据存放目录
[root@mysql-master ~]# mkdir /mysql_data

6、统一授权mysql的所有相关目录
[root@mysql-master ~]# chown mysql.mysql /mysql_data
[root@mysql-master ~]# chown mysql.mysql -R /usr/local/mysql
[root@mysql-master ~]# chown mysql.mysql -R /usr/local/mysql-5.7.36-linux-glibc2.12-x86_64/

7、添加配置文件
[root@mysql-master ~]# vim /etc/my.cnf
# MySQL server configuration (paths match the binary install above)
[mysqld]
# installation directory
basedir=/usr/local/mysql
# data directory
datadir=/mysql_data
# listen port
port=3306
# unix socket file location
socket=/usr/local/mysql/mysql.sock
# default server character set
character-set-server=utf8
# error log path (the initial root password is also written here)
log-error=/var/log/mysqld.log
# PID file path
pid-file=/tmp/mysqld.pid

# mysql command-line client configuration
[mysql]
socket=/usr/local/mysql/mysql.sock

# configuration shared by all client programs
[client]
socket=/usr/local/mysql/mysql.sock

8、初始化mysql
[root@mysql-master ~]# touch /var/log/mysqld.log
[root@mysql-master ~]# chown mysql.mysql /var/log/mysqld.log
[root@mysql-master ~]# /usr/local/mysql/bin/mysqld --initialize --user=mysql --basedir=/usr/local/mysql --datadir=/mysql_data
# 初始化后在/mysql_data,数据目录下就会有数据产生了

9、加入Systemd管理,注册MySQL服务
[root@mysql-master ~]# vim /usr/lib/systemd/system/mysqld.service
# systemd unit registering the binary-installed MySQL as a service
[Unit]
Description=MySQL Server
Documentation=man:mysqld(8)
Documentation=https://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target
After=syslog.target
[Install]
WantedBy=multi-user.target
[Service]
User=mysql
Group=mysql
# --defaults-file points mysqld at the /etc/my.cnf written above
ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/etc/my.cnf
LimitNOFILE = 5000

10、测试启动mysql
[root@mysql-master ~]# systemctl daemon-reload
[root@mysql-master ~]# systemctl start mysqld
[root@mysql-master ~]# systemctl enable mysqld

11、测试连接,由于/usr/local/mysql不是在系统环境变量里面,所以我们要把这个目录加入环境变量
[root@mysql-master mysql]# vim /etc/profile
export MYSQL_HOME=/usr/local/mysql
export PATH=$PATH:$MYSQL_HOME/bin
# 配置完以后重新加载profile配置文件
[root@mysql-master mysql]# source /etc/profile

# 获取mysql的密码来登录mysql
# 密码的路径在/var/log/mysqld.log里面
[root@mysql-master mysql]# grep 'temporary password' /var/log/mysqld.log
# 登录数据库
[root@mysql-master mysql]# mysql -uroot -p'F?twnkaZ2rZJ'
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.7.36

Copyright (c) 2000, 2021, Oracle and/or its affiliates.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql>

# 修改默认密码
mysql> alter user   root@localhost   identified by  'Test123!';
Query OK, 0 rows affected (0.00 sec)

2.2、部署主从复制(异步复制)

2.2.1、第一步:创建复制用户(一定是在主库上创建)
1.在主库上创建一个用于复制的账号
mysql> grant replication slave on *.* to 'xl'@'%' identified by '123456';
Query OK, 0 rows affected, 1 warning (0.00 sec)

2.刷新权限
mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)
2.2.2、开启binlog日志(主库和从库)
# 主库和从库中都要开启binlog日志
[root@mysql-master ~]# vim /etc/my.cnf
# 在集群中,server_id必须唯一(就是说主库和从库的server-id一定不能一样!!)
server-id=1
# binlog日志一定要放在数据目录里
log-bin=/mysql_data/log-bin/binlog

#在数据目录里创建log-bin目录并赋予权限
[root@mysql-master mysql_data]# mkdir /mysql_data/log-bin
[root@mysql-master mysql_data]# chown mysql.mysql -R /mysql_data/log-bin
[root@mysql-master ~]# systemctl restart mysqld
2.2.3、实现主从复制(在从节点上实现)
1、查看主节点binlog日志的状态
mysql> show master status ;
+---------------+----------+--------------+------------------+-------------------+
| File         | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+---------------+----------+--------------+------------------+-------------------+
| binlog.000001 |      154 |             |                 |                   |
+---------------+----------+--------------+------------------+-------------------+
1 row in set (0.00 sec)

2、在从库配置主从复制

change master to
# 主库服务器的ip
master_host='192.168.15.60',
master_port=3306,
# 复制用户的id
master_user='xl',
# 复制用户的密码
master_password='123456',
# 主库binlog的名字
master_log_file='binlog.000001',
# 主库日志偏移量,即从何处开始复制
master_log_pos=154;

3、开启主从复制
mysql> start slave ;
Query OK, 0 rows affected (0.00 sec)

4、查看状态
# Slave_IO_Running和Slave_SQL_Running这2个线程状态都是yes的话就表示开启了主从复制
mysql> show slave status \G
*************************** 1. row ***************************
            Slave_IO_Running: Yes
          Slave_SQL_Running: Yes
# 进入主节点创建一个库,去从节点查看也能看得到

3.k8s容器内部署主从

大概原理
# server-id ===》 由于实现主从的话server-id必须要唯一,所以一定的要在容器启动之后,mysql启动之前改server-id
# sed -i "s/server-id=1/server-id=$RANDOM/g" /etc/mysql/mysql.conf.d/server.cnf 把这个命令放在mysql的启动脚本里entrypoint.sh(起一个pod查看pod就可以看到mysql的启动脚本了)
# 把entrypoint.sh这个脚本从容器内拷贝出来docker cp pod-id:/usr/local/bin/docker-entrypoint.sh .
# 重写一个mysql镜像
FROM mysql:5.7
ADD docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
ADD mysqld.cnf /etc/mysql/mysql.conf.d/server.cnf

具体步骤:
# 1.首先创建1个mysql容器
[root@k8s-master-01 ~]# docker run -d -e MYSQL_ROOT_PASSWORD=123456 mysql:5.7
# 1.查看mysql容器的id
[root@k8s-master-01 ~]# docker ps
# 把容器里面的docker-entrypoint.sh拷贝出来
[root@k8s-master-01 ~]# docker cp 0966ffa302a2:/usr/local/bin/docker-entrypoint.sh .
# 编辑docker-entrypoint.sh
[root@k8s-master-01 ~]# vim docker-entrypoint.sh
# 在文件开头加入一句话
sed -i "s/server-id=1/server-id=$RANDOM/g" /etc/mysql/mysql.conf.d/server.cnf
# 创建mysqld.cnf配置文件,因为这个目录下才有mysql权限
[root@k8s-master-01 ~]# vim mysqld.cnf
[mysqld]
# placeholder value; the patched entrypoint rewrites it to $RANDOM at container start
server-id=1
# binlog under the MySQL datadir so the mysql user can write it
log-bin=/var/lib/mysql/binlog
# 编写mysql的dockerfile
# Custom MySQL image: stock 5.7 plus a patched entrypoint and replication config.
FROM mysql:5.7
# entrypoint edited above to rewrite server-id before mysqld starts
ADD docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
# installed as server.cnf under mysql.conf.d so it is read at startup
ADD mysqld.cnf /etc/mysql/mysql.conf.d/server.cnf
# 生成镜像
[root@k8s-master-01 ~]# docker build -t xuliang19950120/mysql:v1 .
# 上传镜像
[root@k8s-master-01 mysql-container]# docker push xuliang19950120/mysql:v1

# 编写数据库主从容器的yaml文件
# mysql主节点部署
---
# Root password shared by the master/slave pods via envFrom.
kind: Secret
apiVersion: v1
metadata:
  name: mysql
data:
  MYSQL_ROOT_PASSWORD: MTIzNDU2   # base64 of "123456"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mysql-master
spec:
  selector:
    matchLabels:
      app: mysql
      devel: mysql-master
  template:
    metadata:
      labels:
        app: mysql
        devel: mysql-master
    spec:
      containers:
        - name: mysql
          # use the image built and pushed above; the original referenced
          # tiantian654321/mysql:v1, which is never built in this tutorial
          image: xuliang19950120/mysql:v1
          imagePullPolicy: IfNotPresent
          envFrom:
            - secretRef:
                name: mysql
---
# Stable DNS name (mysql-master.default.svc.cluster.local) for the master.
kind: Service
apiVersion: v1
metadata:
  name: mysql-master
spec:
  ports:
    - port: 3306
      targetPort: 3306
      name: mysql-master
  selector:
    app: mysql
    devel: mysql-master
---
# One-shot Job: wait until the master answers, then create the replication user.
kind: Job
apiVersion: batch/v1
metadata:
  name: create-user
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: mysql
          image: xuliang19950120/mysql:v1
          command:
            - "/bin/sh"
            - "-c"
            - |
              while true;
              do
                # probe the master; ">/dev/null 2>&1" instead of the bash-only
                # "&>/dev/null" because this script runs under /bin/sh
                /usr/bin/mysql -uroot -p123456 -hmysql-master.default.svc.cluster.local -e"status;" >/dev/null 2>&1

                if [ $? -eq 0 ];then

                  /usr/bin/mysql -uroot -p123456 -hmysql-master.default.svc.cluster.local -e "grant replication slave on *.* to 'xl'@'%' identified by '123456';"
                  /usr/bin/mysql -uroot -p123456 -hmysql-master.default.svc.cluster.local -e "flush privileges;"

                  break

                fi

              done
             
# 如下表示运行成功
[root@k8s-master-01 mysql-container]# kubectl apply -f mysql-master.yaml
secret/mysql created
deployment.apps/mysql-master created
service/mysql-master created
job.batch/create-user created
[root@k8s-master-01 mysql-container]# kubectl get pod
NAME                           READY   STATUS     RESTARTS   AGE
create-user-z5rgt             0/1     Completed   0         8s
mysql-master-586b779f4-75l6d   1/1     Running     0         8s

# 启动用户user也创建了
[root@k8s-master-01 mysql-container]# kubectl exec -it mysql-master-7578b8c897-5bsjs -- bash
[root@k8s-master-01 mysql-container]# mysql -uroot -p123456
mysql> SELECT User FROM mysql.user;
+---------------+
| User         |
+---------------+
| root         |
| xl           |
| mysql.session |
| mysql.sys     |
| root         |
+---------------+
5 rows in set (0.00 sec)


#==================================================================================================================

# mysql从节点部署
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mysql-slave
spec:
  selector:
    matchLabels:
      app: mysql
      devel: mysql-slave
  template:
    metadata:
      labels:
        app: mysql
        devel: mysql-slave
    spec:
      containers:
        - name: mysql
          # NOTE(review): only :v1 is built/pushed earlier in this tutorial —
          # confirm a :v2 tag actually exists before applying
          image: xuliang19950120/mysql:v2
          envFrom:
            - secretRef:
                name: mysql   # Secret created by the master manifest
---
kind: Service
apiVersion: v1
metadata:
  name: mysql-slave
spec:
  selector:
    app: mysql
    devel: mysql-slave
  ports:
    - port: 3306
      targetPort: 3306
      name: slave
---
# One-shot Job: wait for the master, read its binlog file/position, then
# point the slave at it and start replication.
kind: Job
apiVersion: batch/v1
metadata:
  name: add-cluster
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: mysql
          image: xuliang19950120/mysql:v2
          command:
            - "/bin/sh"
            - "-c"
            - |
              while true;
              do
                # probe the master; ">/dev/null 2>&1" instead of the bash-only
                # "&>/dev/null" because this script runs under /bin/sh
                /usr/bin/mysql -uroot -p123456 -hmysql-master.default.svc.cluster.local -e"status;" >/dev/null 2>&1

                if [ $? -eq 0 ];then

                  mysql -uroot -p123456 -hmysql-master.default.svc.cluster.local -e "show master status ;" | awk 'NR==2{print $0}' > log

                  FILE=`cat log | awk '{print $1}'`
                  Position=`cat log | awk '{print $2}'`

                  mysql -uroot -p123456 -hmysql-slave.default.svc.cluster.local -e "change master to master_host='mysql-master.default.svc.cluster.local', master_port=3306, master_user='xl', master_password='123456', master_log_file='${FILE}', master_log_pos=${Position};"

                  mysql -uroot -p123456 -hmysql-slave.default.svc.cluster.local -e "start slave;"

                  break
                fi

              done
             
# 如下表示运行成功        
[root@k8s-master-01 mysql-container]# kubectl apply -f mysql-slave.yaml
deployment.apps/mysql-slave created
service/mysql-slave created
job.batch/add-cluster created
[root@k8s-master-01 mysql-container]# kubectl get pod
NAME                           READY   STATUS             RESTARTS   AGE
add-cluster-nvhds             0/1     ContainerCreating   0         4s
create-user-z5rgt             0/1     Completed           0         2m1s
mysql-master-586b779f4-75l6d   1/1     Running             0         2m1s
mysql-slave-779c5f8f75-hp566   0/1     ContainerCreating   0         4s

# 进入mysql从节点
[root@k8s-master-01 mysql-container]# kubectl exec -it mysql-slave-779c5f8f75-hp566 -- bash
root@mysql-slave-779c5f8f75-hp566:/# mysql -uroot -p123456
# Slave_IO_Running和Slave_SQL_Running这2个线程状态都是yes的话就表示开启了主从复制
mysql> show slave status \G
*************************** 1. row ***************************
            Slave_IO_Running: Yes
          Slave_SQL_Running: Yes

# 注意: 启动的用户,主节点和从节点必须一样(记得修改),这里都要为xl
# server-id检查必须是唯一
# 注意:如果show slave status \G出现Empty set的话,就把mysql从节点的里面的job任务删除,再执行一下就可以了
如下:
[root@k8s-master-01 mysql-container]# kubectl delete jobs.batch add-cluster
[root@k8s-master-01 mysql-container]# kubectl apply -f mysql-slave.yaml
# 再次进入从节点查看就可以成功了

 

 

 

posted @ 2022-01-12 22:43  甜甜de微笑  阅读(122)  评论(1编辑  收藏  举报