Deploying Kubernetes Clusters, Including High Availability, with the Lightweight Edge-Focused k3s
Setting up k3s and deploying a k8s cluster
1. CentOS 7 base environment configuration
1. Upgrade the CentOS kernel to 5.4
Set the hostname on each node
# Choose your own hostnames; they must be unique across nodes
echo master > /etc/hostname
echo node > /etc/hostname
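Writing to /etc/hostname only takes effect after a reboot; if you want the change applied immediately as well, hostnamectl does both in one step (an equivalent alternative, not required):
hostnamectl set-hostname master   # run the matching command on each node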
Upgrade the kernel
2.1: Import the ELRepo public key
[root@localhost ~]# rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
2.2: Install ELRepo
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
2.3: Install the kernel-lt kernel
[root@localhost ~]# yum -y --disablerepo='*' --enablerepo=elrepo-kernel install kernel-lt
2.4: List the boot entries
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
2.5: Set the default kernel boot entry, then reboot the server
grub2-set-default 0
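Reboot, then confirm the machine actually came up on the new kernel (a quick check worth doing before moving on):
reboot
# after the machine is back up:
uname -r   # should now report the kernel-lt 5.4.x version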
2. Install Docker CE
Note: Docker is only needed if the k8s cluster should run on Docker rather than k3s' bundled containerd.
# Configure the Docker CE repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker (latest version by default)
yum install docker-ce -y
Configure a Docker registry mirror. This example uses the Alibaba Cloud accelerator; you can find your own accelerator URL in the Alibaba Cloud console.
sudo mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://bmd06omc.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload
systemctl restart docker
systemctl enable docker   # start on boot
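To confirm the mirror actually took effect, docker info prints the configured mirrors (optional verification):
docker info | grep -A1 'Registry Mirrors'
# should list https://bmd06omc.mirror.aliyuncs.com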
2. Quickly deploy k3s with one master and two nodes
1. Deploy the k3s server (master)
Note: the CA certificates that k3s self-signs are valid for 5 years and the etcd certificates for 10 years. To use a different validity period, do not start k3s while creating the cluster (or stop it right after it starts), back up the certificate files under /etc/kubernetes/ssl, change the validity range in the JSON config files, and use cfssl to manually create the following CA certificates and keys:
(ca.csr,ca.pem,ca-key.pem,etcd.pem,etcd-key.pem)
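To check how long the generated certificates are actually valid, openssl can print the validity window (an optional check; adjust the path to wherever your certificates live):
openssl x509 -noout -dates -in /etc/kubernetes/ssl/ca.pem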
Run on the master server.
# When the script finishes, all services are started; the default API port is 6443
1. Install using k3s' bundled containerd
[root@master ~]# curl -sfL https://get.k3s.io | sh -
# Install using Docker as the container runtime
[root@master ~]# curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --docker" sh -s -
View the token; it is needed when joining node (agent) machines:
[root@master ~]# cat /var/lib/rancher/k3s/server/node-token
2. Deploy the k3s agent on the worker nodes
# Master address
[root@node2 ~]# k3s_url="https://192.168.137.3:6443"
# Set the auth variable on the node, using the token obtained from the master
k3s_token="K106153a00154358abcbb8eef6e274fda6a4aa27e1f3cc60230fc406a53841e22d0::server:61ecd11a0f58ca8367280a403feafafb"
# Join the node to the master using k3s' bundled containerd
[root@node2 ~]# curl -sfL https://get.k3s.io | K3S_URL=${k3s_url} K3S_TOKEN=${k3s_token} sh -
# Join the cluster using Docker
curl -sfL https://get.k3s.io | K3S_URL=${k3s_url} K3S_TOKEN=${k3s_token} sh -s - agent --docker
Check node status on the master:
[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   27m     v1.20.6+k3s1
node2    Ready    <none>                 8m41s   v1.20.6+k3s1
node1    Ready    <none>                 3m35s   v1.20.6+k3s1
3. Install the helm command
# Helm helps manage Kubernetes applications; even complex ones can be defined, installed, and upgraded with it
wget https://get.helm.sh/helm-v3.2.4-linux-amd64.tar.gz
tar -zxvf helm-v3.2.4-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
[root@master ~]# helm version
# Add a Helm chart repository
helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
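Refresh the local chart index and confirm the repository is usable (not part of the original steps):
helm repo update
helm search repo rancher-stable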
3. Deploying a highly available k8s cluster with k3s
Note: the HA deployment is the one suited for production environments.
1. Install MySQL from the binary tarball
# Option 1: use an external database as the cluster datastore
Download the binary tarball mysql-5.7.34-linux-glibc2.5-x86_64.tar.gz from the MySQL website.
Install MySQL 5.7.34 (into /usr/local/mysql).
(1) Create the mysql user account
useradd -s /sbin/nologin -M mysql
(2) Extract the tarball
tar xf mysql-5.7.34-linux-glibc2.5-x86_64.tar.gz -C /usr/local/
(3) Rename the directory
cd /usr/local
mv mysql-5.7.34-linux-glibc2.5-x86_64 mysql
(4) Copy the config file (you can also keep the system's my.cnf and skip the cp)
cp /usr/local/mysql/support-files/my-default.cnf /etc/my.cnf
Edit my.cnf, using the officially recommended parameters:
[client]
port = 3306
socket = /usr/local/mysql/mysql.sock
default-character-set = utf8
[mysqld]
skip-name-resolve
user = mysql
basedir = /usr/local/mysql
datadir = /usr/local/mysql/data
port = 3306
server_id = 10
socket = /usr/local/mysql/mysql.sock
pid-file = /usr/local/mysql/mysql.pid
log-error = /usr/local/mysql/mysql.err
log-bin = /usr/local/mysql/mysql-bin
character-set-server=utf8
(5) Initialize the database
yum -y install autoconf   # installing this also pulls in the Data::Dumper module
chown mysql:mysql -R /usr/local/mysql
/usr/local/mysql/bin/mysql_install_db \
--defaults-file=/etc/my.cnf \
--user=mysql \
--basedir=/usr/local/mysql \
--datadir=/usr/local/mysql/data
Configure and start MySQL:
cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
chmod 777 /etc/init.d/mysqld
[root@master local]# chown mysql:mysql -R /usr/local/mysql
(Note: a binary MySQL install defaults to /usr/local/mysql; if yours lives elsewhere, replace every /usr/local/mysql inside the init script.)
service mysqld start
Enable start on boot:
chkconfig --add mysqld
chkconfig mysqld on
chkconfig --list mysqld
Configure the environment variable:
vim /etc/profile   # add: export PATH="$PATH:/usr/local/mysql/bin"
source /etc/profile
View the initial password:
[root@master mysql]# cat /root/.mysql_secret
Change the MySQL password (MySQL 5.7):
mysqladmin -u root password '123456'   # pick a password of your own
If the initial password cannot be found or does not work, you can skip authentication and reset it as follows.
Stop the database:
[root@k8s-node03 ~]# vim /etc/my.cnf
Add the following under [mysqld]:
skip-grant-tables
[root@k8s-node03 ~]# mysql -u root -p   # press Enter at the prompt; after resetting the password, exit, remove skip-grant-tables, and restart the database
update mysql.user set authentication_string=password("spms") where user="root";
Clean up unused MySQL users and databases.
Log in to MySQL:
> select user,host from mysql.user;   -- list existing users
> drop user "root"@"::1";
> drop user ""@"localhost";
> GRANT ALL PRIVILEGES on *.* to 'root'@'%' identified by '123456';
> drop user ""@"localhost.localdomain";
> drop user "root"@"localhost.localdomain";
2. Install an etcd cluster to back the k3s cluster
1) Install cfssl
Certificates can be generated on any node; here we work on k8s-master01. They only need to be created once: when adding new nodes to the cluster later, just copy the certificates under /etc/kubernetes/ssl to the new node.
[root@k8s-master01 ~]# mkdir -p k8s/cfssl
[root@k8s-master01 ~]# cd k8s/cfssl/
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 cfssl]# chmod +x cfssl_linux-amd64
[root@k8s-master01 cfssl]# cp cfssl_linux-amd64 /usr/local/bin/cfssl
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 cfssl]# chmod +x cfssljson_linux-amd64
[root@k8s-master01 cfssl]# cp cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@k8s-master01 cfssl]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 cfssl]# chmod +x cfssl-certinfo_linux-amd64
[root@k8s-master01 cfssl]# cp cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
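A quick sanity check that cfssl landed on the PATH (optional):
[root@k8s-master01 cfssl]# cfssl version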
2) Create the CA root certificate
Maintaining several CAs is far too cumbersome, so this single CA certificate is used to sign the certificates of all other cluster components.
The file below contains the configuration used later when signing the etcd, kubernetes, and other certificates.
[root@k8s-master01 cfssl]# mkdir -pv /opt/k8s/certs
[root@k8s-master01 cfssl]# cd /opt/k8s/certs
[root@k8s-master01 ~]# vim /opt/k8s/certs/ca-config.json
{
  "signing": {
    "default": {
      "expiry": "438000h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "438000h"
      }
    }
  }
}
- ca-config.json: may define multiple profiles with different expiry times, usages, and so on; a specific profile is chosen later when signing a certificate;
- signing: the certificate can be used to sign other certificates; the generated ca.pem will carry CA=TRUE;
- server auth: a client may use this CA to verify the certificate a server presents;
- client auth: a server may use this CA to verify the certificate a client presents;
- expiry: certificate lifetime; 438000h works out to about 50 years. If security matters more to you, shorten it.
3) Create the CA certificate signing request (CSR) template
[root@k8s-master01 ~]# vim /opt/k8s/certs/ca-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "ca": {
    "expiry": "438000h"
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
4) Generate the CA certificate, private key, and CSR
This command produces the files the CA needs to operate: ca-key.pem (private key) and ca.pem (certificate), plus ca.csr (the certificate signing request, used for cross-signing or re-signing).
[root@k8s-master01 ~]# cd /opt/k8s/certs/
[root@k8s-master01 certs]# cfssl gencert -initca /opt/k8s/certs/ca-csr.json | cfssljson -bare ca
2020/07/12 23:40:00 [INFO] generating a new CA key and certificate from CSR
2020/07/12 23:40:00 [INFO] generate received request
2020/07/12 23:40:00 [INFO] received CSR
2020/07/12 23:40:00 [INFO] generating key: rsa-2048
2020/07/12 23:40:00 [INFO] encoded CSR
2020/07/12 23:40:00 [INFO] signed certificate with serial number 354020342575279199560368628329657160760292915284
5) Distribute the certificates
[root@k8s-master01 certs]# ansible k8s-all -m copy -a 'src=/opt/k8s/certs/ca.csr dest=/etc/etcd/'
[root@k8s-master01 certs]# ansible k8s-all -m copy -a 'src=/opt/k8s/certs/ca-key.pem dest=/etc/etcd/'
[root@k8s-master01 certs]# ansible k8s-all -m copy -a 'src=/opt/k8s/certs/ca.pem dest=/etc/etcd/'
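Optionally verify that the files arrived on every node; this reuses the same k8s-all inventory group:
[root@k8s-master01 certs]# ansible k8s-all -m shell -a 'ls -l /etc/etcd/'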
etcd is the most critical component of a k8s cluster: it stores all of the cluster's service state, and if etcd is down, the cluster is down. Here we deploy etcd across the three master nodes for high availability. etcd elects its leader with the Raft algorithm, and because Raft needs a majority vote to make decisions, an etcd cluster should have an odd number of members; 3, 5, or 7 nodes are the recommended sizes.
Official releases: https://github.com/coreos/etcd/releases
# Installing ansible makes rolling out the etcd cluster much faster
yum -y install epel-release
yum -y install ansible
1) Download the etcd binaries
etcd ships as prebuilt binaries; extract the tarball and copy them to the target directory.
[root@k8s-master01 ~]# cd k8s/
[root@k8s-master01 k8s]# wget https://mirrors.huaweicloud.com/etcd/v3.3.23/etcd-v3.3.23-linux-amd64.tar.gz
[root@k8s-master01 k8s]# tar -xf etcd-v3.3.23-linux-amd64.tar.gz
[root@k8s-master01 k8s]# cd etcd-v3.3.23-linux-amd64/   ## contains 2 binaries; etcdctl is the CLI for operating etcd
## Copy the etcd binaries to every etcd node
[root@k8s-master01 ~]# ansible k8s-all -m copy -a 'src=/root/k8s/etcd-v3.3.23-linux-amd64/etcd dest=/usr/local/bin/ mode=0755'
[root@k8s-master01 ~]# ansible k8s-all -m copy -a 'src=/root/k8s/etcd-v3.3.23-linux-amd64/etcdctl dest=/usr/local/bin/ mode=0755'
Note: without ansible, you can simply scp the two binaries into /usr/local/bin/ on each of the three master nodes.
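A minimal scp sketch of that alternative, assuming the three masters resolve as k8s-master01 through k8s-master03 (placeholder hostnames):
for host in k8s-master01 k8s-master02 k8s-master03; do
  scp etcd etcdctl ${host}:/usr/local/bin/
done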
2) Create the etcd certificate request file
[root@k8s-master01 ~]# vim /opt/k8s/certs/etcd-csr.json   ## certificate request file
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.113.138",
    "192.168.113.139",
    "192.168.113.140",
    "192.168.113.141",
    "192.168.113.142"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
Note: the IPs in hosts are the etcd node IPs plus the local 127.0.0.1 address; the etcd certificate must include every node IP. In production it is best to reserve a few spare IPs in the hosts list, so that scaling out or migrating after a failure does not force you to regenerate the certificate.
3) Generate the certificate and private key
Mind the exact certificate paths used in the command.
[root@k8s-master01 ~]# cd /opt/k8s/certs/
[root@k8s-master01 certs]# cfssl gencert -ca=/opt/k8s/certs/ca.pem -ca-key=/opt/k8s/certs/ca-key.pem -config=/opt/k8s/certs/ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
2020/07/12 23:57:10 [INFO] generate received request
2020/07/12 23:57:10 [INFO] received CSR
2020/07/12 23:57:10 [INFO] generating key: rsa-2048
2020/07/12 23:57:11 [INFO] encoded CSR
2020/07/12 23:57:11 [INFO] signed certificate with serial number 117864690592567978439940422940262623097240517922
2020/07/12 23:57:11 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
4) Inspect the certificates
etcd.csr is an intermediate file used during signing. If you would rather have a third-party CA sign the certificate instead of signing it yourself, submit the etcd.csr file to that CA.
[root@k8s-master01 certs]# ll etcd*
-rw-r--r--. 1 root root 1066 Apr 22 17:17 etcd.csr
-rw-r--r--. 1 root root  293 Apr 22 17:10 etcd-csr.json
-rw-------. 1 root root 1679 Apr 22 17:17 etcd-key.pem
-rw-r--r--. 1 root root 1444 Apr 22 17:17 etcd.pem
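cfssl-certinfo, installed earlier, can decode the certificate to show the SANs and validity window (optional check):
[root@k8s-master01 certs]# cfssl-certinfo -cert etcd.pem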
5) Distribute the certificates
Copy the generated etcd certificates into the certificate directory on every etcd node.
Normally only these three files need to be copied: ca.pem (already in place), etcd-key.pem, and etcd.pem.
ansible k8s-all -m copy -a 'src=/opt/k8s/certs/etcd.pem dest=/etc/etcd/'
ansible k8s-all -m copy -a 'src=/opt/k8s/certs/etcd-key.pem dest=/etc/etcd/'
6) Create the etcd runtime user
For security, I start etcd under a dedicated user.
## Create the etcd user and group
[root@k8s-master01 ~]# ansible k8s-all -m group -a 'name=etcd'
[root@k8s-master01 ~]# ansible k8s-all -m user -a 'name=etcd group=etcd comment="etcd user" shell=/sbin/nologin home=/var/lib/etcd createhome=no'
## Create the etcd data directory and grant ownership
[root@k8s-master01 ~]# ansible k8s-all -m file -a 'path=/var/lib/etcd state=directory owner=etcd group=etcd'
7) Write the etcd configuration file
etcd.conf references the certificates, and the etcd user must have read access to them, otherwise etcd will report that it cannot load the certificates; mode 644 is sufficient.
[root@k8s-master01 certs]# mkdir -pv /opt/k8s/cfg
[root@k8s-master01 certs]# vim /opt/k8s/cfg/etcd.conf
#[member]
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://10.10.0.18:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.10.0.18:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
ETCD_AUTO_COMPACTION_RETENTION="1"
ETCD_QUOTA_BACKEND_BYTES="8589934592"
ETCD_MAX_REQUEST_BYTES="5242880"
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.10.0.18:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd01=https://10.10.0.18:2380,etcd02=https://10.10.0.19:2380,etcd03=https://10.10.0.20:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.10.0.18:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/etc/etcd/ca.pem"
ETCD_CERT_FILE="/etc/etcd/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/etc/etcd/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/etcd-key.pem"
Parameter explanations:
- ETCD_NAME: the member name of this etcd node; it must be unique within the cluster. Hostname or machine-id both work.
- ETCD_LISTEN_PEER_URLS: address for traffic with the other members; different on every node, and it must be an IP, a domain name will not work.
- ETCD_LISTEN_CLIENT_URLS: address serving client traffic, usually the local node; a domain name will not work.
- ETCD_INITIAL_ADVERTISE_PEER_URLS: the peer address this node listens on and announces to the rest of the cluster.
- ETCD_INITIAL_CLUSTER: all cluster members, each in the form name=peer URL, i.e. ETCD_NAME=https://ETCD_INITIAL_ADVERTISE_PEER_URLS.
- ETCD_ADVERTISE_CLIENT_URLS: the client URLs this member advertises to the cluster; a domain name is allowed here.
- ETCD_AUTO_COMPACTION_RETENTION: auto-compaction retention for the MVCC key-value store, in hours; 0 disables auto compaction.
- ETCD_QUOTA_BACKEND_BYTES: size quota of the etcd backend database; the default is 2 GB, 8 GB is recommended here.
- ETCD_MAX_REQUEST_BYTES: maximum size of a client request; the default is 1.5 MB and the official recommendation is up to 10 MB. I use 5 MB here; set it according to your workload.
Because this is a three-node etcd cluster, copy etcd.conf to the other 2 nodes and change the IP-bearing parameters described above to each host's own IP.
Distribute etcd.conf; if you are not using ansible, scp the file to the matching path on all three machines, then adjust IP, ETCD_NAME, and the other per-node parameters on each.
[root@k8s-master01 certs]# ansible k8s-all -m shell -a 'mkdir -p /etc/kubernetes/config'
[root@k8s-master01 config]# ansible k8s-all -m copy -a 'src=/opt/k8s/cfg/etcd.conf dest=/etc/kubernetes/config/etcd.conf'
## Log on to each host and edit the config file, replacing the template IPs with the local IP
Note: each node's etcd config must use its own IP.
Edit the etcd.service unit file:
[root@k8s-master01 certs]# mkdir -pv /opt/k8s/unit
[root@k8s-master01 ~]# vim /opt/k8s/unit/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/kubernetes/config/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@k8s-master01 ~]# ansible k8s-all -m copy -a 'src=/opt/k8s/unit/etcd.service dest=/usr/lib/systemd/system/etcd.service'
[root@k8s-node1 ~]# sed -i 's/10.10.0.18/192.168.3.3/g' /etc/kubernetes/config/etcd.conf
[root@k8s-master01 certs]# vim /etc/kubernetes/config/etcd.conf   (adjust the following entries on each node)
ETCD_NAME="etcd01"
ETCD_LISTEN_CLIENT_URLS="https://192.168.3.3:2379,https://127.0.0.1:2379"
ETCD_INITIAL_CLUSTER=
[root@k8s-master01 ~]# ansible k8s-all -m shell -a 'systemctl daemon-reload'
[root@k8s-master01 ~]# ansible k8s-all -m shell -a 'systemctl enable etcd'
[root@k8s-master01 ~]# ansible k8s-all -m shell -a 'systemctl start etcd'
Note!
All etcd members must be started together: run the start command on the three machines at the same time. If you start only one, it will block until the other cluster members come up. Because ansible runs the command on all hosts at once here, the issue does not arise.
8) Verify the cluster
With etcd v3, checking cluster health requires pointing at the certificates:
etcdctl --endpoints=https://192.168.3.3:2379,https://192.168.3.4:2379,https://192.168.3.5:2379 --cert-file=/etc/etcd/etcd.pem --ca-file=/etc/etcd/ca.pem --key-file=/etc/etcd/etcd-key.pem cluster-health
member ce541da5f7cc9b5 is healthy: got healthy result from https://192.168.3.3:2379
member 435b1a3be115aec6 is healthy: got healthy result from https://192.168.3.4:2379
member 6a6686c7a775ffbc is healthy: got healthy result from https://192.168.3.5:2379
cluster is healthy
## The cluster reports healthy; etcdctl member list additionally shows which member carries isLeader=true
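The same check through the v3 API uses different flag names; endpoint status also prints an IS LEADER column, matching the note above:
ETCDCTL_API=3 etcdctl --endpoints=https://192.168.3.3:2379,https://192.168.3.4:2379,https://192.168.3.5:2379 \
  --cacert=/etc/etcd/ca.pem --cert=/etc/etcd/etcd.pem --key=/etc/etcd/etcd-key.pem \
  endpoint status -w table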
3. Install the k3s HA cluster
1) Tune the environment before deploying k3s
Write the tuning script:
vim k3s_init.sh
When the script is ready, run it with sh k3s_init.sh
#!/bin/sh
# Disable the SELinux.
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# Turn off and disable the firewalld.
systemctl stop firewalld
systemctl disable firewalld
# Modify related kernel parameters & Disable the swap.
cat > /etc/sysctl.d/k3s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
net.ipv6.conf.all.disable_ipv6 = 1
EOF
sysctl -p /etc/sysctl.d/k3s.conf >&/dev/null
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
modprobe br_netfilter
# Add ipvs modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# Install rpm
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget
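After the script finishes, a quick check that the IPVS modules actually loaded (not part of the original script):
lsmod | grep -e ip_vs -e nf_conntrack_ipv4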
# With etcd as the datastore, run the following command on each of the three masters and wait for the install to complete
To run the cluster on Docker as the container runtime, include --docker in INSTALL_K3S_EXEC, as the command below does.
curl -sfL http://rancher-mirror.cnrancher.com/k3s/k3s-install.sh |K3S_DATASTORE_ENDPOINT='https://192.168.137.3:2379,https://192.168.137.4:2379,https://192.168.137.5:2379' INSTALL_K3S_MIRROR=cn INSTALL_K3S_EXEC="server --docker --data-dir=/etc/k3s --kube-proxy-arg=proxy-mode=ipvs --datastore-cafile=/etc/etcd/ca.pem --datastore-certfile=/etc/etcd/etcd.pem --datastore-keyfile=/etc/etcd/etcd-key.pem" sh -
# Using MySQL as the datastore requires a few changes
Grant database privileges:
MariaDB [(none)]> GRANT ALL PRIVILEGES on *.* to root@"%" IDENTIFIED By '123456';
curl -sfL https://get.k3s.io | sh -s - server --docker --datastore-endpoint='mysql://root:123456@tcp(192.168.113.3:3306)/k3s'
# endpoint format: mysql://user:password@tcp(host:port)/database
On the node where MySQL itself runs, execute:
curl -sfL https://get.k3s.io | sh -s - server --docker --datastore-endpoint='mysql://root:123456@tcp(127.0.0.1:3306)/k3s'
The endpoint format is fixed as shown; substitute your actual values.
After installation, check the k8s cluster status.
Three master nodes are recommended, so the command above can be run once on each of the three hosts.
Check the status with kubectl get nodes:
NAME                 STATUS   ROLES    AGE    VERSION
master1.example.ex   Ready    master   137d   v1.20.7+k3s1
master2.example.ex   Ready    master   137d   v1.20.7+k3s1
master3.example.ex   Ready    master   137d   v1.20.7+k3s1
4. Joining agents
View the token on the master:
cat /etc/k3s/server/node-token   # /etc/k3s is the directory passed via --data-dir above; without --data-dir the token lives under the default /var/lib/rancher/k3s
K10336a5e410440ca65fb6c9ca59d0352c9618c169723dbd8e9c233e9a228ccefef::server:18040a84a14ea4c626f0fe7c12f3d66e
Run on the node:
curl -sfL https://docs.rancher.cn/k3s/k3s-install.sh | INSTALL_K3S_MIRROR=cn INSTALL_K3S_EXEC="agent --server https://192.168.137.3:6443 --docker --token=K10336a5e410440ca65fb6c9ca59d0352c9618c169723dbd8e9c233e9a228ccefef::server:18040a84a14ea4c626f0fe7c12f3d66e" sh -
5. Deploy Rancher 2.x
Edit the YAML file and apply it:
kubectl apply -f rancher.yaml
The rancher.yaml content is as follows:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: proxy-clusterrole-kubeapiserver
rules:
- apiGroups: [""]
  resources:
  - nodes/metrics
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  verbs: ["get", "list", "watch", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: proxy-role-binding-kubernetes-master
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: proxy-clusterrole-kubeapiserver
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kube-apiserver
---
apiVersion: v1
kind: Namespace
metadata:
  name: cattle-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cattle
  namespace: cattle-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cattle-admin-binding
  namespace: cattle-system
  labels:
    cattle.io/creator: "norman"
subjects:
- kind: ServiceAccount
  name: cattle
  namespace: cattle-system
roleRef:
  kind: ClusterRole
  name: cattle-admin
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Secret
metadata:
  name: cattle-credentials-2d27727
  namespace: cattle-system
type: Opaque
data:
  url: "aHR0cHM6Ly8xMC4xOC4zMi4yNTozMDQ0Mw=="
  token: "ZnZyc2JkN2JmdGJ4dmhncWNqbDRyamd4dGg0Z2tiZjljZHh4cXZ4aHB4ejVieGNkbnZoenBw"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cattle-admin
  labels:
    cattle.io/creator: "norman"
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: rancher
  namespace: cattle-system
  labels:
    app: rancher
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: rancher
  template:
    metadata:
      labels:
        app: rancher
    spec:
      containers:
      - name: rancher
        image: rancher/rancher:stable
        imagePullPolicy: Always
        env:
        - name: AUDIT_LEVEL
          value: "3"
        - name: CATTLE_SYSTEM_CATALOG
          value: "bundled"
        ports:
        - containerPort: 80
          name: http
        - containerPort: 443
          name: https
        readinessProbe:
          tcpSocket:
            port: 80
          initialDelaySeconds: 20
          periodSeconds: 10
        livenessProbe:
          tcpSocket:
            port: 80
          initialDelaySeconds: 600
          periodSeconds: 20
      serviceAccountName: cattle
---
apiVersion: v1
kind: Service
metadata:
  name: rancher
  namespace: cattle-system
spec:
  ports:
  - port: 80
    name: http
    protocol: TCP
    nodePort: 30080
  - port: 443
    name: https
    protocol: TCP
    nodePort: 30443
  sessionAffinity: ClientIP
  externalTrafficPolicy: Cluster
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
  type: NodePort
  selector:
    app: rancher
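Once applied, watch the rollout; the pod must pass its readiness probe before the NodePorts answer (optional verification):
kubectl -n cattle-system get pods
kubectl -n cattle-system get svc rancher   # exposes NodePorts 30080/30443 per the Service above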
6. Uninstall k3s
On server nodes, run /usr/local/bin/k3s-uninstall.sh
On agent nodes, run /usr/local/bin/k3s-agent-uninstall.sh