k8s installation -- deploying Kubernetes with kubeadm

1. Preparation (run on both the master and the node)

[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
Removed symlink /etc/systemd/system/basic.target.wants/firewalld.service.
[root@localhost ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@localhost ~]# setenforce 0
[root@localhost ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@localhost ~]# swapoff -a
[root@localhost ~]#

 

[root@localhost ~]# cat >> /etc/hosts << EOF
> 192.168.1.200 k8smaster
> 192.168.1.201 k8snode
> EOF
[root@localhost ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@localhost ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/60-libvirtd.conf ...
fs.aio-max-nr = 1048576
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...

 

[root@localhost ~]# ntpdate time.windows.com
15 Oct 01:18:21 ntpdate[12996]: adjust time server 20.189.79.72 offset 0.000498 sec
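The prompts later in this post show the machines renamed to k8smaster and k8snode, and the first kubeadm preflight run below complains that /proc/sys/net/bridge/bridge-nf-call-iptables does not exist and that ip_forward is not set to 1. A minimal sketch of those extra preparation steps (the hostnames are taken from the /etc/hosts entries above; doing it this way is an assumption, not something shown in the original session):

# set the hostname on each machine to match /etc/hosts
hostnamectl set-hostname k8smaster   # on the master
hostnamectl set-hostname k8snode     # on the node

# load br_netfilter so the bridge-nf-call sysctls in /etc/sysctl.d/k8s.conf exist,
# and enable IP forwarding, which the kubeadm preflight checks also require
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.d/k8s.conf
sysctl --system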

 

2. Install Docker (run on both the master and the node)

[root@localhost ~]# yum install wget -y
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.huaweicloud.com
* extras: mirrors.huaweicloud.com
* updates: mirrors.huaweicloud.com
Resolving Dependencies
--> Running transaction check
---> Package wget.x86_64 0:1.14-13.el7 will be updated
---> Package wget.x86_64 0:1.14-18.el7_6.1 will be an update
--> Finished Dependency Resolution

Dependencies Resolved

==============================================================================================================================================================================================
Package Arch Version Repository Size
==============================================================================================================================================================================================
Updating:
wget x86_64 1.14-18.el7_6.1 base 547 k

Transaction Summary
==============================================================================================================================================================================================
Upgrade 1 Package

Total size: 547 k
Downloading packages:
warning: /var/cache/yum/x86_64/7/base/packages/wget-1.14-18.el7_6.1.x86_64.rpm: Header V3 RSA/SHA256 Signature, key ID f4a80eb5: NOKEY
Retrieving key from file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
Importing GPG key 0xF4A80EB5:
Userid : "CentOS-7 Key (CentOS 7 Official Signing Key) <security@centos.org>"
Fingerprint: 6341 ab27 53d7 8a78 a7c2 7bb1 24c6 a8a7 f4a8 0eb5
Package : centos-release-7-3.1611.el7.centos.x86_64 (@anaconda)
From : /etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Updating : wget-1.14-18.el7_6.1.x86_64 1/2
Cleanup : wget-1.14-13.el7.x86_64 2/2
Verifying : wget-1.14-18.el7_6.1.x86_64 1/2
Verifying : wget-1.14-13.el7.x86_64 2/2

Updated:
wget.x86_64 0:1.14-18.el7_6.1

Complete!
[root@localhost ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
--2022-10-15 01:37:18-- https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Resolving mirrors.aliyun.com (mirrors.aliyun.com)... 240e:bf:c800:2911:3::3fe, 240e:bf:c800:2911:3::3fd, 219.144.101.248, ...
Connecting to mirrors.aliyun.com (mirrors.aliyun.com)|240e:bf:c800:2911:3::3fe|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2081 (2.0K) [application/octet-stream]
Saving to: ‘/etc/yum.repos.d/docker-ce.repo’

100%[====================================================================================================================================================>] 2,081 --.-K/s in 0.01s

2022-10-15 01:37:19 (175 KB/s) - ‘/etc/yum.repos.d/docker-ce.repo’ saved [2081/2081]

[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# cat /etc/yum.repos.d/docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg


[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]#
[root@localhost ~]# yum install docker-ce-19.03.13 -y
Loaded plugins: fastestmirror, langpacks
docker-ce-stable | 3.5 kB 00:00:00
(1/2): docker-ce-stable/7/x86_64/updateinfo | 55 B 00:00:00
(2/2): docker-ce-stable/7/x86_64/primary_db | 82 kB 00:00:00
Loading mirror speeds from cached hostfile
* base: mirrors.huaweicloud.com
* extras: mirrors.huaweicloud.com
* updates: mirrors.huaweicloud.com
Resolving Dependencies
--> Running transaction check
---> Package docker-ce.x86_64 3:19.03.13-3.el7 will be installed
--> Processing Dependency: container-selinux >= 2:2.74 for package: 3:docker-ce-19.03.13-3.el7.x86_64
--> Processing Dependency: containerd.io >= 1.2.2-3 for package: 3:docker-ce-19.03.13-3.el7.x86_64
--> Processing Dependency: docker-ce-cli for package: 3:docker-ce-19.03.13-3.el7.x86_64
--> Running transaction check
---> Package container-selinux.noarch 2:2.119.2-1.911c772.el7_8 will be installed
--> Processing Dependency: selinux-policy-targeted >= 3.13.1-216.el7 for package: 2:container-selinux-2.119.2-1.911c772.el7_8.noarch
--> Processing Dependency: selinux-policy-base >= 3.13.1-216.el7 for package: 2:container-selinux-2.119.2-1.911c772.el7_8.noarch
--> Processing Dependency: selinux-policy >= 3.13.1-216.el7 for package: 2:container-selinux-2.119.2-1.911c772.el7_8.noarch
--> Processing Dependency: policycoreutils >= 2.5-11 for package: 2:container-selinux-2.119.2-1.911c772.el7_8.noarch
---> Package containerd.io.x86_64 0:1.6.8-3.1.el7 will be installed
---> Package docker-ce-cli.x86_64 1:20.10.18-3.el7 will be installed
--> Processing Dependency: docker-scan-plugin(x86-64) for package: 1:docker-ce-cli-20.10.18-3.el7.x86_64
--> Running transaction check
---> Package docker-scan-plugin.x86_64 0:0.17.0-3.el7 will be installed
---> Package policycoreutils.x86_64 0:2.5-8.el7 will be updated
--> Processing Dependency: policycoreutils = 2.5-8.el7 for package: policycoreutils-python-2.5-8.el7.x86_64
---> Package policycoreutils.x86_64 0:2.5-34.el7 will be an update
--> Processing Dependency: libsepol >= 2.5-10 for package: policycoreutils-2.5-34.el7.x86_64
--> Processing Dependency: libsemanage >= 2.5-14 for package: policycoreutils-2.5-34.el7.x86_64
--> Processing Dependency: libselinux-utils >= 2.5-14 for package: policycoreutils-2.5-34.el7.x86_64
---> Package selinux-policy.noarch 0:3.13.1-102.el7 will be updated
---> Package selinux-policy.noarch 0:3.13.1-268.el7_9.2 will be an update
---> Package selinux-policy-targeted.noarch 0:3.13.1-102.el7 will be updated
---> Package selinux-policy-targeted.noarch 0:3.13.1-268.el7_9.2 will be an update
--> Running transaction check
---> Package libselinux-utils.x86_64 0:2.5-6.el7 will be updated
---> Package libselinux-utils.x86_64 0:2.5-15.el7 will be an update
--> Processing Dependency: libselinux(x86-64) = 2.5-15.el7 for package: libselinux-utils-2.5-15.el7.x86_64
---> Package libsemanage.x86_64 0:2.5-4.el7 will be updated
--> Processing Dependency: libsemanage = 2.5-4.el7 for package: libsemanage-python-2.5-4.el7.x86_64
---> Package libsemanage.x86_64 0:2.5-14.el7 will be an update
---> Package libsepol.x86_64 0:2.5-6.el7 will be updated
---> Package libsepol.x86_64 0:2.5-10.el7 will be an update
---> Package policycoreutils-python.x86_64 0:2.5-8.el7 will be updated
---> Package policycoreutils-python.x86_64 0:2.5-34.el7 will be an update
--> Processing Dependency: setools-libs >= 3.3.8-4 for package: policycoreutils-python-2.5-34.el7.x86_64
--> Running transaction check
---> Package libselinux.x86_64 0:2.5-6.el7 will be updated
--> Processing Dependency: libselinux(x86-64) = 2.5-6.el7 for package: libselinux-python-2.5-6.el7.x86_64
---> Package libselinux.x86_64 0:2.5-15.el7 will be an update
---> Package libsemanage-python.x86_64 0:2.5-4.el7 will be updated
---> Package libsemanage-python.x86_64 0:2.5-14.el7 will be an update
---> Package setools-libs.x86_64 0:3.3.8-1.1.el7 will be updated
---> Package setools-libs.x86_64 0:3.3.8-4.el7 will be an update
--> Running transaction check
---> Package libselinux-python.x86_64 0:2.5-6.el7 will be updated
---> Package libselinux-python.x86_64 0:2.5-15.el7 will be an update
--> Finished Dependency Resolution

Dependencies Resolved

==============================================================================================================================================================================================
Package Arch Version Repository Size
==============================================================================================================================================================================================
Installing:
docker-ce x86_64 3:19.03.13-3.el7 docker-ce-stable 24 M
Installing for dependencies:
container-selinux noarch 2:2.119.2-1.911c772.el7_8 extras 40 k
containerd.io x86_64 1.6.8-3.1.el7 docker-ce-stable 33 M
docker-ce-cli x86_64 1:20.10.18-3.el7 docker-ce-stable 30 M
docker-scan-plugin x86_64 0.17.0-3.el7 docker-ce-stable 3.7 M
Updating for dependencies:
libselinux x86_64 2.5-15.el7 base 162 k
libselinux-python x86_64 2.5-15.el7 base 236 k
libselinux-utils x86_64 2.5-15.el7 base 151 k
libsemanage x86_64 2.5-14.el7 base 151 k
libsemanage-python x86_64 2.5-14.el7 base 113 k
libsepol x86_64 2.5-10.el7 base 297 k
policycoreutils x86_64 2.5-34.el7 base 917 k
policycoreutils-python x86_64 2.5-34.el7 base 457 k
selinux-policy noarch 3.13.1-268.el7_9.2 updates 498 k
selinux-policy-targeted noarch 3.13.1-268.el7_9.2 updates 7.0 M
setools-libs x86_64 3.3.8-4.el7 base 620 k

Transaction Summary
==============================================================================================================================================================================================
Install 1 Package (+ 4 Dependent packages)
Upgrade ( 11 Dependent packages)

Total size: 101 M
Total download size: 91 M
Downloading packages:
(1/5): container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm | 40 kB 00:00:00
warning: /var/cache/yum/x86_64/7/docker-ce-stable/packages/docker-ce-19.03.13-3.el7.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 621e9f35: NOKEY ] 223 kB/s | 48 MB 00:03:14 ETA
Public key for docker-ce-19.03.13-3.el7.x86_64.rpm is not installed
(2/5): docker-ce-19.03.13-3.el7.x86_64.rpm | 24 MB 00:03:49
(3/5): containerd.io-1.6.8-3.1.el7.x86_64.rpm | 33 MB 00:05:11
(4/5): docker-scan-plugin-0.17.0-3.el7.x86_64.rpm | 3.7 MB 00:00:33
(5/5): docker-ce-cli-20.10.18-3.el7.x86_64.rpm | 30 MB 00:04:09
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Total 194 kB/s | 91 MB 00:07:58
Retrieving key from https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
Importing GPG key 0x621E9F35:
Userid : "Docker Release (CE rpm) <docker@docker.com>"
Fingerprint: 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35
From : https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Updating : libsepol-2.5-10.el7.x86_64 1/27
Updating : libselinux-2.5-15.el7.x86_64 2/27
Updating : libsemanage-2.5-14.el7.x86_64 3/27
Updating : libselinux-utils-2.5-15.el7.x86_64 4/27
Updating : policycoreutils-2.5-34.el7.x86_64 5/27
Updating : selinux-policy-3.13.1-268.el7_9.2.noarch 6/27
Installing : docker-scan-plugin-0.17.0-3.el7.x86_64 7/27
Installing : 1:docker-ce-cli-20.10.18-3.el7.x86_64 8/27
Updating : selinux-policy-targeted-3.13.1-268.el7_9.2.noarch 9/27
Updating : libsemanage-python-2.5-14.el7.x86_64 10/27
Updating : setools-libs-3.3.8-4.el7.x86_64 11/27
Updating : libselinux-python-2.5-15.el7.x86_64 12/27
Updating : policycoreutils-python-2.5-34.el7.x86_64 13/27
Installing : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 14/27
Installing : containerd.io-1.6.8-3.1.el7.x86_64 15/27
Installing : 3:docker-ce-19.03.13-3.el7.x86_64 16/27
Cleanup : selinux-policy-targeted-3.13.1-102.el7.noarch 17/27
Cleanup : policycoreutils-python-2.5-8.el7.x86_64 18/27
Cleanup : selinux-policy-3.13.1-102.el7.noarch 19/27
Cleanup : policycoreutils-2.5-8.el7.x86_64 20/27
Cleanup : libselinux-utils-2.5-6.el7.x86_64 21/27
Cleanup : setools-libs-3.3.8-1.1.el7.x86_64 22/27
Cleanup : libselinux-python-2.5-6.el7.x86_64 23/27
Cleanup : libsemanage-python-2.5-4.el7.x86_64 24/27
Cleanup : libsemanage-2.5-4.el7.x86_64 25/27
Cleanup : libselinux-2.5-6.el7.x86_64 26/27
Cleanup : libsepol-2.5-6.el7.x86_64 27/27
Verifying : libselinux-2.5-15.el7.x86_64 1/27
Verifying : 2:container-selinux-2.119.2-1.911c772.el7_8.noarch 2/27
Verifying : 1:docker-ce-cli-20.10.18-3.el7.x86_64 3/27
Verifying : selinux-policy-targeted-3.13.1-268.el7_9.2.noarch 4/27
Verifying : containerd.io-1.6.8-3.1.el7.x86_64 5/27
Verifying : policycoreutils-2.5-34.el7.x86_64 6/27
Verifying : libselinux-utils-2.5-15.el7.x86_64 7/27
Verifying : policycoreutils-python-2.5-34.el7.x86_64 8/27
Verifying : docker-scan-plugin-0.17.0-3.el7.x86_64 9/27
Verifying : setools-libs-3.3.8-4.el7.x86_64 10/27
Verifying : libsemanage-python-2.5-14.el7.x86_64 11/27
Verifying : libsemanage-2.5-14.el7.x86_64 12/27
Verifying : libselinux-python-2.5-15.el7.x86_64 13/27
Verifying : libsepol-2.5-10.el7.x86_64 14/27
Verifying : 3:docker-ce-19.03.13-3.el7.x86_64 15/27
Verifying : selinux-policy-3.13.1-268.el7_9.2.noarch 16/27
Verifying : policycoreutils-2.5-8.el7.x86_64 17/27
Verifying : libsepol-2.5-6.el7.x86_64 18/27
Verifying : libselinux-2.5-6.el7.x86_64 19/27
Verifying : libselinux-python-2.5-6.el7.x86_64 20/27
Verifying : libselinux-utils-2.5-6.el7.x86_64 21/27
Verifying : libsemanage-python-2.5-4.el7.x86_64 22/27
Verifying : policycoreutils-python-2.5-8.el7.x86_64 23/27
Verifying : selinux-policy-3.13.1-102.el7.noarch 24/27
Verifying : selinux-policy-targeted-3.13.1-102.el7.noarch 25/27
Verifying : libsemanage-2.5-4.el7.x86_64 26/27
Verifying : setools-libs-3.3.8-1.1.el7.x86_64 27/27

Installed:
docker-ce.x86_64 3:19.03.13-3.el7

Dependency Installed:
container-selinux.noarch 2:2.119.2-1.911c772.el7_8 containerd.io.x86_64 0:1.6.8-3.1.el7 docker-ce-cli.x86_64 1:20.10.18-3.el7 docker-scan-plugin.x86_64 0:0.17.0-3.el7

Dependency Updated:
libselinux.x86_64 0:2.5-15.el7 libselinux-python.x86_64 0:2.5-15.el7 libselinux-utils.x86_64 0:2.5-15.el7 libsemanage.x86_64 0:2.5-14.el7
libsemanage-python.x86_64 0:2.5-14.el7 libsepol.x86_64 0:2.5-10.el7 policycoreutils.x86_64 0:2.5-34.el7 policycoreutils-python.x86_64 0:2.5-34.el7
selinux-policy.noarch 0:3.13.1-268.el7_9.2 selinux-policy-targeted.noarch 0:3.13.1-268.el7_9.2 setools-libs.x86_64 0:3.3.8-4.el7

Complete!
[root@localhost ~]#

Configure a registry mirror (accelerator)

Add the following to /etc/docker/daemon.json:

{
  "registry-mirrors": ["https://registry.docker-cn.com"]
}

Then run systemctl enable docker.service:

 

[root@localhost etc]# systemctl enable docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
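
For reference, a sketch that writes the mirror configuration above in one step and also sets the systemd cgroup driver that the kubeadm preflight warning later in this post recommends. Adding exec-opts is optional and not part of the original setup, which kept the default cgroupfs driver; if you do change it, do so before kubeadm init:

cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://registry.docker-cn.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl enable --now docker.service   # --now starts the daemon as well as enabling it
# (if Docker was already running, use: systemctl restart docker)
docker info | grep -i cgroup            # should now report the systemd cgroup driver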

3. Install kubeadm, kubelet, and kubectl (run on both the master and the node)

[root@localhost etc]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@localhost etc]# yum install kubelet-1.19.4 kubeadm-1.19.4 kubectl-1.19.4 -y
Loaded plugins: fastestmirror, langpacks
kubernetes | 1.4 kB 00:00:00
kubernetes/primary | 118 kB 00:00:00
Loading mirror speeds from cached hostfile
* base: mirrors.huaweicloud.com
* extras: mirrors.huaweicloud.com
* updates: mirrors.huaweicloud.com
kubernetes 870/870
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.19.4-0 will be installed
--> Processing Dependency: kubernetes-cni >= 0.8.6 for package: kubeadm-1.19.4-0.x86_64
--> Processing Dependency: cri-tools >= 1.13.0 for package: kubeadm-1.19.4-0.x86_64
---> Package kubectl.x86_64 0:1.19.4-0 will be installed
---> Package kubelet.x86_64 0:1.19.4-0 will be installed
--> Processing Dependency: socat for package: kubelet-1.19.4-0.x86_64
--> Processing Dependency: conntrack for package: kubelet-1.19.4-0.x86_64
--> Running transaction check
---> Package conntrack-tools.x86_64 0:1.4.4-7.el7 will be installed
--> Processing Dependency: libnetfilter_conntrack >= 1.0.6 for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.1)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0(LIBNETFILTER_CTHELPER_1.0)(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_queue.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cttimeout.so.1()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
--> Processing Dependency: libnetfilter_cthelper.so.0()(64bit) for package: conntrack-tools-1.4.4-7.el7.x86_64
---> Package cri-tools.x86_64 0:1.25.0-0 will be installed
---> Package kubernetes-cni.x86_64 0:1.1.1-0 will be installed
---> Package socat.x86_64 0:1.7.3.2-2.el7 will be installed
--> Running transaction check
---> Package libnetfilter_conntrack.x86_64 0:1.0.4-2.el7 will be updated
---> Package libnetfilter_conntrack.x86_64 0:1.0.6-1.el7_3 will be an update
---> Package libnetfilter_cthelper.x86_64 0:1.0.0-11.el7 will be installed
---> Package libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 will be installed
---> Package libnetfilter_queue.x86_64 0:1.0.2-2.el7_2 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

==============================================================================================================================================================================================
Package Arch Version Repository Size
==============================================================================================================================================================================================
Installing:
kubeadm x86_64 1.19.4-0 kubernetes 8.3 M
kubectl x86_64 1.19.4-0 kubernetes 9.0 M
kubelet x86_64 1.19.4-0 kubernetes 19 M
Installing for dependencies:
conntrack-tools x86_64 1.4.4-7.el7 base 187 k
cri-tools x86_64 1.25.0-0 kubernetes 8.2 M
kubernetes-cni x86_64 1.1.1-0 kubernetes 15 M
libnetfilter_cthelper x86_64 1.0.0-11.el7 base 18 k
libnetfilter_cttimeout x86_64 1.0.0-7.el7 base 18 k
libnetfilter_queue x86_64 1.0.2-2.el7_2 base 23 k
socat x86_64 1.7.3.2-2.el7 base 290 k
Updating for dependencies:
libnetfilter_conntrack x86_64 1.0.6-1.el7_3 base 55 k

Transaction Summary
==============================================================================================================================================================================================
Install 3 Packages (+7 Dependent packages)
Upgrade ( 1 Dependent package)

Total size: 61 M
Total download size: 61 M
Downloading packages:
(1/10): conntrack-tools-1.4.4-7.el7.x86_64.rpm | 187 kB 00:00:00
(2/10): e382ead81273ab8ebcddf14cc15bf977e44e1fd541a2cfda6ebe5741c255e59f-cri-tools-1.25.0-0.x86_64.rpm | 8.2 MB 00:00:44
(3/10): afa24df75879f7793f2b22940743e4d40674f3fcb5241355dd07d4c91e4866df-kubeadm-1.19.4-0.x86_64.rpm | 8.3 MB 00:00:45
(4/10): c1fdeadba483d54bedecb0648eb7426cc3d7222499179911961f493a0e07fcd0-kubectl-1.19.4-0.x86_64.rpm | 9.0 MB 00:00:48
(5/10): libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm | 18 kB 00:00:00
(6/10): libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm | 23 kB 00:00:00
(7/10): socat-1.7.3.2-2.el7.x86_64.rpm | 290 kB 00:00:00
(8/10): libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm | 18 kB 00:00:00
(9/10): 318243df021e3a348c865a0a0b3b7d4802fe6c6c1f79500c6130d5c6b628766c-kubelet-1.19.4-0.x86_64.rpm | 19 MB 00:01:43
(10/10): 14083ac8b11792469524dae98ebb6905b3921923937d6d733b8abb58113082b7-kubernetes-cni-1.1.1-0.x86_64.rpm | 15 MB 00:01:18
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Total 364 kB/s | 61 MB 00:02:51
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : libnetfilter_cthelper-1.0.0-11.el7.x86_64 1/12
Installing : socat-1.7.3.2-2.el7.x86_64 2/12
Installing : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 3/12
Updating : libnetfilter_conntrack-1.0.6-1.el7_3.x86_64 4/12
Installing : kubectl-1.19.4-0.x86_64 5/12
Installing : libnetfilter_queue-1.0.2-2.el7_2.x86_64 6/12
Installing : conntrack-tools-1.4.4-7.el7.x86_64 7/12
Installing : kubernetes-cni-1.1.1-0.x86_64 8/12
Installing : kubelet-1.19.4-0.x86_64 9/12
Installing : cri-tools-1.25.0-0.x86_64 10/12
Installing : kubeadm-1.19.4-0.x86_64 11/12
Cleanup : libnetfilter_conntrack-1.0.4-2.el7.x86_64 12/12
Verifying : cri-tools-1.25.0-0.x86_64 1/12
Verifying : kubelet-1.19.4-0.x86_64 2/12
Verifying : kubernetes-cni-1.1.1-0.x86_64 3/12
Verifying : libnetfilter_queue-1.0.2-2.el7_2.x86_64 4/12
Verifying : kubeadm-1.19.4-0.x86_64 5/12
Verifying : kubectl-1.19.4-0.x86_64 6/12
Verifying : libnetfilter_conntrack-1.0.6-1.el7_3.x86_64 7/12
Verifying : libnetfilter_cttimeout-1.0.0-7.el7.x86_64 8/12
Verifying : socat-1.7.3.2-2.el7.x86_64 9/12
Verifying : libnetfilter_cthelper-1.0.0-11.el7.x86_64 10/12
Verifying : conntrack-tools-1.4.4-7.el7.x86_64 11/12
Verifying : libnetfilter_conntrack-1.0.4-2.el7.x86_64 12/12

Installed:
kubeadm.x86_64 0:1.19.4-0 kubectl.x86_64 0:1.19.4-0 kubelet.x86_64 0:1.19.4-0

Dependency Installed:
conntrack-tools.x86_64 0:1.4.4-7.el7 cri-tools.x86_64 0:1.25.0-0 kubernetes-cni.x86_64 0:1.1.1-0 libnetfilter_cthelper.x86_64 0:1.0.0-11.el7
libnetfilter_cttimeout.x86_64 0:1.0.0-7.el7 libnetfilter_queue.x86_64 0:1.0.2-2.el7_2 socat.x86_64 0:1.7.3.2-2.el7

Dependency Updated:
libnetfilter_conntrack.x86_64 0:1.0.6-1.el7_3

Complete!
[root@localhost etc]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

Verify that the installation succeeded:
[root@localhost etc]# yum list installed | grep kubelet
kubelet.x86_64 1.19.4-0 @kubernetes
[root@localhost etc]# yum list installed | grep kubeadm
kubeadm.x86_64 1.19.4-0 @kubernetes
[root@localhost etc]# yum list installed | grep kubectl
kubectl.x86_64 1.19.4-0 @kubernetes
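
Optionally, the control-plane images can be pulled ahead of time; the kubeadm init output below suggests this itself ('kubeadm config images pull'). A sketch using the same mirror and version as the init command in the next section:

kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.19.4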

4. Deploy the Kubernetes master (control-plane) node, run on the master

[root@localhost etc]# kubeadm init --apiserver-advertise-address=192.168.1.200 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.19.4 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
W1015 02:35:34.688872 15327 kubelet.go:200] cannot automatically set CgroupDriver when starting the Kubelet: cannot execute 'docker info -f {{.CgroupDriver}}': exit status 1
W1015 02:35:34.711100 15327 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.4
[preflight] Running pre-flight checks
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 3.10.0-514.el7.x86_64
CONFIG_NAMESPACES: enabled
CONFIG_NET_NS: enabled
CONFIG_PID_NS: enabled
CONFIG_IPC_NS: enabled
CONFIG_UTS_NS: enabled
CONFIG_CGROUPS: enabled
CONFIG_CGROUP_CPUACCT: enabled
CONFIG_CGROUP_DEVICE: enabled
CONFIG_CGROUP_FREEZER: enabled
CONFIG_CGROUP_SCHED: enabled
CONFIG_CPUSETS: enabled
CONFIG_MEMCG: enabled
CONFIG_INET: enabled
CONFIG_EXT4_FS: enabled (as module)
CONFIG_PROC_FS: enabled
CONFIG_NETFILTER_XT_TARGET_REDIRECT: enabled (as module)
CONFIG_NETFILTER_XT_MATCH_COMMENT: enabled (as module)
CONFIG_OVERLAY_FS: enabled (as module)
CONFIG_AUFS_FS: not set - Required for aufs.
CONFIG_BLK_DEV_DM: enabled (as module)
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
CGROUPS_HUGETLB: enabled
CGROUPS_PIDS: enabled
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR CRI]: container runtime is not running: output: Client:
Context: default
Debug Mode: false
Plugins:
app: Docker App (Docker Inc., v0.9.1-beta3)
buildx: Docker Buildx (Docker Inc., v0.9.1-docker)
scan: Docker Scan (Docker Inc., v0.17.0)

Server:
ERROR: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
errors pretty printing info
, error: exit status 1
[ERROR Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
[ERROR IsDockerSystemdCheck]: cannot execute 'docker info -f {{.CgroupDriver}}': exit status 1
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[ERROR FileContent--proc-sys-net-ipv4-ip_forward]: /proc/sys/net/ipv4/ip_forward contents are not set to 1
[ERROR SystemVerification]: error verifying Docker info: "Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?"
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost etc]# reboot

The init failed because Docker was not actually running yet (it had only been enabled, not started) and the bridge/ip_forward sysctls were not in effect, so reboot the machine; with docker.service enabled, the reboot brings Docker up and clears the errors.
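
A reboot-free alternative, assuming the sysctl file from the preparation step is already in place, would be roughly:

systemctl start docker.service     # clears the CRI / Service-Docker / SystemVerification errors
modprobe br_netfilter              # makes /proc/sys/net/bridge/bridge-nf-call-iptables exist
sysctl -w net.ipv4.ip_forward=1    # satisfies the ip_forward preflight check
sysctl --system                    # re-apply /etc/sysctl.d/k8s.conf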

After the reboot, redeploy the master node (run on the master machine):

[root@k8smaster ~]# kubeadm init --apiserver-advertise-address=192.168.1.200 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.19.4 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
W1015 02:42:46.753336 10344 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.4
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8smaster kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.200]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8smaster localhost] and IPs [192.168.1.200 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8smaster localhost] and IPs [192.168.1.200 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 21.006221 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8smaster as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8smaster as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: ldxc3h.xo65e5vce0n9nzdz
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.200:6443 --token ldxc3h.xo65e5vce0n9nzdz \
--discovery-token-ca-cert-hash sha256:f7afde1770e767386be35332aa4703258516b5feb3238e619b7fb9b90a019596

Run the commands from the output above on the master:
[root@k8smaster ~]# mkdir -p $HOME/.kube
[root@k8smaster ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8smaster ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8smaster ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8smaster NotReady master 9m49s v1.19.4
[root@k8smaster ~]#

5. Join the node to the Kubernetes master (run on the node)

To add the new node to the cluster, run the kubeadm join command printed at the end of kubeadm init:

kubeadm join 192.168.1.200:6443 --token ldxc3h.xo65e5vce0n9nzdz \
--discovery-token-ca-cert-hash sha256:f7afde1770e767386be35332aa4703258516b5feb3238e619b7fb9b90a019596 
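
Note that the bootstrap token in this command expires (after 24 hours by default). If it has expired before the node joins, a fresh join command can be generated on the master with:

kubeadm token create --print-join-command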

At this point the node still shows NotReady when checked from the master; a network plugin has to be installed so the two sides can communicate.

 

6. Deploy the network plugin (run on the master)

Trying wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml was refused, so the manifest could not be downloaded directly.

Apply the kube-flannel.yml file to create the flannel runtime containers:

kubectl apply -f kube-flannel.yml    (run on the master)

The nodes were still stuck in NotReady:

[root@k8smaster ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8smaster NotReady master 4h26m v1.19.4
k8snode NotReady <none> 12m v1.19.4

Checking the pods, coredns is in the Pending state:
[root@k8smaster ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6d56c8448f-56m7h 0/1 Pending 0 4h26m <none> <none> <none> <none>
coredns-6d56c8448f-n4qrh 0/1 Pending 0 4h26m <none> <none> <none> <none>

The kube-flannel.yml itself was apparently the problem. After replacing it with a new kube-flannel.yml and running kubectl apply -f kube-flannel.yml again, coredns became healthy and the nodes went Ready. The working kube-flannel.yml:

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  seLinux:
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
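
To confirm the new manifest took effect, re-run the earlier checks on the master; exact output will differ, but the flannel pods and coredns should be Running and both nodes Ready:

kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system -l app=flannel   # one flannel DaemonSet pod per node
kubectl get pods -n kube-system -o wide          # coredns should move from Pending to Running
kubectl get nodes                                # both k8smaster and k8snode should show Ready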

 
