Deploying Kubernetes + KubeVirt + Istio + Harbor

I. Basic environment preparation

Create two virtual machines on the OpenStack platform:
Master: 10.104.43.110
Node:   10.104.43.118

1. Set hostnames and configure host mappings

# Set the hostname on all nodes
[root@k8s-h-master ~]# hostnamectl set-hostname master
[root@k8s-h-master ~]# bash
[root@master ~]# 
[root@k8s-h-node ~]# hostnamectl set-hostname node
[root@k8s-h-node ~]# bash
[root@node ~]# 

# Configure host mappings on all nodes
[root@master ~]# echo '
10.104.43.110 master
10.104.43.118 node' >> /etc/hosts
[root@node ~]# echo '
> 10.104.43.110 master
> 10.104.43.118 node' >> /etc/hosts

2. Disable the firewall and SELinux

# Disable SELinux on all nodes
# If SELinux stays enabled, Kubernetes may later fail with Permission denied when mounting directories
[root@master ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master ~]# setenforce 0
[root@node ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@node ~]# setenforce 0
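A quick check that the change took effect (getenforce should now report Permissive, and Disabled after the next reboot):

[root@master ~]# getenforce
[root@node ~]# getenforce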

# Disable the firewall on all nodes
[root@master ~]# systemctl stop firewalld.service
[root@master ~]# systemctl disable firewalld.service
[root@node ~]# systemctl stop firewalld.service
[root@node ~]# systemctl disable firewalld.service

# Disable swap
# kubelet fails to start when a swap partition is active (this can be bypassed by setting --fail-swap-on=false), so swap must be disabled on every node.
# Here we simply turn swap off on all nodes and comment out the matching /etc/fstab entries so it is not remounted on boot:
[root@master ~]# swapoff -a
[root@master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
[root@node ~]# swapoff -a
[root@node ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab
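As a quick check, the Swap line of free -h should now show 0 on every node:

[root@master ~]# free -h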

# Allow iptables to see bridged traffic
[root@master ~]# cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

[root@master ~]# cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

[root@master ~]# sudo sysctl --system

[root@node ~]# cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

[root@node ~]# cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

[root@node ~]# sysctl --system
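To verify that the module loaded and the sysctl values took effect (all three values should print 1), the following checks can be run on each node:

[root@master ~]# lsmod | grep br_netfilter
[root@master ~]# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward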

3. Configure yum repositories

# Remove the default yum repositories
[root@master yum.repos.d]# rm -rf /etc/yum.repos.d/*
[root@node yum.repos.d]# rm -rf /etc/yum.repos.d/*

# Download the Aliyun CentOS base repository
[root@master yum.repos.d]# curl -o /etc/yum.repos.d/CentOS-Base.repo  http://mirrors.aliyun.com/repo/Centos-7.repo
[root@node yum.repos.d]# curl -o /etc/yum.repos.d/CentOS-Base.repo  http://mirrors.aliyun.com/repo/Centos-7.repo
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  2523  100  2523    0     0  33599      0 --:--:-- --:--:-- --:--:-- 34561

# Add the docker-ce repository
[root@master yum.repos.d]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
[root@node yum.repos.d]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
Loaded plugins: fastestmirror
adding repo from: https://download.docker.com/linux/centos/docker-ce.repo
grabbing file https://download.docker.com/linux/centos/docker-ce.repo to /etc/yum.repos.d/docker-ce.repo
repo saved to /etc/yum.repos.d/docker-ce.repo
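To see which docker-ce versions the new repository offers before pinning one in the next step:

[root@master yum.repos.d]# yum list docker-ce --showduplicates | sort -r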

4. Install and configure Docker

# Install Docker
[root@master yum.repos.d]# yum install -y docker-ce-24.0.2
[root@node yum.repos.d]# yum install -y docker-ce-24.0.2

# Configure the Docker daemon
[root@master ~]# mkdir -p /etc/docker
[root@node ~]# mkdir -p /etc/docker
[root@master ~]# tee /etc/docker/daemon.json <<-'EOF'
> {
>   "registry-mirrors": ["https://nxwgbmaq.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
[root@node ~]# tee /etc/docker/daemon.json <<-'EOF'
> {
>   "registry-mirrors": ["https://nxwgbmaq.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
{
  "registry-mirrors": ["https://nxwgbmaq.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}

# Reload the configuration and restart the service
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
[root@node ~]# systemctl daemon-reload
[root@node ~]# systemctl restart docker

# Enable Docker to start on boot
[root@master ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@node ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
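To confirm Docker picked up the systemd cgroup driver from daemon.json, the output of docker info should include "Cgroup Driver: systemd":

[root@master ~]# docker info | grep -i 'cgroup driver'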

# Generate and modify /etc/containerd/config.toml
[root@master ~]# containerd config default > /etc/containerd/config.toml
[root@master ~]# vi /etc/containerd/config.toml
# Make these two changes: switch to the systemd cgroup driver and use a reachable pause image
            SystemdCgroup = true
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"

[root@master ~]# scp /etc/containerd/config.toml root@node:/etc/containerd/
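Instead of editing with vi, the same two changes can be scripted; a sed sketch, assuming the stock default config generated above:

[root@master ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
[root@master ~]# sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.7"#' /etc/containerd/config.toml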

# Enable containerd to start on boot
[root@master ~]# systemctl enable containerd --now
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.
[root@node ~]# systemctl enable containerd --now
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.

# Point the CRI tools at containerd as the Kubernetes runtime
[root@master ~]# crictl config runtime-endpoint /run/containerd/containerd.sock
[root@master ~]# crictl config image-endpoint /run/containerd/containerd.sock

[root@node ~]# crictl config runtime-endpoint /run/containerd/containerd.sock
[root@node ~]# crictl config image-endpoint /run/containerd/containerd.sock
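A quick sanity check that crictl can now talk to containerd through the configured endpoint:

[root@master ~]# crictl info | head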

II. Install kubelet, kubeadm, and kubectl (run on all machines)

# Configure the Kubernetes repository
[root@master ~]# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
>    http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> exclude=kubelet kubeadm kubectl
> EOF
[root@node ~]# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
>    http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> exclude=kubelet kubeadm kubectl
> EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
   http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl

# Install kubelet, kubeadm, and kubectl
[root@master ~]#  yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0 --disableexcludes=kubernetes
[root@node ~]#  yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0 --disableexcludes=kubernetes

# Enable kubelet to start on boot
[root@master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
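To double-check that the pinned versions were installed:

[root@master ~]# kubeadm version -o short
[root@master ~]# kubectl version --client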

III. Pull the required images (run on all machines)

# Generate the image pull script
# Note: the tags below target v1.20.9 and come from an older guide; adjust them to the version kubeadm will actually deploy (v1.25.0 here), or simply let kubeadm pull what it needs during init
[root@master ~]# tee ./images.sh <<-'EOF'
> #!/bin/bash
> images=(
> kube-apiserver:v1.20.9
> kube-proxy:v1.20.9
> kube-controller-manager:v1.20.9
> kube-scheduler:v1.20.9
> coredns:1.7.0
> etcd:3.4.13-0
> pause:3.2
> )
> for imageName in ${images[@]} ; do
> docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
> done
> EOF
[root@node ~]# tee ./images.sh <<-'EOF'
> #!/bin/bash
> images=(
> kube-apiserver:v1.20.9
> kube-proxy:v1.20.9
> kube-controller-manager:v1.20.9
> kube-scheduler:v1.20.9
> coredns:1.7.0
> etcd:3.4.13-0
> pause:3.2
> )
> for imageName in ${images[@]} ; do
> docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
> done
> EOF
# Make the script executable and run it
[root@master ~]# chmod +x ./images.sh && sh images.sh 
[root@node ~]# chmod +x ./images.sh && sh images.sh 
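Once the script finishes, listing the local images confirms everything was pulled:

[root@master ~]# docker images | grep lfy_k8s_images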

IV. Deploy on the master node

1. Initialize the master node

# Initialize the master node
# It is best to keep these service-cidr and pod-network-cidr values as-is; otherwise the kube-flannel.yaml file used later must be edited to match
# apiserver-advertise-address: the address the master listens on
# image-repository: the image registry to pull from
# kubernetes-version: the Kubernetes version
# service-cidr: the cluster-internal service network; pod-network-cidr: the pod network
[root@master ~]# kubeadm init \
--apiserver-advertise-address=10.104.43.110 \
--image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
--kubernetes-version v1.25.0 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.25.0
[preflight] Running pre-flight checks
... (output omitted)
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
  export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.104.43.110:6443 --token ll9ohw.8o003i4avuqd8v40 \
    --discovery-token-ca-cert-hash sha256:9e941ac4927f094c9cb13e5557ebb6efad50539731c272687c15b99953346cc1

# (Optional) Reset after a failed initialization
[root@master ~]# kubeadm reset
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart kubelet

# Run the following so the kubectl management tool can be used
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane          4m39s   v1.25.0
# NotReady is expected until a network plugin is installed in step 2

Alternative: initializing from a configuration file:

# Generate a configuration file and edit it
[root@master ~]# kubeadm config print init-defaults > a.yaml
[root@master ~]# vi a.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.104.43.110      # set to the master IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock    # set the runtime socket location
  imagePullPolicy: IfNotPresent
  name: master                         # set the node name to master
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers      # set the image registry
kind: ClusterConfiguration
kubernetesVersion: 1.25.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16                    # add the pod subnet
  serviceSubnet: 10.96.0.0/12
scheduler: {}
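Optionally, the images referenced by this config can be pre-pulled before running init; kubeadm supports this directly:

[root@master ~]# kubeadm config images pull --config=a.yaml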

# Restart the runtime
[root@master ~]# systemctl restart containerd

# Initialize the master node
[root@master ~]# kubeadm init --config=a.yaml

[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Join the node to the cluster
[root@node ~]# systemctl restart containerd
[root@node ~]# kubeadm join 10.104.43.110:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:698015f9e20592404d0bbe2ca03b955b61f1e733c39b155bf4843f87994157c6

2. Install the network plugin (two options are shown below; pick one)

# Option A: install Calico via calico.yaml
[root@master ~]# yum install -y wget
[root@master ~]# wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml --no-check-certificate
[root@master ~]# ls
anaconda-ks.cfg  calico.yaml  images.sh 

[root@master ~]# vi calico.yaml 
# Edit calico.yaml: change value: "192.168.0.0/16" to the pod-network-cidr value "10.244.0.0/16"
 - name: CALICO_IPV4POOL_CIDR
   value: "10.244.0.0/16"

[root@master ~]# kubectl apply -f calico.yaml

Option B: install the network plugin with kube-flannel

[root@master ~]# wget https://file.54sxh.cn/k8s/kube-flannel.yml

[root@master ~]# kubectl apply -f kube-flannel.yml

[root@master ~]# kubectl get node
NAME     STATUS   ROLES           AGE     VERSION
master   Ready    control-plane   6m55s   v1.25.0
node     Ready    <none>          4m49s   v1.25.0
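If the nodes stay NotReady, inspecting the flannel pods usually shows why (recent kube-flannel manifests create their own kube-flannel namespace; older versions deployed into kube-system):

[root@master ~]# kubectl get pods -n kube-flannel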

V. Join worker nodes (run when adding a new node)

The token comes from the output of kubeadm init on the master node.

[root@node ~]# kubeadm join 10.104.43.110:6443 --token ll9ohw.8o003i4avuqd8v40 \
>     --discovery-token-ca-cert-hash sha256:9e941ac4927f094c9cb13e5557ebb6efad50539731c272687c15b99953346cc1
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.7. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.


# Verify by running these commands on the master
[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE   VERSION
master   Ready    control-plane          67m   v1.25.0
node     Ready    <none>                 71s   v1.25.0
[root@master ~]# kubectl get pod -A
NAMESPACE     NAME                                      READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-bcc6f659f-lldr9   1/1     Running   0          9m30s
kube-system   calico-node-dcbkg                         1/1     Running   0          9m30s
kube-system   calico-node-mmmh9                         1/1     Running   0          115s
kube-system   coredns-5897cd56c4-bf6jd                  1/1     Running   0          67m
kube-system   coredns-5897cd56c4-g7pb8                  1/1     Running   0          67m
kube-system   etcd-master                               1/1     Running   0          67m
kube-system   kube-apiserver-master                     1/1     Running   0          67m
kube-system   kube-controller-manager-master            1/1     Running   0          67m
kube-system   kube-proxy-l6zpk                          1/1     Running   0          115s
kube-system   kube-proxy-xnr7l                          1/1     Running   0          67m
kube-system   kube-scheduler-master                     1/1     Running   0          67m

# If the token is lost or expired, generate a new one
[root@master ~]# kubeadm token create --print-join-command
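The command prints a complete kubeadm join line ready to paste on the new node; existing tokens and their expiry can also be inspected:

[root@master ~]# kubeadm token list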

VI. Deploy KubeVirt

KubeVirt project: https://github.com/kubevirt/kubevirt

# Install the following packages on all nodes
yum install -y qemu-kvm libvirt virt-install bridge-utils

# Use virt-host-validate to make sure the host can run virtualization
[root@master ~]# virt-host-validate qemu
[root@node ~]# virt-host-validate qemu
  QEMU: Checking for hardware virtualization                                 : PASS
  QEMU: Checking if device /dev/kvm exists                                   : PASS
  QEMU: Checking if device /dev/kvm is accessible                            : PASS
  QEMU: Checking if device /dev/vhost-net exists                             : PASS
  QEMU: Checking if device /dev/net/tun exists                               : PASS
  QEMU: Checking for cgroup 'memory' controller support                      : PASS
  QEMU: Checking for cgroup 'memory' controller mount-point                  : PASS
  QEMU: Checking for cgroup 'cpu' controller support                         : PASS
  QEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS
  QEMU: Checking for cgroup 'cpuacct' controller support                     : PASS
  QEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS
  QEMU: Checking for cgroup 'cpuset' controller support                      : PASS
  QEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS
  QEMU: Checking for cgroup 'devices' controller support                     : PASS
  QEMU: Checking for cgroup 'devices' controller mount-point                 : PASS
  QEMU: Checking for cgroup 'blkio' controller support                       : PASS
  QEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS
  QEMU: Checking for device assignment IOMMU support                         : WARN (No ACPI DMAR table found, IOMMU either disabled in BIOS or not supported by this hardware platform)

# Download the manifests
[root@master ~]# wget https://file.54sxh.cn/k8s/kubevirt-operator.yaml
[root@master ~]# wget https://file.54sxh.cn/k8s/kubevirt-cr.yaml

# Create the operator
[root@master ~]# kubectl create -f kubevirt-operator.yaml 

# Check that the operator pods are ready
[root@master ~]# kubectl get pod -n kubevirt
NAME                             READY   STATUS    RESTARTS   AGE
virt-operator-5d5bdf9c5b-qj5gf   1/1     Running   0          21s
virt-operator-5d5bdf9c5b-zq92t   1/1     Running   0          21s

# Create the custom resource
[root@master ~]# kubectl create -f kubevirt-cr.yaml 
kubevirt.kubevirt.io/kubevirt created


# Enable software emulation
[root@master ~]# kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
kubevirt.kubevirt.io/kubevirt patched

# Wait for all components to come up
[root@master ~]# kubectl get pod -n kubevirt
NAME                               READY   STATUS              RESTARTS   AGE
virt-api-695695c8bf-tkc2d          1/1     Running             0          49s
virt-api-695695c8bf-z2svg          1/1     Running             0          49s
virt-controller-6b8dbbd8c5-6bspb   0/1     ContainerCreating   0          13s
virt-controller-6b8dbbd8c5-lxbcj   0/1     ContainerCreating   0          13s
virt-handler-thlcs                 0/1     Init:0/1            0          13s
virt-operator-5d5bdf9c5b-qj5gf     1/1     Running             0          2m42s
virt-operator-5d5bdf9c5b-zq92t     1/1     Running             0          2m42s
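Once everything is Running, it is worth waiting for the KubeVirt CR to report Available, and installing the virtctl client for managing VMs; a sketch, where the release version is an assumption that should match the operator manifest deployed above:

[root@master ~]# kubectl -n kubevirt wait kv kubevirt --for condition=Available
[root@master ~]# export VERSION=v0.59.0    # assumed version; match your kubevirt-operator.yaml
[root@master ~]# wget https://github.com/kubevirt/kubevirt/releases/download/${VERSION}/virtctl-${VERSION}-linux-amd64
[root@master ~]# install -m 0755 virtctl-${VERSION}-linux-amd64 /usr/local/bin/virtctl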

VII. Deploy Istio

1. Concepts

A Service Mesh involves the following two concepts:

  • Data plane: handles communication between services inside the mesh, providing service discovery, load balancing, traffic management, health checks, and so on;
  • Control plane: manages and configures the intelligent proxies that route traffic, and configures Mixers to apply policies and collect metrics.

(1) Istio core components

Envoy: Istio uses Envoy to mediate all inbound and outbound traffic for services in the mesh. Part of the data plane.
Mixer: enforces access control and usage policies across the service mesh, and collects telemetry from Envoy and other services. (Note: Mixer was removed in Istio 1.5, so it no longer ships with the 1.17.2 release installed below.)
Pilot: provides service discovery for the Envoy sidecars, plus traffic management for intelligent routing (e.g. A/B tests, canary deployments) and resilience (timeouts, retries, circuit breakers, etc.). Part of the control plane.
Citadel: provides access control and identity authentication.

(2) Istio visualization components

Vistio: monitors network traffic between applications and clusters in near real time.
Kiali: visualizes the service mesh topology, circuit breakers, request rates, and more. Kiali also bundles Jaeger tracing, providing distributed tracing out of the box.
Jaeger: shows the call chains between Istio microservices and monitors their health. Note that in production, Jaeger data should be persisted to Elasticsearch or Cassandra.

2. Manual Istio deployment

Official Istio documentation: https://istio.io/latest/zh/docs/setup/getting-started/

# Download the Istio release
[root@master ~]# curl -L https://git.io/getLatestIstio | sh -
# If the command above cannot reach the network, the script is available on GitHub: https://github.com/istio/istio/blob/master/release/downloadIstioCandidate.sh
# Upload the script and make it executable
[root@master ~]# chmod +x downloadIstioCandidate.sh
# Run the script
[root@master ~]# ./downloadIstioCandidate.sh

# Create the namespace
[root@master ~]# kubectl create namespace istio-system
namespace/istio-system created

# Change into the Istio package directory
# The installation directory contains:
# sample applications under samples/
# the istioctl client binary under bin/
[root@master ~]# cd istio-1.17.2/

# Add the istioctl client to the PATH
[root@master istio-1.17.2]# export PATH=$PWD/bin:$PATH
[root@master istio-1.17.2]# echo $PATH
/root/istio-1.17.2/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin

# Install using the demo profile. It is chosen because it bundles a feature set suited for testing; other profiles target production or performance testing
[root@master istio-1.17.2]# istioctl install --set profile=demo -y
✔ Istio core installed                                                                
✔ Istiod installed                                                                    
✔ Ingress gateways installed                                                          
✔ Egress gateways installed                                                           
✔ Installation complete                                                               
Making this installation the default for injection and validation.
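With Istio installed, the usual next steps from the official getting-started guide are to enable automatic sidecar injection on a namespace and deploy the bundled Bookinfo sample from samples/:

[root@master istio-1.17.2]# kubectl label namespace default istio-injection=enabled
[root@master istio-1.17.2]# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml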

VIII. Deploy Harbor

Harbor is an enterprise-class registry server for storing and distributing Docker images.

Official Harbor documentation: https://goharbor.io/docs/2.8.0/install-config/installation-prereqs/

# 1. Download the Docker Compose binary from its GitHub releases with curl
# Syntax: curl -L "<Docker Compose release URL on GitHub>" -o /usr/local/bin/docker-compose
# Example:
[root@master ~]# curl -L "https://github.com/docker/compose/releases/download/v2.6.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 24.7M  100 24.7M    0     0  39116      0  0:11:03  0:11:03 --:--:-- 55673

# 2. Make the binary executable
[root@master ~]# chmod +x /usr/local/bin/docker-compose
[root@master ~]# docker-compose --version
Docker Compose version v2.6.0

# 3. Download the Harbor installer
[root@master ~]# wget https://github.com/goharbor/harbor/releases/download/v2.8.2/harbor-offline-installer-v2.8.2.tgz

# 4. Extract the Harbor installer
[root@master ~]# mkdir /data
[root@master ~]# tar -zxvf harbor-offline-installer-v2.8.2.tgz -C /data/
[root@master ~]# cd /data/harbor/
[root@master harbor]# ls
common.sh  harbor.v2.8.2.tar.gz  harbor.yml.tmpl  install.sh  LICENSE  prepare

# 5. Edit the Harbor configuration
[root@master harbor]# cp harbor.yml.tmpl harbor.yml
# Comment out the https settings and set the hostname
[root@master harbor]# vim harbor.yml
hostname: 10.104.43.110
# https:
#   # https port for harbor, default is 443
#   port: 443
#   # The path of cert and key files for nginx
#   certificate: /your/certificate/path
#   private_key: /your/private/key/path

# 6. Run prepare to apply the configuration and pull the Harbor images
[root@master harbor]# ./prepare

# 7. Install Harbor
[root@master harbor]# ./install.sh

# 8. Check Harbor's installation status
[root@master harbor]# docker-compose ps
NAME                COMMAND                  SERVICE             STATUS               PORTS
harbor-core         "/harbor/entrypoint.…"   core                running (starting)   
harbor-db           "/docker-entrypoint.…"   postgresql          running (starting)   
harbor-jobservice   "/harbor/entrypoint.…"   jobservice          running (starting)   
harbor-log          "/bin/sh -c /usr/loc…"   log                 running (healthy)    127.0.0.1:1514->10514/tcp
harbor-portal       "nginx -g 'daemon of…"   portal              running (starting)   
nginx               "nginx -g 'daemon of…"   proxy               running (starting)   0.0.0.0:80->8080/tcp, :::80->8080/tcp
redis               "redis-server /etc/r…"   redis               running (starting)   
registry            "/home/harbor/entryp…"   registry            running (healthy)    
registryctl         "/home/harbor/start.…"   registryctl         running (starting) 

# 9. Open Harbor at http://10.104.43.110
Log in with the default credentials: admin / Harbor12345.

# 10. Edit the Docker configuration to add the Harbor registry address
# Add insecure-registries to the existing daemon.json rather than replacing the file, so the earlier settings are kept
[root@master ~]# vi /etc/docker/daemon.json
[root@master ~]# cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://nxwgbmaq.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "insecure-registries": ["10.104.43.110"]
}
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
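After the restart, pushing a test image verifies that Docker can reach the registry; this sketch assumes Harbor's default library project:

[root@master ~]# docker login 10.104.43.110 -u admin -p Harbor12345
[root@master ~]# docker pull busybox:latest
[root@master ~]# docker tag busybox:latest 10.104.43.110/library/busybox:latest
[root@master ~]# docker push 10.104.43.110/library/busybox:latest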

IX. Platform configuration

1. containerd configuration

Configure containerd to use a registry mirror and the private image registry, in this case the local Harbor instance.

# 1. Edit the containerd configuration
[root@master ~]# vi /etc/containerd/config.toml
# Add the Harbor registry address under [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.104.43.110"]
          endpoint = ["http://10.104.43.110"]

# 2. Restart containerd
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart containerd
[root@master ~]# systemctl status containerd
● containerd.service - containerd container runtime
   Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; vendor preset: disabled)
   Active: active (running) since Fri 2023-06-16 09:35:16 UTC; 6s ago
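To confirm containerd can also pull from Harbor (this assumes the busybox image pushed in the previous section):

[root@master ~]# crictl pull 10.104.43.110/library/busybox:latest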

2. Shell completion

# 1. Install bash-completion
[root@master ~]# yum install bash-completion -y

# 2. Configure completion
[root@master ~]# source /usr/share/bash-completion/bash_completion
[root@master ~]# source <(kubectl completion bash)
[root@master ~]# echo "source <(kubectl completion bash)" >> .bashrc 

# 3. Helm completion (only needed once Helm is installed)
[root@master ~]# source <(helm completion bash)

# 4. Check that completion works
[root@master ~]# kubectl get no<Tab><Tab>
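A common convenience on top of this is aliasing kubectl and extending completion to the alias, as described in the standard kubectl completion docs:

[root@master ~]# echo 'alias k=kubectl' >> ~/.bashrc
[root@master ~]# echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc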