Quickly deploying a Kubernetes v1.21.0 cluster on Ubuntu with kubeasz
Ubuntu ships relatively new kernels, and Docker relies on recent Linux kernel features, so Ubuntu makes a friendlier host OS for running Docker containers.
https://github.com/easzlab/kubeasz/blob/master/docs/setup/00-planning_and_overall_intro.md
1. Basic system configuration
2 CPU cores / 4 GB RAM / 40 GB disk per node (this sizing is for testing only)
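A minimal sketch of the usual node preparation on Ubuntu 20.04 follows; the hostname and the chrony time-sync package are illustrative assumptions, not steps taken from the original:

# run on every node; the hostname is an example
hostnamectl set-hostname master01
swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab    # kubelet requires swap to be off
apt update && apt install -y chrony               # keep cluster time in sync
systemctl enable --now chrony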
export release=3.1.0
curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown
cat /root/ez/ezdown
# the component versions are defined near the top of the script; for release 3.1.0
# the defaults match the versions used in this article:
DOCKER_VER=20.10.5
K8S_BIN_VER=v1.21.0
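If you need to pin different component versions, ezdown's own usage() exposes command-line overrides, for example (the version numbers here are just illustrations):

./ezdown -D -d 20.10.5 -k v1.21.0    # -d sets the docker-ce version, -k the k8s binaries version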
root@ubuntu20:/etc/docker# cat daemon.json
{
  "registry-mirrors": ["https://e83z5xiz.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.80.250:80"]
}

root@kubeasz:/etc/kubeasz/down# docker load -i pause_3.4.1.tar
root@kubeasz:/etc/kubeasz/down# docker tag easzlab/pause-amd64:3.4.1 192.168.80.250:80/chuan/pause-amd64:3.4.1
root@kubeasz:/etc/kubeasz/down# docker push 192.168.80.250:80/chuan/pause-amd64:3.4.1
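The same load/tag/push pattern applies to every offline image you want to serve from the private registry. A rough sketch, assuming the Harbor project is 'chuan' on 192.168.80.250:80 (the loop and the coredns example below are illustrations, not from the original):

cd /etc/kubeasz/down
for tar in ./*.tar; do docker load -i "$tar"; done      # load all offline image tarballs
# then re-tag each image to the private registry and push, e.g. for coredns:
docker tag coredns/coredns:1.8.0 192.168.80.250:80/chuan/coredns:1.8.0
docker push 192.168.80.250:80/chuan/coredns:1.8.0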
root@ubuntu20:/etc/kubeasz# cat ezdown
#!/bin/bash
#--------------------------------------------------
# This script is used for:
# 1. to download the scripts/binaries/images needed for installing a k8s cluster with kubeasz
# 2. to run kubeasz in a container (optional)
# @author:   gjmzj
# @usage:    ./ezdown
# @repo:     https://github.com/easzlab/kubeasz
# @ref:      https://github.com/kubeasz/dockerfiles
#--------------------------------------------------
set -o nounset
set -o errexit
#set -o xtrace

# default settings, can be overridden by cmd line options, see usage
DOCKER_VER=20.10.5
KUBEASZ_VER=3.1.0
K8S_BIN_VER=v1.21.0
EXT_BIN_VER=0.9.4
SYS_PKG_VER=0.4.1
HARBOR_VER=v2.1.3
REGISTRY_MIRROR=CN

# images needed by k8s cluster
calicoVer=v3.15.3
flannelVer=v0.13.0-amd64
dnsNodeCacheVer=1.17.0
corednsVer=1.8.0
dashboardVer=v2.2.0
dashboardMetricsScraperVer=v1.0.6
metricsVer=v0.3.6
pauseVer=3.4.1
nfsProvisionerVer=v4.0.1
export ciliumVer=v1.4.1
export kubeRouterVer=v0.3.1
export kubeOvnVer=v1.5.3
export promChartVer=12.10.6
export traefikChartVer=9.12.3

function usage() {
  echo -e "\033[33mUsage:\033[0m ezdown [options] [args]"
  cat <<EOF
option: -{DdekSz}
  -C         stop&clean all local containers
  -D         download all into "$BASE"
  -P         download system packages for offline installing
  -R         download Registry(harbor) offline installer
  -S         start kubeasz in a container
  -d <ver>   set docker-ce version, default "$DOCKER_VER"
  -e <ver>   set kubeasz-ext-bin version, default "$EXT_BIN_VER"
  -k <ver>   set kubeasz-k8s-bin version, default "$K8S_BIN_VER"
  -m <str>   set docker registry mirrors, default "CN"(used in Mainland,China)
  -p <ver>   set kubeasz-sys-pkg version, default "$SYS_PKG_VER"
  -z <ver>   set kubeasz version, default "$KUBEASZ_VER"
EOF
}

function logger() {
  TIMESTAMP=$(date +'%Y-%m-%d %H:%M:%S')
  case "$1" in
    debug)
      echo -e "$TIMESTAMP \033[36mDEBUG\033[0m $2"
      ;;
    info)
      echo -e "$TIMESTAMP \033[32mINFO\033[0m $2"
      ;;
    warn)
      echo -e "$TIMESTAMP \033[33mWARN\033[0m $2"
      ;;
    error)
      echo -e "$TIMESTAMP \033[31mERROR\033[0m $2"
      ;;
    *)
      ;;
  esac
}

function download_docker() {
  if [[ "$REGISTRY_MIRROR" == CN ]];then
    DOCKER_URL="https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
  else
    DOCKER_URL="https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VER}.tgz"
  fi

  if [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]];then
    logger warn "docker binaries already existed"
  else
    logger info "downloading docker binaries, version $DOCKER_VER"
    if [[ -e /usr/bin/curl ]];then
      curl -C- -O --retry 3 "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
    else
      wget -c "$DOCKER_URL" || { logger error "downloading docker failed"; exit 1; }
    fi
    /bin/mv -f "./docker-$DOCKER_VER.tgz" "$BASE/down"
  fi

  tar zxf "$BASE/down/docker-$DOCKER_VER.tgz" -C "$BASE/down" && \
  /bin/cp -f "$BASE"/down/docker/* "$BASE/bin" && \
  /bin/mv -f "$BASE"/down/docker/* /opt/kube/bin && \
  ln -sf /opt/kube/bin/docker /bin/docker
}

function install_docker() {
  # check if a container runtime is already installed
  systemctl status docker|grep Active|grep -q running && { logger warn "docker is already running."; return 0; }

  logger debug "generate docker service file"
  cat > /etc/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
Environment="PATH=/opt/kube/bin:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStartPre=/sbin/iptables -F
ExecStartPre=/sbin/iptables -X
ExecStartPre=/sbin/iptables -F -t nat
ExecStartPre=/sbin/iptables -X -t nat
ExecStartPre=/sbin/iptables -F -t raw
ExecStartPre=/sbin/iptables -X -t raw
ExecStartPre=/sbin/iptables -F -t mangle
ExecStartPre=/sbin/iptables -X -t mangle
ExecStart=/opt/kube/bin/dockerd
ExecStartPost=/sbin/iptables -P INPUT ACCEPT
ExecStartPost=/sbin/iptables -P OUTPUT ACCEPT
ExecStartPost=/sbin/iptables -P FORWARD ACCEPT
ExecReload=/bin/kill -s HUP \$MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

  # configuration for dockerd
  mkdir -p /etc/docker
  DOCKER_VER_MAIN=$(echo "$DOCKER_VER"|cut -d. -f1)
  CGROUP_DRIVER="cgroupfs"
  ((DOCKER_VER_MAIN>=20)) && CGROUP_DRIVER="systemd"
  logger debug "generate docker config: /etc/docker/daemon.json"
  if [[ "$REGISTRY_MIRROR" == CN ]];then
    logger debug "prepare register mirror for $REGISTRY_MIRROR"
    cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=$CGROUP_DRIVER"],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "http://hub-mirror.c.163.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker"
}
EOF
  else
    logger debug "standard config without registry mirrors"
    cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=$CGROUP_DRIVER"],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/var/lib/docker"
}
EOF
  fi

  if [[ -e /etc/centos-release || -e /etc/redhat-release ]]; then
    logger debug "turn off selinux in CentOS/Redhat"
    getenforce|grep Disabled || setenforce 0
    sed -i 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
  fi

  logger debug "enable and start docker"
  systemctl enable docker
  systemctl daemon-reload && systemctl restart docker && sleep 4
}

function get_kubeasz() {
  # check if kubeasz already existed
  [[ -d "$BASE/roles/kube-node" ]] && { logger warn "kubeasz already existed"; return 0; }

  logger info "downloading kubeasz: $KUBEASZ_VER"
  logger debug " run a temporary container"
  docker run -d --name temp_easz easzlab/kubeasz:${KUBEASZ_VER} || { logger error "download failed."; exit 1; }

  [[ -f "$BASE/down/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "$BASE/down/docker-${DOCKER_VER}.tgz" /tmp
  [[ -d "$BASE/bin" ]] && /bin/mv -f "$BASE/bin" /tmp

  rm -rf "$BASE" && \
  logger debug "cp kubeasz code from the temporary container" && \
  docker cp "temp_easz:$BASE" "$BASE" && \
  logger debug "stop&remove temporary container" && \
  docker rm -f temp_easz

  mkdir -p "$BASE/bin" "$BASE/down"
  [[ -f "/tmp/docker-${DOCKER_VER}.tgz" ]] && /bin/mv -f "/tmp/docker-${DOCKER_VER}.tgz" "$BASE/down"
  [[ -d "/tmp/bin" ]] && /bin/mv -f /tmp/bin/* "$BASE/bin"
  return 0
}

function get_k8s_bin() {
  [[ -f "$BASE/bin/kubelet" ]] && { logger warn "kubernetes binaries existed"; return 0; }

  logger info "downloading kubernetes: $K8S_BIN_VER binaries"
  docker pull easzlab/kubeasz-k8s-bin:"$K8S_BIN_VER" && \
  logger debug "run a temporary container" && \
  docker run -d --name temp_k8s_bin easzlab/kubeasz-k8s-bin:${K8S_BIN_VER} && \
  logger debug "cp k8s binaries" && \
  docker cp temp_k8s_bin:/k8s "$BASE/k8s_bin_tmp" && \
  /bin/mv -f "$BASE"/k8s_bin_tmp/* "$BASE/bin" && \
  logger debug "stop&remove temporary container" && \
  docker rm -f temp_k8s_bin && \
  rm -rf "$BASE/k8s_bin_tmp"
}

function get_ext_bin() {
  [[ -f "$BASE/bin/etcdctl" ]] && { logger warn "extra binaries existed"; return 0; }

  logger info "downloading extral binaries kubeasz-ext-bin:$EXT_BIN_VER"
  docker pull "easzlab/kubeasz-ext-bin:$EXT_BIN_VER" && \
  logger debug "run a temporary container" && \
  docker run -d --name temp_ext_bin "easzlab/kubeasz-ext-bin:$EXT_BIN_VER" && \
  logger debug "cp extral binaries" && \
  docker cp temp_ext_bin:/extra "$BASE/extra_bin_tmp" && \
  /bin/mv -f "$BASE"/extra_bin_tmp/* "$BASE/bin" && \
  logger debug "stop&remove temporary container" && \
  docker rm -f temp_ext_bin && \
  rm -rf "$BASE/extra_bin_tmp"
}

function get_sys_pkg() {
  [[ -f "$BASE/down/packages/chrony_xenial.tar.gz" ]] && { logger warn "system packages existed"; return 0; }

  logger info "downloading system packages kubeasz-sys-pkg:$SYS_PKG_VER"
  docker pull "easzlab/kubeasz-sys-pkg:$SYS_PKG_VER" && \
  logger debug "run a temporary container" && \
  docker run -d --name temp_sys_pkg "easzlab/kubeasz-sys-pkg:$SYS_PKG_VER" && \
  logger debug "cp system packages" && \
  docker cp temp_sys_pkg:/packages "$BASE/down" && \
  logger debug "stop&remove temporary container" && \
  docker rm -f temp_sys_pkg
}

function get_harbor_offline_pkg() {
  [[ -f "$BASE/down/harbor-offline-installer-$HARBOR_VER.tgz" ]] && { logger warn "harbor-offline existed"; return 0; }

  logger info "downloading harbor-offline:$HARBOR_VER"
  docker pull "easzlab/harbor-offline:$HARBOR_VER" && \
  logger debug "run a temporary container" && \
  docker run -d --name temp_harbor "easzlab/harbor-offline:$HARBOR_VER" && \
  logger debug "cp harbor-offline installer package" && \
  docker cp "temp_harbor:/harbor-offline-installer-$HARBOR_VER.tgz" "$BASE/down" && \
  logger debug "stop&remove temporary container" && \
  docker rm -f temp_harbor
}

function get_offline_image() {
  imageDir="$BASE/down"
  logger info "downloading offline images"

  if [[ ! -f "$imageDir/calico_$calicoVer.tar" ]];then
    docker pull "calico/cni:$calicoVer" && \
    docker pull "calico/pod2daemon-flexvol:$calicoVer" && \
    docker pull "calico/kube-controllers:$calicoVer" && \
    docker pull "calico/node:$calicoVer" && \
    docker save -o "$imageDir/calico_$calicoVer.tar" "calico/cni:$calicoVer" "calico/kube-controllers:$calicoVer" "calico/node:$calicoVer" "calico/pod2daemon-flexvol:$calicoVer"
  fi
  if [[ ! -f "$imageDir/coredns_$corednsVer.tar" ]];then
    docker pull "coredns/coredns:$corednsVer" && \
    docker save -o "$imageDir/coredns_$corednsVer.tar" "coredns/coredns:$corednsVer"
  fi
  if [[ ! -f "$imageDir/k8s-dns-node-cache_$dnsNodeCacheVer.tar" ]];then
    docker pull "easzlab/k8s-dns-node-cache:$dnsNodeCacheVer" && \
    docker save -o "$imageDir/k8s-dns-node-cache_$dnsNodeCacheVer.tar" "easzlab/k8s-dns-node-cache:$dnsNodeCacheVer"
  fi
  if [[ ! -f "$imageDir/dashboard_$dashboardVer.tar" ]];then
    docker pull "kubernetesui/dashboard:$dashboardVer" && \
    docker save -o "$imageDir/dashboard_$dashboardVer.tar" "kubernetesui/dashboard:$dashboardVer"
  fi
  if [[ ! -f "$imageDir/flannel_$flannelVer.tar" ]];then
    docker pull "easzlab/flannel:$flannelVer" && \
    docker save -o "$imageDir/flannel_$flannelVer.tar" "easzlab/flannel:$flannelVer"
  fi
  if [[ ! -f "$imageDir/metrics-scraper_$dashboardMetricsScraperVer.tar" ]];then
    docker pull "kubernetesui/metrics-scraper:$dashboardMetricsScraperVer" && \
    docker save -o "$imageDir/metrics-scraper_$dashboardMetricsScraperVer.tar" "kubernetesui/metrics-scraper:$dashboardMetricsScraperVer"
  fi
  if [[ ! -f "$imageDir/metrics-server_$metricsVer.tar" ]];then
    docker pull "mirrorgooglecontainers/metrics-server-amd64:$metricsVer" && \
    docker save -o "$imageDir/metrics-server_$metricsVer.tar" "mirrorgooglecontainers/metrics-server-amd64:$metricsVer"
  fi
  if [[ ! -f "$imageDir/pause_$pauseVer.tar" ]];then
    docker pull "easzlab/pause-amd64:$pauseVer" && \
    docker save -o "$imageDir/pause_$pauseVer.tar" "easzlab/pause-amd64:$pauseVer"
    /bin/cp -u "$imageDir/pause_$pauseVer.tar" "$imageDir/pause.tar"
  fi
  if [[ ! -f "$imageDir/nfs-provisioner_$nfsProvisionerVer.tar" ]];then
    docker pull "easzlab/nfs-subdir-external-provisioner:$nfsProvisionerVer" && \
    docker save -o "$imageDir/nfs-provisioner_$nfsProvisionerVer.tar" "easzlab/nfs-subdir-external-provisioner:$nfsProvisionerVer"
  fi
  if [[ ! -f "$imageDir/kubeasz_$KUBEASZ_VER.tar" ]];then
    docker pull "easzlab/kubeasz:$KUBEASZ_VER" && \
    docker save -o "$imageDir/kubeasz_$KUBEASZ_VER.tar" "easzlab/kubeasz:$KUBEASZ_VER"
  fi
}

function download_all() {
  mkdir -p /opt/kube/bin "$BASE/down" "$BASE/bin"
  download_docker && \
  install_docker && \
  get_kubeasz && \
  get_k8s_bin && \
  get_ext_bin && \
  get_offline_image
}

function start_kubeasz_docker() {
  [[ -d "$BASE/roles/kube-node" ]] || { logger error "not initialized. try 'ezdown -D' first."; exit 1; }

  logger info "try to run kubeasz in a container"
  # get host's IP
  host_if=$(ip route|grep default|head -n1|cut -d' ' -f5)
  host_ip=$(ip a|grep "$host_if$"|head -n1|awk '{print $2}'|cut -d'/' -f1)
  logger debug "get host IP: $host_ip"

  # allow ssh login using key locally
  if [[ ! -e /root/.ssh/id_rsa ]]; then
    logger debug "generate ssh key pair"
    ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa > /dev/null
    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
    ssh-keyscan -t ecdsa -H "$host_ip" >> /root/.ssh/known_hosts
  fi

  # create a link '/usr/bin/python' in Ubuntu1604
  if [[ ! -e /usr/bin/python && -e /etc/debian_version ]]; then
    logger debug "create a soft link '/usr/bin/python'"
    ln -s /usr/bin/python3 /usr/bin/python
  fi

  # docker load -i "$BASE/down/kubeasz_$KUBEASZ_VER.tar"

  # run kubeasz docker container
  docker run --detach \
      --env HOST_IP="$host_ip" \
      --name kubeasz \
      --network host \
      --restart always \
      --volume "$BASE":"$BASE" \
      --volume /root/.kube:/root/.kube \
      --volume /root/.ssh:/root/.ssh \
      easzlab/kubeasz:${KUBEASZ_VER} sleep 36000
}

function clean_container() {
  logger info "clean all running containers"
  docker ps -a|awk 'NR>1{print $1}'|xargs docker rm -f
}

### Main Lines ##################################################

function main() {
  BASE="/etc/kubeasz"

  # check if use bash shell
  readlink /proc/$$/exe|grep -q "dash" && { logger error "you should use bash shell, not sh"; exit 1; }
  # check if use with root
  [[ "$EUID" -ne 0 ]] && { logger error "you should run this script as root"; exit 1; }

  [[ "$#" -eq 0 ]] && { usage >&2; exit 1; }

  ACTION=""
  while getopts "CDPRSd:e:k:m:p:z:" OPTION; do
    case "$OPTION" in
      C)
        ACTION="clean_container"
        ;;
      D)
        ACTION="download_all"
        ;;
      P)
        ACTION="get_sys_pkg"
        ;;
      R)
        ACTION="get_harbor_offline_pkg"
        ;;
      S)
        ACTION="start_kubeasz_docker"
        ;;
      d)
        DOCKER_VER="$OPTARG"
        ;;
      e)
        EXT_BIN_VER="$OPTARG"
        ;;
      k)
        K8S_BIN_VER="$OPTARG"
        ;;
      m)
        REGISTRY_MIRROR="$OPTARG"
        ;;
      p)
        SYS_PKG_VER="$OPTARG"
        ;;
      z)
        KUBEASZ_VER="$OPTARG"
        ;;
      ?)
        usage
        exit 1
        ;;
    esac
  done

  [[ "$ACTION" == "" ]] && { logger error "illegal option"; usage; exit 1; }

  # excute cmd "$ACTION"
  logger info "Action begin: $ACTION"
  ${ACTION} || { logger error "Action failed: $ACTION"; return 1; }
  logger info "Action successed: $ACTION"
}

main "$@"
Install ansible on the deploy node, distribute SSH keys to all nodes with ssh-copy-id, and install docker.
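A minimal sketch of that step, assuming the deploy node is Ubuntu and the node IPs match the hosts file shown later (docker itself is installed automatically by ezdown -D, so only ansible and key distribution are shown):

apt update && apt install -y ansible sshpass
ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa
for ip in 192.168.192.151 192.168.192.152 192.168.192.153; do
  ssh-copy-id root@$ip        # passwordless login, required by ansible
done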
root@ubuntu20:/etc/docker# cat daemon.json
{
"registry-mirrors": ["https://e83z5xiz.mirror.aliyuncs.com"]
}
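After editing /etc/docker/daemon.json, restart dockerd so the mirror takes effect (standard Docker procedure, not shown in the original):

systemctl daemon-reload && systemctl restart docker
docker info | grep -A2 "Registry Mirrors"    # confirm the mirror is active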
chmod +x ./ezdown
# use the tool script to download everything (kubeasz code, binaries, offline images are pulled via docker)
./ezdown -D
Once the script completes successfully, all files (the kubeasz code, binaries, and offline images) are laid out under /etc/kubeasz.
4.2 Create a cluster configuration instance
ls -lv /usr/bin/docker /usr/local/bin    # check that the docker client and binary directory are in place
./ezctl new k8s-01
2021-01-19 10:48:23 DEBUG generate custom cluster files in /etc/kubeasz/clusters/k8s-01
2021-01-19 10:48:23 DEBUG set version of common plugins
2021-01-19 10:48:23 DEBUG cluster k8s-01: files successfully created.
2021-01-19 10:48:23 INFO next steps 1: to config '/etc/kubeasz/clusters/k8s-01/hosts'
2021-01-19 10:48:23 INFO next steps 2: to config '/etc/kubeasz/clusters/k8s-01/config.yml'
Then, following the prompts, edit '/etc/kubeasz/clusters/k8s-01/hosts' and '/etc/kubeasz/clusters/k8s-01/config.yml':
# upload the downloaded package to the deployment node
root@ubuntu20:/etc/kubeasz# ls
ansible.cfg bin clusters down example ezctl ezdown playbooks roles
# on the master nodes: install keepalived + haproxy, set up passwordless SSH, load the image tarballs with docker and push them to Harbor
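A rough sketch of that preparation on one master node; the packages are standard Ubuntu packages, and the registry address matches the SANDBOX_IMAGE setting in the config.yml below (adapt both to your environment):

apt install -y keepalived haproxy
ssh-copy-id root@192.168.192.151        # repeat for every node that must be reachable without a password
docker load -i /etc/kubeasz/down/pause_3.4.1.tar
docker tag easzlab/pause-amd64:3.4.1 192.168.192.155:80/chuan/pause-amd64:3.4.1
docker push 192.168.192.155:80/chuan/pause-amd64:3.4.1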
root@ubuntu20:/etc/kubeasz/clusters/k8s-01# cat hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
192.168.192.151

# master node(s)
[kube_master]
192.168.192.151
192.168.192.152
192.168.192.153

# work node(s)
[kube_node]
192.168.192.151
192.168.192.152
192.168.192.153

# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.192.8 NEW_INSTALL=false

# [optional] loadbalance for accessing k8s from outside
[ex_lb]
192.168.192.151 LB_ROLE=backup EX_APISERVER_VIP=192.168.192.188 EX_APISERVER_PORT=6443
192.168.192.152 LB_ROLE=master EX_APISERVER_VIP=192.168.192.188 EX_APISERVER_PORT=6443

# [optional] ntp server for the cluster
[chrony]
#192.168.192.1

[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"

# Cluster container-runtime supported: docker, containerd
CONTAINER_RUNTIME="docker"

# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"

# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"

# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.100.0.0/16"

# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="10.200.0.0/16"

# NodePort Range
NODE_PORT_RANGE="1-65000"

# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"

# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/usr/local/bin"

# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"

# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/k8s-01"

# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
root@ubuntu20:/etc/kubeasz/clusters/k8s-01# cat config.yml
############################
# prepare
############################
# 可选离线安装系统软件包 (offline|online)
INSTALL_SOURCE: "online"

# 可选进行系统安全加固 github.com/dev-sec/ansible-collection-hardening
OS_HARDEN: false

# 设置时间源服务器【重要:集群内机器时间必须同步】
ntp_servers:
  - "ntp1.aliyun.com"
  - "time1.cloud.tencent.com"
  - "0.cn.pool.ntp.org"

# 设置允许内部时间同步的网络段,比如"10.0.0.0/8",默认全部允许
local_network: "0.0.0.0/0"

############################
# role:deploy
############################
# default: ca will expire in 100 years
# default: certs issued by the ca will expire in 50 years
CA_EXPIRY: "876000h"
CERT_EXPIRY: "438000h"

# kubeconfig 配置参数
CLUSTER_NAME: "cluster1"
CONTEXT_NAME: "context-{{ CLUSTER_NAME }}"

############################
# role:etcd
############################
# 设置不同的wal目录,可以避免磁盘io竞争,提高性能
ETCD_DATA_DIR: "/var/lib/etcd"
ETCD_WAL_DIR: ""

############################
# role:runtime [containerd,docker]
############################
# ------------------------------------------- containerd
# [.]启用容器仓库镜像
ENABLE_MIRROR_REGISTRY: true

# [containerd]基础容器镜像
SANDBOX_IMAGE: "192.168.192.155:80/chuan/pause-amd64:3.4.1"

# [containerd]容器持久化存储目录
CONTAINERD_STORAGE_DIR: "/var/lib/containerd"

# ------------------------------------------- docker
# [docker]容器存储目录
DOCKER_STORAGE_DIR: "/var/lib/docker"

# [docker]开启Restful API
ENABLE_REMOTE_API: false

# [docker]信任的HTTP仓库
INSECURE_REG: '["127.0.0.1/8","192.168.192.155:80"]'

############################
# role:kube-master
############################
# k8s 集群 master 节点证书配置,可以添加多个ip和域名(比如增加公网ip和域名)
MASTER_CERT_HOSTS:
  - "10.1.1.1"
  - "k8s.test.io"
  #- "www.test.com"

# node 节点上 pod 网段掩码长度(决定每个节点最多能分配的pod ip地址)
# 如果flannel 使用 --kube-subnet-mgr 参数,那么它将读取该设置为每个节点分配pod网段
# https://github.com/coreos/flannel/issues/847
NODE_CIDR_LEN: 24

############################
# role:kube-node
############################
# Kubelet 根目录
KUBELET_ROOT_DIR: "/var/lib/kubelet"

# node节点最大pod 数
MAX_PODS: 300

# 配置为kube组件(kubelet,kube-proxy,dockerd等)预留的资源量
# 数值设置详见templates/kubelet-config.yaml.j2
KUBE_RESERVED_ENABLED: "yes"

# k8s 官方不建议草率开启 system-reserved, 除非你基于长期监控,了解系统的资源占用状况;
# 并且随着系统运行时间,需要适当增加资源预留,数值设置详见templates/kubelet-config.yaml.j2
# 系统预留设置基于 4c/8g 虚机,最小化安装系统服务,如果使用高性能物理机可以适当增加预留
# 另外,集群安装时候apiserver等资源占用会短时较大,建议至少预留1g内存
SYS_RESERVED_ENABLED: "no"

# haproxy balance mode
BALANCE_ALG: "roundrobin"

############################
# role:network [flannel,calico,cilium,kube-ovn,kube-router]
############################
# ------------------------------------------- flannel
# [flannel]设置flannel 后端"host-gw","vxlan"等
FLANNEL_BACKEND: "vxlan"
DIRECT_ROUTING: false

# [flannel] flanneld_image: "quay.io/coreos/flannel:v0.10.0-amd64"
flannelVer: "v0.13.0-amd64"
flanneld_image: "easzlab/flannel:{{ flannelVer }}"

# [flannel]离线镜像tar包
flannel_offline: "flannel_{{ flannelVer }}.tar"

# ------------------------------------------- calico
# [calico]设置 CALICO_IPV4POOL_IPIP=“off”,可以提高网络性能,条件限制详见 docs/setup/calico.md
CALICO_IPV4POOL_IPIP: "Always"

# [calico]设置 calico-node使用的host IP,bgp邻居通过该地址建立,可手工指定也可以自动发现
IP_AUTODETECTION_METHOD: "can-reach={{ groups['kube_master'][0] }}"

# [calico]设置calico 网络 backend: brid, vxlan, none
CALICO_NETWORKING_BACKEND: "brid"

# [calico]更新支持calico 版本: [v3.3.x] [v3.4.x] [v3.8.x] [v3.15.x]
calico_ver: "v3.15.3"

# [calico]calico 主版本
calico_ver_main: "{{ calico_ver.split('.')[0] }}.{{ calico_ver.split('.')[1] }}"

# [calico]离线镜像tar包
calico_offline: "calico_{{ calico_ver }}.tar"

# ------------------------------------------- cilium
# [cilium]CILIUM_ETCD_OPERATOR 创建的 etcd 集群节点数 1,3,5,7...
ETCD_CLUSTER_SIZE: 1

# [cilium]镜像版本
cilium_ver: "v1.4.1"

# [cilium]离线镜像tar包
cilium_offline: "cilium_{{ cilium_ver }}.tar"

# ------------------------------------------- kube-ovn
# [kube-ovn]选择 OVN DB and OVN Control Plane 节点,默认为第一个master节点
OVN_DB_NODE: "{{ groups['kube_master'][0] }}"

# [kube-ovn]离线镜像tar包
kube_ovn_ver: "v1.5.3"
kube_ovn_offline: "kube_ovn_{{ kube_ovn_ver }}.tar"

# ------------------------------------------- kube-router
# [kube-router]公有云上存在限制,一般需要始终开启 ipinip;自有环境可以设置为 "subnet"
OVERLAY_TYPE: "full"

# [kube-router]NetworkPolicy 支持开关
FIREWALL_ENABLE: "true"

# [kube-router]kube-router 镜像版本
kube_router_ver: "v0.3.1"
busybox_ver: "1.28.4"

# [kube-router]kube-router 离线镜像tar包
kuberouter_offline: "kube-router_{{ kube_router_ver }}.tar"
busybox_offline: "busybox_{{ busybox_ver }}.tar"

############################
# role:cluster-addon
############################
# coredns 自动安装
dns_install: "no"
corednsVer: "1.8.0"
ENABLE_LOCAL_DNS_CACHE: false
dnsNodeCacheVer: "1.17.0"
# 设置 local dns cache 地址
LOCAL_DNS_CACHE: "169.254.20.10"

# metric server 自动安装
metricsserver_install: "no"
metricsVer: "v0.3.6"

# dashboard 自动安装
dashboard_install: "no"
dashboardVer: "v2.2.0"
dashboardMetricsScraperVer: "v1.0.6"

# ingress 自动安装
ingress_install: "no"
ingress_backend: "traefik"
traefik_chart_ver: "9.12.3"

# prometheus 自动安装
prom_install: "no"
prom_namespace: "monitor"
prom_chart_ver: "12.10.6"

# nfs-provisioner 自动安装
nfs_provisioner_install: "no"
nfs_provisioner_namespace: "kube-system"
nfs_provisioner_ver: "v4.0.1"
nfs_storage_class: "managed-nfs-storage"
nfs_server: "192.168.1.10"
nfs_path: "/data/nfs"

############################
# role:harbor
############################
# harbor version,完整版本号
HARBOR_VER: "v2.1.3"
HARBOR_DOMAIN: "harbor.yourdomain.com"
HARBOR_TLS_PORT: 8443

# if set 'false', you need to put certs named harbor.pem and harbor-key.pem in directory 'down'
HARBOR_SELF_SIGNED_CERT: true

# install extra component
HARBOR_WITH_NOTARY: false
HARBOR_WITH_TRIVY: false
HARBOR_WITH_CLAIR: false
HARBOR_WITH_CHARTMUSEUM: true
root@ubuntu20:/etc/kubeasz# cat roles/calico/templates/calico-v3.15.yaml.j2 |grep image
          image: 192.168.192.155:80/chuan/calico-cni:v3.15.3
          image: 192.168.192.155:80/chuan/calico-pod2daemon-flexvol:v3.15.3
          image: 192.168.192.155:80/chuan/calico-node:v3.15.3
          image: 192.168.192.155:80/chuan/calico-kube-controllers:v3.15.3
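Since the calico template now points at the private registry, those images must exist there before the network step (06) runs. A sketch following the same tag/push pattern used above for the pause image; the registry address and 'chuan' project come from the config above:

cd /etc/kubeasz/down && docker load -i calico_v3.15.3.tar
for img in cni pod2daemon-flexvol node kube-controllers; do
  docker tag calico/$img:v3.15.3 192.168.192.155:80/chuan/calico-$img:v3.15.3
  docker push 192.168.192.155:80/chuan/calico-$img:v3.15.3
done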
./ezctl setup k8s-01 01
./ezctl setup k8s-01 02
./ezctl setup k8s-01 03
./ezctl setup k8s-01 04
./ezctl setup k8s-01 05
./ezctl setup k8s-01 06
ansible-playbook -i clusters/k8s-01/hosts -e @clusters/k8s-01/config.yml playbooks/06.network.yml
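Once the network step completes, a quick sanity check from the deploy node (standard kubectl commands, not part of the original output):

kubectl get node -o wide                   # all nodes should be Ready
kubectl get pod -n kube-system -o wide     # calico-node pods should be Running on every node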
root@master001:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+-----------------+-------------------+-------+----------+-------------+
|  PEER ADDRESS   |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+-----------------+-------------------+-------+----------+-------------+
| 192.168.192.152 | node-to-node mesh | up    | 12:59:42 | Established |
| 192.168.192.153 | node-to-node mesh | up    | 12:59:42 | Established |
+-----------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.
Deploying DNS (CoreDNS)
root@kubeasz:/etc/kubeasz/down# docker load -i coredns_1.8.0.tar
docker tag coredns/coredns:1.8.0 192.168.80.250:80/chuan/dns:v2
docker push 192.168.80.250:80/chuan/dns:v2
root@slave002:~/dns# cat dns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        bind 0.0.0.0
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . 223.6.6.6 {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: 192.168.192.155:80/chuan/dns:v2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  type: NodePort
  selector:
    k8s-app: kube-dns
  clusterIP: 10.100.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
    targetPort: 9153
    nodePort: 30009
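Apply the manifest and verify resolution from inside the cluster; the throwaway test pod below is an illustration (the busybox tag matches the version listed in config.yml, but any image with nslookup will do):

kubectl apply -f dns.yaml
kubectl get pod -n kube-system -l k8s-app=kube-dns
kubectl run dns-test --rm -it --image=busybox:1.28.4 --restart=Never -- nslookup kubernetes.default.svc.cluster.local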
root@slave001:~# cat nginx.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-server
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:1.17.2-alpine
---
apiVersion: v1
kind: Service
metadata:
  name: web
spec:
  type: NodePort
  ports:
  - port: 7878
    targetPort: 80
    protocol: TCP
    name: web80
    nodePort: 32333
  selector:
    app: nginx
root@slave001:~# kubectl exec -it nginx-server sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping www.baidu.com
PING www.baidu.com (110.242.68.3): 56 data bytes
64 bytes from 110.242.68.3: seq=0 ttl=127 time=61.217 ms
64 bytes from 110.242.68.3: seq=1 ttl=127 time=13.591 ms

root@slave001:~# curl 192.168.192.152:32333
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
root@master01:~# cat /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;

error_log  /etc/kube-lb/logs/error.log warn;

events {
    worker_connections  3000;
}

stream {
    upstream backend {
        server 192.168.80.202:6443    max_fails=2 fail_timeout=3s;
        server 192.168.80.203:6443    max_fails=2 fail_timeout=3s;
        server 192.168.80.204:6443    max_fails=2 fail_timeout=3s;
    }

    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
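Each node therefore reaches the apiservers through this local 127.0.0.1:6443 proxy. A quick, illustrative check that the listener is up (not part of the original output; the curl will return 401 without credentials because anonymous auth is disabled, which still proves the path to an apiserver works):

ss -lntp | grep 6443                       # kube-lb should be listening on 127.0.0.1:6443
curl -k https://127.0.0.1:6443/version     # request is proxied to one of the kube-apiservers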
root@master01:~# systemctl status kube-*
● kube-lb.service - l4 nginx proxy for kube-apiservers
     Loaded: loaded (/etc/systemd/system/kube-lb.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:09 CST; 33min ago
    Process: 895 ExecStartPre=/etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb -t (code=exited, status=0/SUCCESS)
    Process: 920 ExecStart=/etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb (code=exited, status=0/SUCCESS)
   Main PID: 925 (kube-lb)
      Tasks: 2 (limit: 9406)
     Memory: 3.3M
     CGroup: /system.slice/kube-lb.service
             ├─925 nginx: master process /etc/kube-lb/sbin/kube-lb -c /etc/kube-lb/conf/kube-lb.conf -p /etc/kube-lb
             └─926 nginx: worker process

Feb 28 19:57:09 master01 systemd[1]: Starting l4 nginx proxy for kube-apiservers...
Feb 28 19:57:09 master01 kube-lb[895]: nginx: the configuration file /etc/kube-lb/conf/kube-lb.conf syntax is ok
Feb 28 19:57:09 master01 kube-lb[895]: nginx: configuration file /etc/kube-lb/conf/kube-lb.conf test is successful
Feb 28 19:57:09 master01 systemd[1]: Started l4 nginx proxy for kube-apiservers.

● kube-controller-manager.service - Kubernetes Controller Manager
     Loaded: loaded (/etc/systemd/system/kube-controller-manager.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:09 CST; 33min ago
       Docs: https://github.com/GoogleCloudPlatform/kubernetes
   Main PID: 894 (kube-controller)
      Tasks: 8 (limit: 9406)
     Memory: 146.2M
     CGroup: /system.slice/kube-controller-manager.service
             └─894 /usr/local/bin/kube-controller-manager --bind-address=192.168.80.202 --allocate-node-cidrs=true --cluster-cidr=10.200.0.0/16 --cluster-name=kubernetes --cluster-sig>

Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.141955 894 event.go:291] "Event occurred" object="knative-serving/activator" kind="HorizontalPodAutoscaler" a>
Feb 28 20:30:27 master01 kube-controller-manager[894]: E0228 20:30:27.151403 894 horizontal.go:227] failed to compute desired number of replicas based on listed metrics for Deploy>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.151672 894 event.go:291] "Event occurred" object="knative-serving/webhook" kind="HorizontalPodAutoscaler" api>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.151710 894 event.go:291] "Event occurred" object="knative-serving/webhook" kind="HorizontalPodAutoscaler" api>
Feb 28 20:30:27 master01 kube-controller-manager[894]: E0228 20:30:27.157870 894 horizontal.go:227] failed to compute desired number of replicas based on listed metrics for Deploy>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.158182 894 event.go:291] "Event occurred" object="istio-system/istiod" kind="HorizontalPodAutoscaler" apiVers>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.158252 894 event.go:291] "Event occurred" object="istio-system/istiod" kind="HorizontalPodAutoscaler" apiVers>
Feb 28 20:30:27 master01 kube-controller-manager[894]: E0228 20:30:27.164028 894 horizontal.go:227] failed to compute desired number of replicas based on listed metrics for Deploy>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.164150 894 event.go:291] "Event occurred" object="knative-eventing/eventing-webhook" kind="HorizontalPodAutos>
Feb 28 20:30:27 master01 kube-controller-manager[894]: I0228 20:30:27.164181 894 event.go:291] "Event occurred" object="knative-eventing/eventing-webhook" kind="HorizontalPodAutos>

● kube-scheduler.service - Kubernetes Scheduler
     Loaded: loaded (/etc/systemd/system/kube-scheduler.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:09 CST; 33min ago
       Docs: https://github.com/GoogleCloudPlatform/kubernetes
   Main PID: 899 (kube-scheduler)
      Tasks: 9 (limit: 9406)
     Memory: 57.8M
     CGroup: /system.slice/kube-scheduler.service
             └─899 /usr/local/bin/kube-scheduler --config=/etc/kubernetes/kube-scheduler-config.yaml --v=2

Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.003254 899 reflector.go:219] Starting reflector *v1.ReplicaSet (0s) from k8s.io/client-go/informers/factory.go:134
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.000109 899 reflector.go:219] Starting reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:134
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.006279 899 reflector.go:219] Starting reflector *v1.PersistentVolumeClaim (0s) from k8s.io/client-go/informers/factory>
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.000372 899 reflector.go:219] Starting reflector *v1.PersistentVolume (0s) from k8s.io/client-go/informers/factory.go:1>
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.000583 899 reflector.go:219] Starting reflector *v1.CSIDriver (0s) from k8s.io/client-go/informers/factory.go:134
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.000811 899 reflector.go:219] Starting reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:134
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.217260 899 node_tree.go:65] Added node "192.168.80.202" in group "" to NodeTree
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.217555 899 node_tree.go:65] Added node "192.168.80.203" in group "" to NodeTree
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.223528 899 node_tree.go:65] Added node "192.168.80.204" in group "" to NodeTree
Feb 28 19:57:26 master01 kube-scheduler[899]: I0228 19:57:26.389917 899 leaderelection.go:243] attempting to acquire leader lease kube-system/kube-scheduler...

● kube-apiserver.service - Kubernetes API Server
     Loaded: loaded (/etc/systemd/system/kube-apiserver.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:25 CST; 33min ago
       Docs: https://github.com/GoogleCloudPlatform/kubernetes
   Main PID: 893 (kube-apiserver)
      Tasks: 11 (limit: 9406)
     Memory: 505.3M
     CGroup: /system.slice/kube-apiserver.service
             └─893 /usr/local/bin/kube-apiserver --advertise-address=192.168.80.202 --allow-privileged=true --anonymous-auth=false --api-audiences=api,istio-ca --authorization-mode=No>

Feb 28 20:29:39 master01 kube-apiserver[893]: I0228 20:29:39.336968 893 clientconn.go:948] ClientConn switching balancer to "pick_first"
Feb 28 20:29:39 master01 kube-apiserver[893]: I0228 20:29:39.337097 893 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a9949b0, {CONNECTING <nil>}
Feb 28 20:29:39 master01 kube-apiserver[893]: I0228 20:29:39.341918 893 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc00a9949b0, {READY <nil>}
Feb 28 20:29:39 master01 kube-apiserver[893]: I0228 20:29:39.343101 893 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing"
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.957140 893 client.go:360] parsed scheme: "passthrough"
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.957217 893 passthrough.go:48] ccResolverWrapper: sending update to cc: {[{https://192.168.80.202:2379 <nil> 0 <nil>}]>
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.957225 893 clientconn.go:948] ClientConn switching balancer to "pick_first"
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.957337 893 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc01b2a4510, {CONNECTING <nil>}
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.962570 893 balancer_conn_wrappers.go:78] pickfirstBalancer: HandleSubConnStateChange: 0xc01b2a4510, {READY <nil>}
Feb 28 20:30:12 master01 kube-apiserver[893]: I0228 20:30:12.963677 893 controlbuf.go:508] transport: loopyWriter.run returning. connection error: desc = "transport is closing"

● kube-proxy.service - Kubernetes Kube-Proxy Server
     Loaded: loaded (/etc/systemd/system/kube-proxy.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:09 CST; 33min ago
       Docs: https://github.com/GoogleCloudPlatform/kubernetes
   Main PID: 898 (kube-proxy)
      Tasks: 8 (limit: 9406)
     Memory: 58.5M
     CGroup: /system.slice/kube-proxy.service
             └─898 /usr/local/bin/kube-proxy --config=/var/lib/kube-proxy/kube-proxy-config.yaml

Feb 28 19:57:25 master01 kube-proxy[898]: Trace[382958093]: [12.655165518s] [12.655165518s] END
Feb 28 19:57:25 master01 kube-proxy[898]: E0228 19:57:25.934258 898 reflector.go:138] k8s.io/client-go/informers/factory.go:134: Failed to watch *v1beta1.EndpointSlice: failed to >
Feb 28 19:57:27 master01 kube-proxy[898]: I0228 19:57:27.039122 898 shared_informer.go:247] Caches are synced for service config
Feb 28 19:57:27 master01 kube-proxy[898]: W0228 19:57:27.123843 898 warnings.go:70] discovery.k8s.io/v1beta1 EndpointSlice is deprecated in v1.21+, unavailable in v1.25+; use disc>
Feb 28 19:57:27 master01 kube-proxy[898]: I0228 19:57:27.147149 898 shared_informer.go:247] Caches are synced for endpoint slice config

root@master01:~# systemctl status kubelet.service
● kubelet.service - Kubernetes Kubelet
     Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: enabled)
     Active: active (running) since Mon 2022-02-28 19:57:09 CST; 35min ago
       Docs: https://github.com/GoogleCloudPlatform/kubernetes
    Process: 901 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpu/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 923 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpuacct/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 931 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpuset/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 935 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/memory/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 936 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/pids/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 937 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/systemd/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 938 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpu/system.slice (code=exited, status=0/SUCCESS)
    Process: 939 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpuacct/system.slice (code=exited, status=0/SUCCESS)
    Process: 940 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/cpuset/system.slice (code=exited, status=0/SUCCESS)
    Process: 945 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/memory/system.slice (code=exited, status=0/SUCCESS)
    Process: 950 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/pids/system.slice (code=exited, status=0/SUCCESS)
    Process: 958 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/systemd/system.slice (code=exited, status=0/SUCCESS)
    Process: 960 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/hugetlb/podruntime.slice (code=exited, status=0/SUCCESS)
    Process: 961 ExecStartPre=/bin/mkdir -p /sys/fs/cgroup/hugetlb/system.slice (code=exited, status=0/SUCCESS)
   Main PID: 966 (kubelet)
      Tasks: 25 (limit: 9406)
     Memory: 140.1M
     CGroup: /system.slice/kubelet.service
             └─966 /usr/local/bin/kubelet --config=/var/lib/kubelet/config.yaml --cni-bin-dir=/usr/local/bin --cni-conf-dir=/etc/cni/net.d --hostname-override=192.168.80.202 --image-p>

Feb 28 19:58:28 master01 kubelet[966]: I0228 19:58:28.038457 966 logs.go:319] "Finished parsing log file" path="/var/lib/docker/containers/905e99a3d1cd5eec7246287ebbeda79dd537c16b>