KubeSphere 安装与节点管理笔记（kk 创建集群 / 添加节点 / 删除节点，及 kubeadm 离线镜像准备）

[root@node01 kubesphere]# pwd
/root/kubesphere
[root@node01 kubesphere]# export KKZONE=cn
[root@node01 kubesphere]# ./kk create cluster -f config-sample.yaml
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+
| name | sudo | curl | openssl | ebtables | socat | ipset | conntrack | docker | nfs client | ceph client | glusterfs client | time |
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+
| node02 | y | y | y | y | y | y | y | 20.10.17 | y | | | CST 11:53:58 |
| node01 | y | y | y | y | y | y | y | 20.10.17 | y | | | CST 11:53:58 |
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+

This is a simple check of your environment.
Before installation, you should ensure that your machines meet all requirements specified at
https://github.com/kubesphere/kubekey#requirements-and-recommendations

Continue this installation? [yes/no]: yes
INFO[11:54:01 CST] Downloading Installation Files
INFO[11:54:01 CST] Downloading kubeadm ...
INFO[11:54:01 CST] Downloading kubelet ...
INFO[11:54:02 CST] Downloading kubectl ...
INFO[11:54:02 CST] Downloading helm ...
INFO[11:54:03 CST] Downloading kubecni ...
INFO[11:54:03 CST] Downloading etcd ...
INFO[11:54:03 CST] Downloading docker ...
INFO[11:54:03 CST] Downloading crictl ...
INFO[11:54:04 CST] Configuring operating system ...
[node02 172.22.0.13] MSG:
net.core.rmem_default = 33554432
net.core.rmem_max = 33554432
net.ipv4.ip_forward = 1
vm.swappiness = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
[node01 172.22.0.12] MSG:
net.core.rmem_default = 33554432
net.core.rmem_max = 33554432
net.ipv4.ip_forward = 1
vm.swappiness = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
INFO[11:54:05 CST] Get cluster status
INFO[11:54:05 CST] Installing Container Runtime ...
INFO[11:54:05 CST] Start to download images on all nodes
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.10
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.20.10
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.20.10
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.20.10
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.10
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.20.0
INFO[11:54:37 CST] Getting etcd status
[node01 172.22.0.12] MSG:
Configuration file will be created
INFO[11:54:37 CST] Generating etcd certs
INFO[11:54:38 CST] Synchronizing etcd certs
INFO[11:54:38 CST] Creating etcd service
Push /root/kubesphere/kubekey/v1.20.10/amd64/etcd-v3.4.13-linux-amd64.tar.gz to 172.22.0.12:/tmp/kubekey/etcd-v3.4.13-linux-amd64.tar.gz Done
INFO[11:54:39 CST] Starting etcd cluster
INFO[11:54:39 CST] Refreshing etcd configuration
[node01 172.22.0.12] MSG:
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /etc/systemd/system/etcd.service.
INFO[11:54:44 CST] Backup etcd data regularly
INFO[11:54:50 CST] Installing kube binaries
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubeadm to 172.22.0.12:/tmp/kubekey/kubeadm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubeadm to 172.22.0.13:/tmp/kubekey/kubeadm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubelet to 172.22.0.13:/tmp/kubekey/kubelet Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubelet to 172.22.0.12:/tmp/kubekey/kubelet Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubectl to 172.22.0.12:/tmp/kubekey/kubectl Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubectl to 172.22.0.13:/tmp/kubekey/kubectl Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/helm to 172.22.0.13:/tmp/kubekey/helm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/helm to 172.22.0.12:/tmp/kubekey/helm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/cni-plugins-linux-amd64-v0.9.1.tgz to 172.22.0.13:/tmp/kubekey/cni-plugins-linux-amd64-v0.9.1.tgz Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/cni-plugins-linux-amd64-v0.9.1.tgz to 172.22.0.12:/tmp/kubekey/cni-plugins-linux-amd64-v0.9.1.tgz Done
INFO[11:54:54 CST] Initializing kubernetes cluster
[node01 172.22.0.12] MSG:
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0923 11:57:10.533577 99401 configset.go:77] Warning: No kubeproxy.config.k8s.io/v1alpha1 config is loaded. Continuing without it: configmaps "kube-proxy" not found
W0923 11:57:10.537653 99401 reset.go:99] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get node registration: failed to get corresponding node: nodes "node01" not found
[preflight] Running pre-flight checks
W0923 11:57:10.537766 99401 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] No etcd config found. Assuming external etcd
[reset] Please, manually reset etcd to prevent further issues
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
[node01 172.22.0.12] MSG:
W0923 11:57:13.869101 99849 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[init] Using Kubernetes version: v1.20.10
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.17. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local lb.kubesphere.local localhost node01 node01.cluster.local node02 node02.cluster.local] and IPs [10.233.0.1 172.22.0.12 127.0.0.1 172.22.0.13]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 11.001617 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node node01 as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node node01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 5mn681.8wm269izpse9qco6
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

kubeadm join lb.kubesphere.local:6443 --token 5mn681.8wm269izpse9qco6 \
--discovery-token-ca-cert-hash sha256:0751e7db3c8e0973099150acf23fb729194356bafe869d6985ab8423891f038f \
--control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join lb.kubesphere.local:6443 --token 5mn681.8wm269izpse9qco6 \
--discovery-token-ca-cert-hash sha256:0751e7db3c8e0973099150acf23fb729194356bafe869d6985ab8423891f038f
[node01 172.22.0.12] MSG:
service "kube-dns" deleted
[node01 172.22.0.12] MSG:
service/coredns created
Warning: resource clusterroles/system:coredns is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
clusterrole.rbac.authorization.k8s.io/system:coredns configured
[node01 172.22.0.12] MSG:
serviceaccount/nodelocaldns created
daemonset.apps/nodelocaldns created
[node01 172.22.0.12] MSG:
configmap/nodelocaldns created
INFO[11:57:50 CST] Get cluster status
INFO[11:57:51 CST] Joining nodes to cluster
[node02 172.22.0.13] MSG:
[preflight] Running pre-flight checks
W0923 12:00:03.280153 4401 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] No etcd config found. Assuming external etcd
[reset] Please, manually reset etcd to prevent further issues
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]

The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
[node02 172.22.0.13] MSG:
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.17. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0923 12:00:03.923162 4485 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[node02 172.22.0.13] MSG:
node/node02 labeled
INFO[12:00:11 CST] Deploying network plugin ...
[node01 172.22.0.12] MSG:
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
[node01 172.22.0.12] MSG:
storageclass.storage.k8s.io/local created
serviceaccount/openebs-maya-operator created
clusterrole.rbac.authorization.k8s.io/openebs-maya-operator created
clusterrolebinding.rbac.authorization.k8s.io/openebs-maya-operator created
deployment.apps/openebs-localpv-provisioner created
INFO[12:00:12 CST] Deploying KubeSphere ...
v3.2.0
[node01 172.22.0.12] MSG:
namespace/kubesphere-system created
namespace/kubesphere-monitoring-system created
[node01 172.22.0.12] MSG:
secret/kube-etcd-client-certs created
[node01 172.22.0.12] MSG:
namespace/kubesphere-system unchanged
serviceaccount/ks-installer unchanged
customresourcedefinition.apiextensions.k8s.io/clusterconfigurations.installer.kubesphere.io unchanged
clusterrole.rbac.authorization.k8s.io/ks-installer unchanged
clusterrolebinding.rbac.authorization.k8s.io/ks-installer unchanged
deployment.apps/ks-installer unchanged
clusterconfiguration.installer.kubesphere.io/ks-installer created
#####################################################
### Welcome to KubeSphere! ###
#####################################################

Console: http://172.22.0.12:30880
Account: admin
Password: P@88w0rd

NOTES:
1. After you log into the console, please check the
monitoring status of service components in
"Cluster Management". If any service is not
ready, please wait patiently until all components
are up and running.
2. Please change the default password after login.

#####################################################
https://kubesphere.io 2022-09-23 12:04:29
#####################################################
INFO[12:04:35 CST] Installation is complete.

Please check the result using the command:

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f


$ export KKZONE=cn
$ kk add nodes -f config-sample.yaml

 

[root@node01 kubesphere]# ./kk add nodes -f config-sample.yaml
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+
| name | sudo | curl | openssl | ebtables | socat | ipset | conntrack | docker | nfs client | ceph client | glusterfs client | time |
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+
| node03 | y | y | y | y | y | y | y | 20.10.17 | y | | | CST 09:08:45 |
| node01 | y | y | y | y | y | y | y | 20.10.17 | y | | | CST 09:08:45 |
| node02 | y | y | y | y | y | y | y | 20.10.17 | y | | | CST 09:08:45 |
+--------+------+------+---------+----------+-------+-------+-----------+----------+------------+-------------+------------------+--------------+

This is a simple check of your environment.
Before installation, you should ensure that your machines meet all requirements specified at
https://github.com/kubesphere/kubekey#requirements-and-recommendations

Continue this installation? [yes/no]: yes
INFO[09:08:48 CST] Downloading Installation Files
INFO[09:08:48 CST] Downloading kubeadm ...
INFO[09:08:48 CST] Downloading kubelet ...
INFO[09:08:49 CST] Downloading kubectl ...
INFO[09:08:49 CST] Downloading helm ...
INFO[09:08:49 CST] Downloading kubecni ...
INFO[09:08:49 CST] Downloading etcd ...
INFO[09:08:49 CST] Downloading docker ...
INFO[09:08:50 CST] Downloading crictl ...
INFO[09:08:50 CST] Configuring operating system ...
[node01 172.22.0.12] MSG:
net.core.rmem_default = 33554432
net.core.rmem_max = 33554432
net.ipv4.ip_forward = 1
vm.swappiness = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
[node02 172.22.0.13] MSG:
net.core.rmem_default = 33554432
net.core.rmem_max = 33554432
net.ipv4.ip_forward = 1
vm.swappiness = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
[node03 172.22.0.11] MSG:
net.core.rmem_default = 33554432
net.core.rmem_max = 33554432
net.ipv4.ip_forward = 1
vm.swappiness = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.tcp_max_syn_backlog = 65536
net.core.netdev_max_backlog = 32768
net.core.somaxconn = 32768
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.ip_local_port_range = 1024 65535
net.bridge.bridge-nf-call-arptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
fs.inotify.max_user_instances = 524288
INFO[09:08:57 CST] Get cluster status
INFO[09:08:58 CST] Installing Container Runtime ...
INFO[09:08:58 CST] Start to download images on all nodes
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.10
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.20.10
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.10
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.20.10
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.20.10
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.20.0
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.10
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[node03] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.20.0
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[node02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.20.0
[node01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.20.0
INFO[09:09:05 CST] Getting etcd status
[node01 172.22.0.12] MSG:
Configuration file already exists
[node01 172.22.0.12] MSG:
ETCD_NAME=etcd-node01
INFO[09:09:05 CST] Generating etcd certs
INFO[09:09:05 CST] Synchronizing etcd certs
INFO[09:09:05 CST] Creating etcd service
Push /root/kubesphere/kubekey/v1.20.10/amd64/etcd-v3.4.13-linux-amd64.tar.gz to 172.22.0.12:/tmp/kubekey/etcd-v3.4.13-linux-amd64.tar.gz Done
INFO[09:09:06 CST] Starting etcd cluster
INFO[09:09:06 CST] Refreshing etcd configuration
INFO[09:09:06 CST] Backup etcd data regularly
INFO[09:09:13 CST] Installing kube binaries
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubeadm to 172.22.0.11:/tmp/kubekey/kubeadm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubelet to 172.22.0.11:/tmp/kubekey/kubelet Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/kubectl to 172.22.0.11:/tmp/kubekey/kubectl Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/helm to 172.22.0.11:/tmp/kubekey/helm Done
Push /root/kubesphere/kubekey/v1.20.10/amd64/cni-plugins-linux-amd64-v0.9.1.tgz to 172.22.0.11:/tmp/kubekey/cni-plugins-linux-amd64-v0.9.1.tgz Done
INFO[09:09:17 CST] Joining nodes to cluster
[node03 172.22.0.11] MSG:
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.17. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0926 09:09:18.661303 67190 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[node03 172.22.0.11] MSG:
node/node03 labeled
INFO[09:09:26 CST] Congratulations! Scaling cluster is successful.

./kk delete node <nodeName> -f config-sample.yaml

[root@node01 kubesphere]# ./kk delete node node03 -f config-sample.yaml
Are you sure to delete this node? [yes/no]: yes
INFO[11:10:49 CST] Resetting kubernetes node ...
[node01 172.22.0.12] MSG:
node02
node03
[node01 172.22.0.12] MSG:
Flag --delete-local-data has been deprecated, This option is deprecated and will be deleted. Use --delete-emptydir-data.
node/node03 already cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-c25q2, kube-system/kube-proxy-vgqtn, kube-system/nodelocaldns-gsr5b, kubesphere-monitoring-system/node-exporter-qtsxh
node/node03 drained
[node01 172.22.0.12] MSG:
node "node03" deleted
INFO[11:10:49 CST] Successful.

 

 

# Pull the Kubernetes v1.25.2 control-plane images from the Aliyun mirror
# (reachable from mainland China) instead of the blocked k8s.gcr.io registry.
mirror=registry.cn-hangzhou.aliyuncs.com/google_containers
for image in \
  kube-apiserver:v1.25.2 \
  kube-controller-manager:v1.25.2 \
  kube-scheduler:v1.25.2 \
  kube-proxy:v1.25.2 \
  pause:3.8 \
  etcd:3.5.4-0 \
  coredns:v1.9.3; do
  docker pull "${mirror}/${image}"
done

 

# Re-tag the mirrored images with the names kubeadm expects under k8s.gcr.io.
# NOTE: coredns is the one irregular mapping — on k8s.gcr.io it lives under
# the extra "coredns/" repository path, so a simple name-for-name loop
# cannot be used; the table below keeps each explicit src -> dst pair.
mirror=registry.cn-hangzhou.aliyuncs.com/google_containers
while read -r src dst; do
  docker tag "${mirror}/${src}" "k8s.gcr.io/${dst}"
done <<'EOF'
kube-apiserver:v1.25.2 kube-apiserver:v1.25.2
kube-controller-manager:v1.25.2 kube-controller-manager:v1.25.2
kube-scheduler:v1.25.2 kube-scheduler:v1.25.2
kube-proxy:v1.25.2 kube-proxy:v1.25.2
pause:3.8 pause:3.8
etcd:3.5.4-0 etcd:3.5.4-0
coredns:v1.9.3 coredns/coredns:v1.9.3
EOF

# Install a pinned Kubernetes 1.20.11 toolchain from the configured yum repo.
# NOTE(review): these 1.20.11 packages do not match the kubeadm init examples
# below (v1.23.1 / v1.19.0) — confirm the intended cluster version before use.
yum install -y kubelet-1.20.11 kubeadm-1.20.11 kubectl-1.20.11

# Enable kubelet at boot and start it now. (kubelet typically crash-loops
# until 'kubeadm init'/'kubeadm join' writes its config — presumably expected
# here; verify after init.)
sudo systemctl enable kubelet
sudo systemctl start kubelet
# Pin Docker CE to 19.03.5 — the preflight warnings in the transcript above
# show 19.03 is the latest version validated by this kubeadm release.
yum -y install docker-ce-19.03.5
# Sanity-check that all three components report the expected versions.
kubeadm version
kubectl version
kubelet --version

# Variant 1: init with explicit CIDRs (advertise address is this node's IP;
# pod CIDR 10.244.0.0/16 matches the flannel/calico defaults used elsewhere).
# BUG FIX: the original was missing the trailing '\' after --pod-network-cidr,
# so --ignore-preflight-errors=all was executed as a separate, failing command
# instead of being passed to kubeadm.
# CAUTION: --ignore-preflight-errors=all suppresses every preflight check
# (including the Docker version/cgroup warnings seen above) — use deliberately.
kubeadm init \
--kubernetes-version=1.23.1 \
--apiserver-advertise-address=116.63.216.250 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all
# Variant 2: init pulling images from the Aliyun mirror (no manual
# docker pull/tag needed) with the default 10.96.0.0/12 service CIDR.
# NOTE(review): targets v1.19.0 while the yum step above installs 1.20.11 —
# confirm which version this cluster should run.
kubeadm init \
--apiserver-advertise-address=116.63.216.250 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.19.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all

posted @ 2023-01-10 16:10  beawh  阅读(96)  评论(0)  编辑  收藏  举报