Detailed steps for a local k8s (Kubernetes) setup
Preparation before the setup:
Hostname configuration
cat >/etc/hosts<<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.19.61 k8s-api.virtual.local k8s-api
192.168.19.61 etcd-1
192.168.19.62 etcd-2
192.168.19.63 etcd-3
EOF
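To push the same hosts file to the other machines, a small sketch (run it after the passwordless SSH step below is in place):
for host in etcd-2 etcd-3; do scp /etc/hosts $host:/etc/hosts; done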
Environment variable configuration:
[root@k8s-api ~]# cat env.sh
# Token used for TLS bootstrapping; generate one with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="2f84230ea8fd1994946a994c7b458559"
# Use otherwise-unused network ranges for the service and pod networks.
# Service network (Service CIDR): unroutable before deployment; reachable inside the cluster as IP:Port afterwards
SERVICE_CIDR="10.254.0.0/16"
# Pod network (Cluster CIDR): unroutable before deployment; routable afterwards (flanneld takes care of this)
CLUSTER_CIDR="172.30.0.0/16"
# Service port range (NodePort range)
NODE_PORT_RANGE="30000-32766"
# etcd cluster endpoint list
ETCD_ENDPOINTS="https://192.168.19.61:2379,https://192.168.19.62:2379,https://192.168.19.63:2379"
# flanneld network configuration prefix in etcd
FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes service IP (pre-allocated; normally the first IP in SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.254.0.2"
# cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."
# master API server address
MASTER_URL="k8s-api.virtual.local"
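The later steps source this file from /usr/k8s/bin (the loop below copies it there); a quick sanity check after copying:
source /usr/k8s/bin/env.sh
echo "$ETCD_ENDPOINTS $SERVICE_CIDR $CLUSTER_CIDR $MASTER_URL"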
Passwordless SSH between the hosts:
Run on every host; every host must copy its public key to all the others (see the full-mesh sketch after the ssh-copy-id example below).
ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub root@etcd-3
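Since every host has to trust every other, the copy is a full mesh; a sketch to run on each host (hostnames taken from /etc/hosts above):
for host in etcd-1 etcd-2 etcd-3; do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host; done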
for host in $(cat remote-hosts)
do
#ssh $host "yum -y install wget"
#ssh $host "mkdir -p /etc/kubernetes/ssl"
#ssh $host "mkdir -p /usr/k8s/bin"
#scp env.sh $host:/usr/k8s/bin
#ssh $host "chmod +x /usr/k8s/bin/env.sh"
#ssh $host "echo "export PATH=/usr/k8s/bin:$PATH" >> /etc/rc.local "
scp /root/ssl/ca* $host:/etc/kubernetes/ssl
#scp token.csv $host:/etc/kubernetes/
#scp kubernetes-server-linux-amd64.tar.gz $host:kubernetes-server-linux-amd64.tar.gz
#scp /etc/kubernetes/*.kubeconfig $host:/etc/kubernetes/
#ssh $host "yum -y install etcd"
#ssh $host "mkdir -p /opt/etcd"
#ssh $host "chmod -R 777 /opt/etcd"
#ssh $host "yum -y install ntpdate;ntpdate cn.ntp.org.cn;ln -sf /usr/share/zoneinfo/Asia/Chongqing /etc/localtime"
#ssh $host "chmod -R 775 /etc/kubernetes/"
#ssh $host "yum install -y flannel"
#ssh $host "yum install docker -y"
done
Create the CA certificate and key (this was run on every machine)
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
sudo mv cfssl_linux-amd64 /usr/k8s/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
sudo mv cfssljson_linux-amd64 /usr/k8s/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
sudo mv cfssl-certinfo_linux-amd64 /usr/k8s/bin/cfssl-certinfo
export PATH=/usr/k8s/bin:$PATH
mkdir ssl && cd ssl
cfssl print-defaults config > config.json
cfssl print-defaults csr > csr.json
cat >ca-config.json<<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
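The command that actually generates the CA from ca-csr.json is missing at this point in the notes (it reappears in the appendix at the end); with cfssl it is:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
sudo mkdir -p /etc/kubernetes/ssl
sudo cp ca* /etc/kubernetes/ssl
The scp loop above then distributes /root/ssl/ca* to the other machines.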
Deploy the etcd cluster. Per-node variables (shown for etcd01; adjust NODE_NAME and NODE_IP on the other two nodes):
export NODE_NAME=etcd01
export NODE_IP=192.168.19.61
export NODE_IPS="192.168.19.61 192.168.19.62 192.168.19.63"
export ETCD_NODES=etcd01=https://192.168.19.61:2380,etcd02=https://192.168.19.62:2380,etcd03=https://192.168.19.63:2380
sudo mv etcd.service /etc/systemd/system/   # the etcd.service unit content itself is shown in the appendix at the end of these notes
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl restart etcd
sudo systemctl status etcd
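To verify the cluster from any node, a sketch using the NODE_IPS variable set above (assumes the etcd units point at the kubernetes.pem pair, as the appendix versions do):
for ip in ${NODE_IPS}; do
  etcdctl --endpoints=https://${ip}:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem cluster-health
done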
ntpdate cn.ntp.org.cn && ln -sf /usr/share/zoneinfo/Asia/Chongqing /etc/localtime   # ntpd takes no server argument on the command line; ntpdate plus a forced symlink is enough
tar -xzvf kubernetes-client-linux-amd64.tar.gz
sudo cp kubernetes/client/bin/kube* /usr/k8s/bin/
sudo chmod a+x /usr/k8s/bin/kube*
export PATH=/usr/k8s/bin:$PATH
Deploy flannel (run on every node; set NODE_IP to that node's own IP):
export PATH=/usr/k8s/bin:$PATH
export NODE_IP=192.168.19.65
source /usr/k8s/bin/env.sh
cat > flanneld-csr.json <<EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
sudo mkdir -p /etc/flanneld/ssl
sudo mv flanneld*.pem /etc/flanneld/ssl
etcdctl --endpoints=${ETCD_ENDPOINTS} --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/flanneld/ssl/flanneld.pem --key-file=/etc/flanneld/ssl/flanneld-key.pem set ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
mkdir flannel
tar -xzvf flannel-v0.9.0-linux-amd64.tar.gz -C flannel
sudo cp flannel/{flanneld,mk-docker-opts.sh} /usr/k8s/bin
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/k8s/bin/flanneld \\
-etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
-etcd-certfile=/etc/flanneld/ssl/flanneld.pem \\
-etcd-keyfile=/etc/flanneld/ssl/flanneld-key.pem \\
-etcd-endpoints=${ETCD_ENDPOINTS} \\
-etcd-prefix=${FLANNEL_ETCD_PREFIX}
ExecStartPost=/usr/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
sudo cp flanneld.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable flanneld
sudo systemctl start flanneld
systemctl status flanneld
ip addr
etcdctl --endpoints=${ETCD_ENDPOINTS} --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/flanneld/ssl/flanneld.pem --key-file=/etc/flanneld/ssl/flanneld-key.pem get ${FLANNEL_ETCD_PREFIX}/config
etcdctl --endpoints=${ETCD_ENDPOINTS} --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/flanneld/ssl/flanneld.pem --key-file=/etc/flanneld/ssl/flanneld-key.pem ls ${FLANNEL_ETCD_PREFIX}/subnets
etcdctl --endpoints=${ETCD_ENDPOINTS} --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/flanneld/ssl/flanneld.pem --key-file=/etc/flanneld/ssl/flanneld-key.pem get ${FLANNEL_ETCD_PREFIX}/subnets/172.30.66.0-24
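mk-docker-opts.sh (the ExecStartPost above) writes the flannel subnet options into /run/flannel/docker as DOCKER_NETWORK_OPTIONS; docker has to be told to read them. A sketch of a systemd drop-in, assuming the stock CentOS docker unit (whose ExecStart already passes $DOCKER_NETWORK_OPTIONS; if yours does not, add it there):
sudo mkdir -p /etc/systemd/system/docker.service.d
cat > flannel.conf << EOF
[Service]
EnvironmentFile=-/run/flannel/docker
EOF
sudo cp flannel.conf /etc/systemd/system/docker.service.d/
sudo systemctl daemon-reload && sudo systemctl restart docker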
Deploy the master components (kube-apiserver, kube-controller-manager, kube-scheduler):
export NODE_IP=192.168.19.63
source /usr/k8s/bin/env.sh
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
sudo cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} /usr/k8s/bin/
cat > kubernetes-csr.json <<EOF
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"${NODE_IP}",
"${MASTER_URL}",
"${CLUSTER_KUBERNETES_SVC_IP}",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cat kubernetes-csr.json
cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*
sudo mkdir -p /etc/kubernetes/ssl/
sudo mv kubernetes*.pem /etc/kubernetes/ssl/
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
cat token.csv
sudo mv token.csv /etc/kubernetes/
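Token-based bootstrapping also needs the kubelet-bootstrap user bound to the node-bootstrapper role; once the API server below is up, run this standard step (not shown in the original notes):
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap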
cat > kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/usr/k8s/bin/kube-apiserver \\
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
--advertise-address=${NODE_IP} \\
--bind-address=0.0.0.0 \\
--insecure-bind-address=${NODE_IP} \\
--authorization-mode=Node,RBAC \\
--runtime-config=rbac.authorization.k8s.io/v1alpha1 \\
--kubelet-https=true \\
--experimental-bootstrap-token-auth \\
--token-auth-file=/etc/kubernetes/token.csv \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--service-node-port-range=${NODE_PORT_RANGE} \\
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
--client-ca-file=/etc/kubernetes/ssl/ca.pem \\
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \\
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \\
--etcd-servers=${ETCD_ENDPOINTS} \\
--enable-swagger-ui=true \\
--allow-privileged=true \\
--apiserver-count=2 \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/var/lib/audit.log \\
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
--event-ttl=1h \\
--logtostderr=true \\
--v=6
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/audit-policy.yaml << EOF
apiVersion: audit.k8s.io/v1beta1 # This is required.
kind: Policy
# Don't generate audit events for all requests in RequestReceived stage.
omitStages:
- "RequestReceived"
rules:
# Log pod changes at RequestResponse level
- level: RequestResponse
resources:
- group: ""
# Resource "pods" doesn't match requests to any subresource of pods,
# which is consistent with the RBAC policy.
resources: ["pods"]
# Log "pods/log", "pods/status" at Metadata level
- level: Metadata
resources:
- group: ""
resources: ["pods/log", "pods/status"]
# Don't log requests to a configmap called "controller-leader"
- level: None
resources:
- group: ""
resources: ["configmaps"]
resourceNames: ["controller-leader"]
# Don't log watch requests by the "system:kube-proxy" on endpoints or services
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core API group
resources: ["endpoints", "services"]
# Don't log authenticated requests to certain non-resource URL paths.
- level: None
userGroups: ["system:authenticated"]
nonResourceURLs:
- "/api*" # Wildcard matching.
- "/version"
# Log the request body of configmap changes in kube-system.
- level: Request
resources:
- group: "" # core API group
resources: ["configmaps"]
# This rule only applies to resources in the "kube-system" namespace.
# The empty string "" can be used to select non-namespaced resources.
namespaces: ["kube-system"]
# Log configmap and secret changes in all other namespaces at the Metadata level.
- level: Metadata
resources:
- group: "" # core API group
resources: ["secrets", "configmaps"]
# Log all other resources in core and extensions at the Request level.
- level: Request
resources:
- group: "" # core API group
- group: "extensions" # Version of group should NOT be included.
# A catch-all rule to log all other requests at the Metadata level.
- level: Metadata
# Long-running requests like watches that fall under this rule will not
# generate an audit event in RequestReceived.
omitStages:
- "RequestReceived"
EOF
sudo cp kube-apiserver.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-apiserver
sudo systemctl start kube-apiserver
sudo systemctl status kube-apiserver
cat > kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/k8s/bin/kube-controller-manager \\
--address=127.0.0.1 \\
--master=http://${MASTER_URL}:8080 \\
--allocate-node-cidrs=true \\
--service-cluster-ip-range=${SERVICE_CIDR} \\
--cluster-cidr=${CLUSTER_CIDR} \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/etc/kubernetes/ssl/ca.pem \\
--leader-elect=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo cp kube-controller-manager.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-controller-manager
sudo systemctl start kube-controller-manager
sudo systemctl status kube-controller-manager
cat > kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/usr/k8s/bin/kube-scheduler \\
--address=127.0.0.1 \\
--master=http://${MASTER_URL}:8080 \\
--leader-elect=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo cp kube-scheduler.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-scheduler
sudo systemctl start kube-scheduler
sudo systemctl status kube-scheduler
kubectl get componentstatuses
Load-balance the API server with haproxy and keepalived (configs sketched below):
sudo systemctl start haproxy
sudo systemctl enable haproxy
sudo systemctl status haproxy
sudo systemctl start keepalived
sudo systemctl enable keepalived
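Neither the haproxy nor the keepalived config appears in these notes. A minimal haproxy.cfg sketch for TCP-balancing the secure port; the two master IPs are assumptions inferred from the --apiserver-count=2 setting above:
cat > /etc/haproxy/haproxy.cfg << EOF
defaults
    mode tcp
    timeout connect 5s
    timeout client 30s
    timeout server 30s
frontend k8s-api
    bind *:6443
    default_backend k8s-masters
backend k8s-masters
    balance roundrobin
    server master1 192.168.19.61:6443 check
    server master2 192.168.19.63:6443 check
EOF
keepalived would float the VIP that k8s-api.virtual.local resolves to between the load-balancer nodes; its config is likewise omitted here.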
Deploy the worker nodes. First disable the firewall and flush iptables:
sudo systemctl daemon-reload
sudo systemctl stop firewalld
sudo systemctl disable firewalld
sudo iptables -F && sudo iptables -X && sudo iptables -F -t nat && sudo iptables -X -t nat
sudo systemctl enable docker
sudo systemctl start docker
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes
tar -xzvf kubernetes-src.tar.gz
sudo cp -r ./server/bin/{kube-proxy,kubelet} /usr/k8s/bin/
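The kubelet.service copied below is never shown in these notes. A minimal sketch for this era of Kubernetes; the kubeconfig paths are assumptions (bootstrap.kubeconfig would be among the *.kubeconfig files scp'd in the loop at the top), and the variables come from env.sh:
sudo mkdir -p /var/lib/kubelet
cat > kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/k8s/bin/kubelet \\
  --address=${NODE_IP} \\
  --hostname-override=${NODE_IP} \\
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --cert-dir=/etc/kubernetes/ssl \\
  --cluster-dns=${CLUSTER_DNS_SVC_IP} \\
  --cluster-domain=${CLUSTER_DNS_DOMAIN} \\
  --allow-privileged=true \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF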
sudo cp kubelet.service /etc/systemd/system/kubelet.service
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl start kubelet
systemctl status kubelet
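With TLS bootstrapping, the kubelet's certificate request must be approved on the master before the node registers (standard flow, not shown in the original notes):
kubectl get csr
kubectl certificate approve <csr-name>   # substitute the name printed by the previous command
kubectl get nodes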
sudo cp kube-proxy.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-proxy
sudo systemctl start kube-proxy
systemctl status kube-proxy
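The kube-proxy.service copied above is also never shown. A minimal sketch, assuming a kube-proxy.kubeconfig built from the kube-proxy certificate generated in the appendix, with CLUSTER_CIDR (the pod network) from env.sh:
sudo mkdir -p /var/lib/kube-proxy
cat > kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/k8s/bin/kube-proxy \\
  --bind-address=${NODE_IP} \\
  --hostname-override=${NODE_IP} \\
  --cluster-cidr=${CLUSTER_CIDR} \\
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF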
Appendix: an alternate run-through (yum-packaged etcd/flannel, cfssl in /usr/local/bin, CA profile "panjb-k8s"):
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
chmod +x cfssl_linux-amd64
sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssljson_linux-amd64
sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl-certinfo_linux-amd64
sudo mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mkdir /root/ssl
cd /root/ssl
cfssl print-defaults csr > ca-csr.json
# Edit the CA signing request as follows:
tee ca-csr.json <<-'EOF'
{
  "CN": "panjb-k8s",
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "chengdu", "O": "k8s", "OU": "System" }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca   # generates the CA certificate and private key
ls
# ca.csr  ca-csr.json  ca-key.pem  ca.pem
cfssl print-defaults config > ca-config.json
tee ca-config.json <<-'EOF'
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "panjb-k8s": {
        "usages": [ "signing", "key encipherment", "server auth", "client auth" ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
tee kubernetes-csr.json <<-'EOF'
{
  "CN": "panjb-k8s",
  "hosts": [
    "127.0.0.1",
    "192.168.19.61",
    "192.168.19.62",
    "192.168.19.63",
    "10.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "chengdu", "O": "k8s", "OU": "System" }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s kubernetes-csr.json | cfssljson -bare kubernetes
tee admin-csr.json <<-'EOF'
{
  "CN": "admin",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "chengdu", "O": "system:masters", "OU": "System" }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s admin-csr.json | cfssljson -bare admin
tee kube-proxy-csr.json <<-'EOF'
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": { "algo": "rsa", "size": 2048 },
  "names": [
    { "C": "CN", "ST": "SiChuan", "L": "chengdu", "O": "k8s", "OU": "System" }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=panjb-k8s kube-proxy-csr.json | cfssljson -bare kube-proxy
openssl x509 -noout -text -in kubernetes.pem
cfssl-certinfo -cert kubernetes.pem
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
tar -xzvf kubernetes-client-linux-amd64.tar.gz
cp kubernetes/client/bin/kube* /usr/bin/
chmod a+x /usr/bin/kube*
On etcd-1 (192.168.19.61):
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-1 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.61:2380 \\
  --listen-peer-urls=https://192.168.19.61:2380 \\
  --listen-client-urls=https://192.168.19.61:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.61:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
On etcd-2 (192.168.19.62):
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-2 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.62:2380 \\
  --listen-peer-urls=https://192.168.19.62:2380 \\
  --listen-client-urls=https://192.168.19.62:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.62:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
On etcd-3 (192.168.19.63):
cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd \\
  --name=etcd-3 \\
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \\
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \\
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \\
  --initial-advertise-peer-urls=https://192.168.19.63:2380 \\
  --listen-peer-urls=https://192.168.19.63:2380 \\
  --listen-client-urls=https://192.168.19.63:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls=https://192.168.19.63:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=etcd-1=https://192.168.19.61:2380,etcd-2=https://192.168.19.62:2380,etcd-3=https://192.168.19.63:2380 \\
  --initial-cluster-state=new \\
  --data-dir=/opt/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
etcdctl \
  --endpoints=https://192.168.19.61:2379 \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  cluster-health
yum install -y flannel
cat > /usr/lib/systemd/system/flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/etc/sysconfig/flanneld
EnvironmentFile=-/etc/sysconfig/docker-network
ExecStart=/usr/bin/flanneld-start \$FLANNEL_OPTIONS
ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
cat > /etc/sysconfig/flanneld << EOF
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="https://192.168.19.61:2379,https://192.168.19.62:2379,https://192.168.19.63:2379"
# etcd config key. This is the configuration key that flannel queries
# for address range assignment
FLANNEL_ETCD_PREFIX="/kube-centos/network"
# Any additional options that you want to pass
FLANNEL_OPTIONS="-etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"
EOF
mkdir -p /kube-centos/network
Run on one node only:
etcdctl --endpoints=https://192.168.19.61:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem set /kube-centos/network/config '{"Network":"10.250.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'
etcdctl --endpoints=https://192.168.19.61:2379 --ca-file=/etc/kubernetes/ssl/ca.pem --cert-file=/etc/kubernetes/ssl/kubernetes.pem --key-file=/etc/kubernetes/ssl/kubernetes-key.pem ls /kube-centos/network/subnets