I. Prepare the server nodes
10.0.0.4 ansible
10.0.0.10 k8s-master
10.0.0.11 k8s-node1
10.0.0.12 k8s-node2
II. Configure ansible
1. Install ansible
# Switch the yum repos to the Aliyun mirror
cd /etc/yum.repos.d/
# Back up the stock repo file
mv CentOS-Base.repo CentOS-Base.repo.backup
# Download the Aliyun repo file
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Clear the yum cache
yum clean all
# Install the EPEL repo
yum -y install epel-release
# Install ansible
yum -y install ansible
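A quick sanity check that the install succeeded (the reported version depends on the EPEL package):
[root@ansible ~]# ansible --version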
2. Set up passwordless SSH to the nodes
[root@ansible ~]# ssh-keygen
[root@ansible ~]# ssh-copy-id root@10.0.0.10
[root@ansible ~]# ssh-copy-id root@10.0.0.11
[root@ansible ~]# ssh-copy-id root@10.0.0.12
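To confirm the keys were copied, a remote command should now run without a password prompt, for example:
[root@ansible ~]# ssh root@10.0.0.10 hostname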
3. Add the k8s node entries to /etc/hosts on the Ansible server
[root@ansible ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.4 ansible
10.0.0.10 k8s-master
10.0.0.11 k8s-node1
10.0.0.12 k8s-node2
4. Add the nodes to the Ansible inventory
[root@ansible ~]# vim /etc/ansible/hosts
[k8s-all]
10.0.0.10
10.0.0.11
10.0.0.12
[master]
10.0.0.10
[nodes]
10.0.0.11
10.0.0.12
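With the inventory in place, an ad-hoc ping is a quick way to confirm Ansible can reach every node over the passwordless SSH configured above; each host should answer with "pong":
[root@ansible ~]# ansible k8s-all -m ping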
III. Distribute /etc/hosts to every node in the k8s cluster
1. Create the playbook file and run it
[root@ansible ~]# vim hosts_playbook.yml
---
- hosts: nodes
  remote_user: root
  tasks:
    - name: backup /etc/hosts
      shell: mv /etc/hosts /etc/host_bak
    - name: copy local hosts file to remote
      copy: src=/etc/hosts dest=/etc/ owner=root group=root mode=0644
[root@ansible ~]# ansible-playbook hosts_playbook.yml
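If desired, an ad-hoc command confirms the file reached the nodes:
[root@ansible ~]# ansible nodes -m shell -a 'cat /etc/hosts'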
IV. Install Docker
1. Create the playbook file and run it
· Install docker on all nodes
[root@ansible ~]# cat install_docker_playbook.yml
- hosts: k8s-all
  remote_user: root
  vars:
    docker_version: 18.09.2
  tasks:
    - name: install dependencies
      shell: yum install -y yum-utils device-mapper-persistent-data lvm2
    - name: docker-repo
      shell: yum-config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
    - name: install docker
      yum: name=docker-ce-{{docker_version}} state=present
    - name: start docker
      shell: systemctl start docker && systemctl enable docker
[root@ansible ~]# vim /etc/ansible/ansible.cfg
deprecation_warnings = False ## around line 179; the default is True and the commented-out entry has no effect until it is uncommented
[root@ansible ~]# ansible-playbook install_docker_playbook.yml
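A quick ad-hoc check that Docker is installed and running on every node:
[root@ansible ~]# ansible k8s-all -m shell -a 'systemctl is-active docker && docker --version'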
V. Deploy the k8s master
1. Before deploying, some initialization is needed: disable the firewall, disable SELinux, turn off swap, configure the Aliyun kubernetes yum repo, and so on. All of these steps go into the script before.sh, which the playbooks run through the script module.
[root@ansible ~]# cat before.sh
#!/bin/bash
# disable the firewall
systemctl disable firewalld
systemctl stop firewalld
# disable SELinux (runtime only; edit /etc/selinux/config to make it permanent)
setenforce 0
# turn off swap (runtime only; comment the swap line in /etc/fstab to make it permanent)
swapoff -a
# load br_netfilter so the bridge sysctls below exist, then set the kernel parameters
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# reload the sysctl configuration
sysctl --system
# configure the Aliyun kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# refresh the yum cache
yum clean all -y && yum makecache
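Once the playbooks below have run this script, the bridge settings can be spot-checked from the Ansible host with an ad-hoc command, for example:
[root@ansible ~]# ansible k8s-all -m shell -a 'sysctl net.bridge.bridge-nf-call-iptables'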
2. Create a playbook for the master node only, installing kubectl, kubeadm, kubelet, and flannel.
# Create kube-flannel.yml
[root@ansible ~]# vim kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-arm
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- arm
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-arm
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-ppc64le
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- ppc64le
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-ppc64le
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-s390x
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: In
values:
- linux
- key: beta.kubernetes.io/arch
operator: In
values:
- s390x
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-s390x
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
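The manifest above is the stock flannel v0.11.0 deployment. Instead of pasting it by hand it can also be downloaded from the flannel repository; the exact URL below is an assumption about where the v0.11.0 tag keeps the file, so adjust it if the path has moved:
[root@ansible ~]# curl -fsSL -o /root/kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/v0.11.0/Documentation/kube-flannel.yml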
# Playbook that deploys the master
[root@ansible ~]# cat deploy_master_playbook.yml
- hosts: master
  remote_user: root
  vars:
    kube_version: 1.16.0-0
    k8s_version: v1.16.0
    k8s_master: 10.0.0.10
  tasks:
    - name: before
      script: ./before.sh
    - name: install kube***
      yum: name={{item}} state=present
      with_items:
        - kubectl-{{kube_version}}
        - kubeadm-{{kube_version}}
        - kubelet-{{kube_version}}
    - name: init k8s
      shell: kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version {{k8s_version}} --apiserver-advertise-address {{k8s_master}} --pod-network-cidr=10.244.0.0/16 --token-ttl 0
    - name: config kube
      shell: mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && chown $(id -u):$(id -g) $HOME/.kube/config
    - name: copy flannel yaml file
      copy: src=/root/kube-flannel.yml dest=/tmp/kube-flannel.yml
    - name: install flannel
      shell: kubectl apply -f /tmp/kube-flannel.yml
    - name: get join command
      shell: kubeadm token create --print-join-command
      register: join_command
    - name: show join command
      debug: var=join_command verbosity=0
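The join command printed by the debug task still has to be copied by hand into the node playbook further down. One option to avoid that manual step (a sketch, not part of the original playbook) is to append a task that writes the registered output to a file on the control node:
    - name: save join command on the control node
      local_action: copy content="{{ join_command.stdout }}" dest=/root/join_command.sh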
[root@ansible ~]# vim /etc/ansible/ansible.cfg
command_warnings = False ### around line 187
[root@ansible ~]# ansible-playbook deploy_master_playbook.yml
[root@master ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
quay-mirror.qiniu.com/coreos/flannel v0.12.0-amd64 4e9f801d2217 7 weeks ago 52.8MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.16.0 b305571ca60a 7 months ago 217MB
registry.aliyuncs.com/google_containers/kube-proxy v1.16.0 c21b0c7400f9 7 months ago 86.1MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.16.0 06a629a7e51c 7 months ago 163MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.16.0 301ddc62b80b 7 months ago 87.3MB
registry.aliyuncs.com/google_containers/etcd 3.3.15-0 b2756210eeab 8 months ago 247MB
registry.aliyuncs.com/google_containers/coredns 1.6.2 bf261d157914 8 months ago 44.1MB
registry.aliyuncs.com/google_containers/pause 3.1 da86e6ba6ca1 2 years ago 742kB
[root@master ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-58cc8c89f4-bt86l 1/1 Running 0 17h
kube-system coredns-58cc8c89f4-tdqsg 1/1 Running 0 17h
kube-system etcd-k8s-master 1/1 Running 0 17h
kube-system kube-apiserver-k8s-master 1/1 Running 0 17h
kube-system kube-controller-manager-k8s-master 1/1 Running 0 17h
kube-system kube-flannel-ds-amd64-v57pg 1/1 Running 0 17h
kube-system kube-proxy-xtjkl 1/1 Running 0 17h
kube-system kube-scheduler-k8s-master 1/1 Running 0 17h
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready master 17h v1.16.0
VI. Deploy the k8s nodes
1. As with the master, the same initialization has to run before deployment; the steps live in before.sh and are executed from the playbook through the script module.
[root@ansible ~]# cat deploy_nodes_playbook.yml
- hosts: nodes
  remote_user: root
  vars:
    kube_version: 1.16.0-0
  tasks:
    - name: before
      script: ./before.sh
    - name: install kube***
      yum: name={{item}} state=present
      with_items:
        - kubeadm-{{kube_version}}
        - kubelet-{{kube_version}}
    - name: start kubelet
      shell: systemctl enable kubelet && systemctl start kubelet
    - name: join cluster
      shell: kubeadm join 10.0.0.10:6443 --token fc8awf.3mrp0x0dp09e26do --discovery-token-ca-cert-hash sha256:a406381743687631723cd6008ba02946b94035e15319a161f60c5ee0b7461ebe
[root@ansible ~]# ansible-playbook deploy_nodes_playbook.yml
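Before checking from the master, kubelet health on the workers can be spot-checked from the Ansible host:
[root@ansible ~]# ansible nodes -m shell -a 'systemctl is-active kubelet'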
2. On the master, kubectl get nodes now shows the nodes that joined the cluster, with STATUS Ready.
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 3h11m v1.16.0
k8s-node1 Ready <none> 3h3m v1.16.0
k8s-node2 Ready <none> 3h3m v1.16.0
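As an optional smoke test of pod networking on the new nodes, a throwaway deployment can be created and inspected (the deployment name here is an arbitrary example):
[root@k8s-master ~]# kubectl create deployment nginx --image=nginx
[root@k8s-master ~]# kubectl get pods -o wide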