Enterprise Hands-On Module 2: Automated Kubernetes Cluster Deployment with Ansible

1 Server Planning

Role                     IP                               Components
k8s-master-01            192.168.5.3                      kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-master-02            192.168.5.4                      kube-apiserver, kube-controller-manager, kube-scheduler, etcd
k8s-node-01              192.168.5.5                      kubelet, kube-proxy, docker, etcd
k8s-node-02              192.168.5.6                      kubelet, kube-proxy, docker
Load Balancer (Master)   192.168.5.7, 192.168.5.10 (VIP)  nginx, keepalived
Load Balancer (Backup)   192.168.5.8                      nginx, keepalived

2 Writing the Core Configuration Files

# Create the project directory
mkdir -p /root/ansible-auto-install-k8s/group_vars

2.1 Write the ansible.cfg File

cat <<EOF > /root/ansible-auto-install-k8s/ansible.cfg
[defaults]
# Path to the inventory file (the one created in section 2.2)
inventory = /root/ansible-auto-install-k8s/inventory.cfg
# Default number of parallel forks
forks = 5
# User for running ansible commands; defaults to root
remote_user = root
# SSH port on the remote hosts; defaults to 22
remote_port = 22
# Whether to verify the SSH host key the first time Ansible connects to a client
host_key_checking = False
# SSH connection timeout; defaults to 10s
timeout = 10
# Ansible log path; defaults to /var/log/ansible.log
log_path = /var/log/ansible.log
# Private key path used when logging in with SSH public-key authentication
private_key_file = /root/.ssh/id_rsa
EOF
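
Before running any playbook, it is worth confirming that Ansible can actually reach every host. A minimal smoke test, assuming the key in private_key_file has already been distributed to all nodes (e.g. with ssh-copy-id):

cd /root/ansible-auto-install-k8s
# Every host in the inventory should reply with "pong"
ansible -i inventory.cfg all -m ping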

2.2 Write the Inventory File

cat <<EOF > /root/ansible-auto-install-k8s/inventory.cfg
[master]
# For a single-master deployment, keep only one master node
# By default the master nodes also run the node components
192.168.5.3 node_name=k8s-master-01
192.168.5.4 node_name=k8s-master-02

[node]
192.168.5.5 node_name=k8s-node-01
192.168.5.6 node_name=k8s-node-02

[etcd]
192.168.5.3 etcd_name=etcd-01
192.168.5.4 etcd_name=etcd-02
192.168.5.5 etcd_name=etcd-03

[lb]
# For a single-master deployment, skip this group
192.168.5.7 lb_name=lb-master
192.168.5.8 lb_name=lb-backup

[k8s:children]
master
node

[newnode]
#192.168.5.11 node_name=k8s-node3
EOF
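
To double-check the group layout, including the combined k8s children group, the resolved inventory can be dumped as a tree:

# Show the group/host hierarchy Ansible will actually use
ansible-inventory -i inventory.cfg --graph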

2.3 Write the Group Variables File

cat <<EOF > /root/ansible-auto-install-k8s/group_vars/all.yml
# Installation directories
software_dir: '/root/binary_pkg'
k8s_work_dir: '/usr/local/k8s/kubernetes'
etcd_work_dir: '/usr/local/k8s/etcd'
tmp_dir: '/usr/local/k8s/tmp'

# Cluster networking
service_cidr: '10.0.0.0/24'
cluster_dns: '10.0.0.2'   # Must match the IP in roles/addons/files/coredns.yaml and lie inside service_cidr
pod_cidr: '10.244.0.0/16' # Must match the CIDR in roles/addons/files/calico.yaml
service_nodeport_range: '30000-32767'
cluster_domain: 'cluster.local'

# High availability; skip for a single-master deployment
vip: '192.168.5.10'
nic: 'ens33'  # Change to the actual internal NIC name

# IPs trusted by the self-signed certificates; spare IPs can be added up front to ease future expansion
cert_hosts:
  # All LB, VIP and master IPs, plus the first IP of service_cidr
  k8s:
    - 10.0.0.1
    - 192.168.5.3
    - 192.168.5.4
    - 192.168.5.5
    - 192.168.5.6
    - 192.168.5.7
    - 192.168.5.8
    - 192.168.5.10
  # All etcd node IPs
  etcd:
    - 192.168.5.3
    - 192.168.5.4
    - 192.168.5.5
EOF

2.4 Single-Master Deployment Playbook

Save the following as single-master-deploy.yml (it is invoked in section 4.1).

---
- name: 0. System initialization
  gather_facts: false
  hosts:
    - k8s
    - etcd
  roles:
    - common
  tags: common

- name: 1. Self-signed certificates
  gather_facts: false
  hosts: localhost
  roles:
    - tls
  tags: tls

- name: 2. Deploy Docker
  gather_facts: false
  hosts: k8s
  roles:
    - docker
  tags: docker

- name: 3. Deploy the etcd cluster
  gather_facts: false
  hosts: etcd
  roles:
    - etcd
  tags: etcd

- name: 4. Deploy the K8S master
  gather_facts: false
  hosts: master
  roles:
    - master
  tags: master

- name: 5. Deploy the K8S nodes
  gather_facts: false
  hosts: k8s
  roles:
    - node
  tags: node

- name: 6. Deploy the add-ons
  gather_facts: false
  hosts: master
  roles:
    - addons
  tags: addons

2.5 Multi-Master Deployment Playbook

Save the following as multi-master-deploy.yml (it is invoked in section 4.2).

---
- name: 0. System initialization
  gather_facts: false
  hosts: all
  roles:
    - common
  tags: common

- name: 1. Self-signed certificates
  gather_facts: false
  hosts: localhost
  roles:
    - tls
  tags: tls

- name: 2. Deploy Docker
  gather_facts: false
  hosts: k8s
  roles:
    - docker
  tags: docker

- name: 3. Deploy the etcd cluster
  gather_facts: false
  hosts: etcd
  roles:
    - etcd
  tags: etcd

- name: 4. Deploy the K8S masters
  gather_facts: false
  hosts: master
  roles:
    - master
  tags: master

- name: 5. Deploy the Nginx load balancer with high availability
  gather_facts: false
  hosts: lb
  roles:
    - ha
  tags: ha

- name: 6. Deploy the K8S nodes
  gather_facts: false
  hosts: k8s
  roles:
    - node
  tags: node

- name: 7. Deploy the add-ons
  gather_facts: false
  hosts: master
  roles:
    - addons
  tags: addons

2.6 Node Scale-Out Playbook

Save the following as add-node.yml (it is invoked in section 4.3; a usage sketch follows the playbook).

---
- name: 0. System initialization
  gather_facts: false
  hosts: newnode
  roles:
    - common
  tags: common

- name: 1. Deploy Docker
  gather_facts: false
  hosts: newnode
  roles:
    - docker
  tags: docker

- name: 2. Deploy the K8S node
  gather_facts: false
  hosts: newnode
  roles:
    - node
  tags: node
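
To scale out, uncomment (or add) the new host under [newnode] in inventory.cfg and run the playbook; a minimal sketch using the commented-out example host from section 2.2:

# inventory.cfg
[newnode]
192.168.5.11 node_name=k8s-node3

ansible-playbook -i inventory.cfg add-node.yml -uroot -k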

3 Writing the Roles

# Directory layout
roles
├── addons
│   ├── files
│   │   ├── calico.yaml
│   │   ├── coredns.yaml
│   │   ├── ingress-controller.yaml
│   │   └── kubernetes-dashboard.yaml
│   └── tasks
│       └── main.yml
├── common
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       └── hosts.j2
├── docker
│   ├── files
│   │   ├── daemon.json
│   │   └── docker.service
│   └── tasks
│       └── main.yml
├── etcd
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       ├── etcd.conf.j2
│       ├── etcd.service.j2
│       └── etcd.sh.j2
├── ha
│   ├── files
│   │   └── check_nginx.sh
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       ├── keepalived.conf.j2
│       └── nginx.conf.j2
├── master
│   ├── files
│   │   ├── apiserver-to-kubelet-rbac.yaml
│   │   ├── kubelet-bootstrap-rbac.yaml
│   │   └── token.csv
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       ├── config.j2
│       ├── kube-apiserver.conf.j2
│       ├── kube-apiserver.service.j2
│       ├── kube-controller-manager.conf.j2
│       ├── kube-controller-manager.kubeconfig.j2
│       ├── kube-controller-manager.service.j2
│       ├── kube-scheduler.conf.j2
│       ├── kube-scheduler.kubeconfig.j2
│       └── kube-scheduler.service.j2
├── node
│   ├── tasks
│   │   └── main.yml
│   └── templates
│       ├── bootstrap.kubeconfig.j2
│       ├── kubelet-config.yml.j2
│       ├── kubelet.conf.j2
│       ├── kubelet.service.j2
│       ├── kube-proxy-config.yml.j2
│       ├── kube-proxy.conf.j2
│       ├── kube-proxy.kubeconfig.j2
│       └── kube-proxy.service.j2
└── tls
  ├── files
  │   ├── generate_etcd_cert.sh
  │   └── generate_k8s_cert.sh
  ├── tasks
  │   └── main.yml
  └── templates
      ├── etcd
      │   ├── ca-config.json.j2
      │   ├── ca-csr.json.j2
      │   └── server-csr.json.j2
      └── k8s
          ├── admin-csr.json.j2
          ├── ca-config.json.j2
          ├── ca-csr.json.j2
          ├── kube-controller-manager-csr.json.j2
          ├── kube-proxy-csr.json.j2
          ├── kube-scheduler-csr.json.j2
          └── server-csr.json.j2

3.1 System Initialization (common)

1)roles\common\tasks\main.yml

---
# This role runs on every node

- name: Disable firewalld
  service: name=firewalld state=stopped enabled=no

- name: Disable SELinux
  lineinfile:
    dest: /etc/selinux/config
    regexp: "^SELINUX="
    line: "SELINUX=disabled"

- name: Disable swap
  lineinfile:
    dest: /etc/fstab
    regexp: ".*swap"
    line: ""

- name: Apply immediately
  shell: setenforce 0 ; swapoff -a

- name: Copy the time zone
  copy: src=/usr/share/zoneinfo/Asia/Shanghai dest=/etc/localtime

- name: Add hosts entries
  template: src=hosts.j2 dest=/etc/hosts

# If the servers have Internet access, the tasks below can be enabled to sync time
# from a public NTP server; otherwise sync against an internal time server
#- name: Sync system time
#  yum: name=ntpdate state=present
#- name: Sync system time
#  shell: ntpdate time.windows.com

2)roles\common\templates\hosts.j2

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
{% for host in groups['k8s'] %}
{{ hostvars[host].inventory_hostname }} {{ hostvars[host].node_name }}
{% endfor %}

Template breakdown:

{% for host in groups['k8s'] %}
# Iterate over the hosts in the inventory's k8s group

{{ hostvars[host].inventory_hostname }} {{ hostvars[host].node_name }}
# Emit each host's IP followed by its node_name

{% endfor %}
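
With the inventory above, the rendered block appended to /etc/hosts comes out as:

192.168.5.3 k8s-master-01
192.168.5.4 k8s-master-02
192.168.5.5 k8s-node-01
192.168.5.6 k8s-node-02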

3.2 Self-Signed Certificates (tls)

1)roles\tls\files\generate_etcd_cert.sh

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server

# Copy the certificates into the roles that use them
root_dir=$(pwd |sed 's#ssl/etcd##')
apiserver_cert_dir=$root_dir/roles/master/files/etcd_cert
etcd_cert_dir=$root_dir/roles/etcd/files/etcd_cert
mkdir -p $etcd_cert_dir $apiserver_cert_dir
for dir in $apiserver_cert_dir $etcd_cert_dir; do
cp -rf ca.pem server.pem server-key.pem $dir
done

2)roles\tls\files\generate_k8s_cert.sh

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

# Copy the certificates into the roles that use them
root_dir=$(pwd |sed 's#ssl/k8s##')
apiserver_cert_dir=$root_dir/roles/master/files/k8s_cert
node_cert_dir=$root_dir/roles/node/files/k8s_cert
mkdir -p $apiserver_cert_dir $node_cert_dir
cp -rf *.pem $apiserver_cert_dir
cp -rf ca.pem kube-proxy-key.pem kube-proxy.pem $node_cert_dir

3)roles\tls\tasks\main.yml

---
- name: Get the Ansible working directory
  shell: pwd |sed 's#roles/tls##'
  register: root_dir

- name: Create the working directories
  file: dest={{ root_dir.stdout }}/ssl/{{ item }} state=directory
  with_items:
    - etcd
    - k8s

- name: Prepare the cfssl tools
  unarchive: src={{ software_dir }}/cfssl.tar.gz dest=/usr/bin/ mode=u+x

# item.split('.')[:-1]|join('.') strips the trailing .j2, so ca-csr.json.j2 renders to ca-csr.json
- name: Prepare the etcd CSR files
  template: src=etcd/{{ item }} dest={{ root_dir.stdout }}/ssl/etcd/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - ca-config.json.j2
    - ca-csr.json.j2
    - server-csr.json.j2

- name: Prepare the etcd certificate generation script
  copy: src=generate_etcd_cert.sh dest={{ root_dir.stdout }}/ssl/etcd mode=u+x

- name: Generate the etcd certificates
  shell: cd {{ root_dir.stdout }}/ssl/etcd && /bin/bash generate_etcd_cert.sh

- name: Prepare the k8s CSR files
  template: src=k8s/{{ item }} dest={{ root_dir.stdout }}/ssl/k8s/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - ca-config.json.j2
    - ca-csr.json.j2
    - server-csr.json.j2
    - kube-proxy-csr.json.j2
    - kube-controller-manager-csr.json.j2
    - kube-scheduler-csr.json.j2
    - admin-csr.json.j2

- name: Prepare the k8s certificate generation script
  copy: src=generate_k8s_cert.sh dest={{ root_dir.stdout }}/ssl/k8s mode=u+x

- name: Generate the k8s certificates
  shell: cd {{ root_dir.stdout }}/ssl/k8s && /bin/bash generate_k8s_cert.sh

4)roles\tls\templates\etcd\ca-config.json.j2

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}

5)roles\tls\templates\etcd\ca-csr.json.j2

{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}

6)roles\tls\templates\etcd\server-csr.json.j2

{% set number=cert_hosts.etcd | length %}
{
  "CN": "etcd",
  "hosts": [
{% for ip in cert_hosts.etcd %}
{% if number == loop.index %}
    "{{ ip }}"
{% else %}
    "{{ ip }}",
{% endif %}
{% endfor %}
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
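
With the cert_hosts.etcd list from group_vars/all.yml, the hosts array renders as:

"hosts": [
    "192.168.5.3",
    "192.168.5.4",
    "192.168.5.5"
],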

7)roles\tls\templates\k8s\admin-csr.json.j2

{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

8)roles\tls\templates\k8s\ca-config.json.j2

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}

9)roles\tls\templates\k8s\ca-csr.json.j2

{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

10)roles\tls\templates\k8s\kube-controller-manager-csr.json.j2

{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

11)roles\tls\templates\k8s\kube-proxy-csr.json.j2

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

12)roles\tls\templates\k8s\kube-scheduler-csr.json.j2

{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}

13)roles\tls\templates\k8s\server-csr.json.j2

{% set number=cert_hosts.k8s | length %}
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local",
{% for ip in cert_hosts.k8s %}
{% if number == loop.index %}
    "{{ ip }}"
{% else %}
    "{{ ip }}",
{% endif %}
{% endfor %}
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

3.3 Deploy Docker (docker)

1)roles\docker\files\daemon.json

{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.5.20"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}

2)roles\docker\files\docker.service

[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

3)roles\docker\tasks\main.yml

---
- name: Create the temporary directory
  file: dest={{ tmp_dir }} state=directory

- name: Distribute and unpack the docker binary package
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/docker-*.tgz"

- name: Move the docker binaries
  shell: cp -rf {{ tmp_dir }}/docker/* /usr/bin

- name: Distribute the service file
  copy: src=docker.service dest=/usr/lib/systemd/system/

- name: Create the config directory
  file: dest=/etc/docker state=directory

- name: Configure docker
  copy: src=daemon.json dest=/etc/docker/daemon.json

- name: Start docker
  systemd: name=docker state=restarted enabled=yes daemon_reload=yes

- name: Check status
  shell: docker info
  register: docker
- debug: var=docker.stdout_lines

3.4 Deploy the etcd Cluster (etcd)

1)roles\etcd\tasks\main.yml

---
- name: Create the working directories
  file: dest={{ etcd_work_dir }}/{{ item }} state=directory
  with_items:
    - bin
    - cfg
    - ssl

- name: Create the temporary directory
  file: dest={{ tmp_dir }} state=directory

- name: Distribute and unpack the etcd binary package
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/etcd-v*.tar.gz"

- name: Move the etcd binaries
  shell: cp -rf {{ tmp_dir }}/etcd-v*/{etcd,etcdctl} {{ etcd_work_dir }}/bin

- name: Distribute the certificates
  copy: src=etcd_cert/{{ item }} dest={{ etcd_work_dir }}/ssl
  with_items:
    - ca.pem
    - server.pem
    - server-key.pem

- name: Distribute the etcd config file
  template: src=etcd.conf.j2 dest={{ etcd_work_dir }}/cfg/etcd.conf

- name: Distribute the service file
  template: src=etcd.service.j2 dest=/usr/lib/systemd/system/etcd.service

- name: Start etcd
  systemd: name=etcd state=restarted enabled=yes daemon_reload=yes

- name: Distribute the etcd health-check script
  template: src=etcd.sh.j2 dest={{ tmp_dir }}/etcd.sh mode=u+x

- name: Get the etcd cluster status
  shell: /bin/bash {{ tmp_dir }}/etcd.sh
  register: status
- debug: var=status.stdout_lines

2)roles\etcd\templates\etcd.conf.j2

{% set local_ip = inventory_hostname %}

#[Member]
ETCD_NAME="{{ etcd_name }}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://{{ local_ip }}:2380"
ETCD_LISTEN_CLIENT_URLS="https://{{ local_ip }}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://{{ local_ip }}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://{{ local_ip }}:2379"
{# Build the peer list from the etcd host group #}
ETCD_INITIAL_CLUSTER="{% for host in groups['etcd'] %}{{ hostvars[host].etcd_name }}=https://{{ hostvars[host].inventory_hostname }}:2380{% if not loop.last %},{% endif %}{% endfor %}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
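
With the inventory above, the ETCD_INITIAL_CLUSTER line renders to:

ETCD_INITIAL_CLUSTER="etcd-01=https://192.168.5.3:2380,etcd-02=https://192.168.5.4:2380,etcd-03=https://192.168.5.5:2380"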

3)roles\etcd\templates\etcd.service.j2

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile={{ etcd_work_dir }}/cfg/etcd.conf
ExecStart={{ etcd_work_dir }}/bin/etcd \
--cert-file={{ etcd_work_dir }}/ssl/server.pem \
--key-file={{ etcd_work_dir }}/ssl/server-key.pem \
--peer-cert-file={{ etcd_work_dir }}/ssl/server.pem \
--peer-key-file={{ etcd_work_dir }}/ssl/server-key.pem \
--trusted-ca-file={{ etcd_work_dir }}/ssl/ca.pem \
--peer-trusted-ca-file={{ etcd_work_dir }}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

4)roles\etcd\templates\etcd.sh.j2

ETCDCTL_API=3 {{ etcd_work_dir }}/bin/etcdctl \
--cacert={{ etcd_work_dir }}/ssl/ca.pem \
--cert={{ etcd_work_dir }}/ssl/server.pem \
--key={{ etcd_work_dir }}/ssl/server-key.pem \
--endpoints={% for host in groups['etcd'] %}https://{{ hostvars[host].inventory_hostname }}:2379{% if not loop.last %},{% endif %}{% endfor %} \
endpoint health --write-out=table

3.5 Deploy the K8S Masters (master)

1)roles\master\files\apiserver-to-kubelet-rbac.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes

2)roles\master\files\kubelet-bootstrap-rbac.yaml

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubelet-bootstrap
subjects:
  - kind: User
    name: kubelet-bootstrap
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: system:node-bootstrapper
  apiGroup: rbac.authorization.k8s.io

3)roles\master\files\token.csv

c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
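
The fields are token, user name, user UID, and group. To rotate the token, a fresh random one can be generated with the command below; it must then also be updated in roles/node/templates/bootstrap.kubeconfig.j2, which embeds the same value:

head -c 16 /dev/urandom | od -An -t x | tr -d ' '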

4)roles\master\tasks\main.yml

---
- name: Create the working directories
  file: dest={{ k8s_work_dir }}/{{ item }} state=directory
  with_items:
    - bin
    - cfg
    - ssl
    - logs

- name: Create the etcd certificate directory
  file: dest={{ k8s_work_dir }}/ssl/etcd state=directory

- name: Create the temporary directory
  file: dest={{ tmp_dir }} state=directory

- name: Distribute and unpack the k8s binary package
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/kubernetes-*.tar.gz"

- name: Move the k8s master binaries
  shell: cp -rf {{ tmp_dir }}/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} {{ k8s_work_dir }}/bin && cp -rf {{ tmp_dir }}/kubernetes/server/bin/kubectl /usr/bin

- name: Distribute the k8s certificates
  copy: src=k8s_cert/{{ item }} dest={{ k8s_work_dir }}/ssl
  with_items:
    - ca.pem
    - ca-key.pem
    - server.pem
    - server-key.pem
    - kube-controller-manager.pem
    - kube-controller-manager-key.pem
    - kube-scheduler.pem
    - kube-scheduler-key.pem
    - admin.pem
    - admin-key.pem

- name: Distribute the etcd certificates
  copy: src=etcd_cert/{{ item }} dest={{ k8s_work_dir }}/ssl/etcd
  with_items:
    - ca.pem
    - server.pem
    - server-key.pem

- name: Distribute the token file
  copy: src=token.csv dest={{ k8s_work_dir }}/cfg

- name: Distribute the k8s config files
  template: src={{ item }} dest={{ k8s_work_dir }}/cfg/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - kube-apiserver.conf.j2
    - kube-controller-manager.conf.j2
    - kube-scheduler.conf.j2
    - kube-controller-manager.kubeconfig.j2
    - kube-scheduler.kubeconfig.j2

- name: Distribute the service files
  template: src={{ item }} dest=/usr/lib/systemd/system/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - kube-apiserver.service.j2
    - kube-controller-manager.service.j2
    - kube-scheduler.service.j2

- name: Start the k8s master components
  systemd: name={{ item }} state=restarted enabled=yes daemon_reload=yes
  with_items:
    - kube-apiserver
    - kube-controller-manager
    - kube-scheduler

- name: Create the kubectl config directory
  file: dest=/root/.kube state=directory
- name: Distribute the kubectl config file
  template: src=config.j2 dest=/root/.kube/config

- name: Check cluster status
  shell: sleep 3 && kubectl get cs
  register: cs
- debug: var=cs.stdout_lines

- name: Copy the RBAC manifests
  copy: src={{ item }} dest={{ tmp_dir }}
  with_items:
    - kubelet-bootstrap-rbac.yaml
    - apiserver-to-kubelet-rbac.yaml

- name: Authorize apiserver access to kubelet and bind the kubelet-bootstrap user
  ignore_errors: yes
  shell: |
    sleep 3
    kubectl apply -f {{ tmp_dir }}/apiserver-to-kubelet-rbac.yaml
    kubectl apply -f {{ tmp_dir }}/kubelet-bootstrap-rbac.yaml

- name: Auto-approve kubelet certificates
  ignore_errors: yes
  shell: |
    # Auto-approve CSRs from nodes requesting a certificate for the first time
    kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --user=kubelet-bootstrap
    # Auto-approve kubelet client certificate renewals
    kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
    # Auto-approve kubelet server certificate renewals
    kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes

5)roles\master\templates\config.j2

{% set local_ip = inventory_hostname + ':' + '6443' %}
apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ k8s_work_dir }}/ssl/ca.pem
    server: https://{{ local_ip }}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: admin
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: admin
  user:
    client-certificate: {{ k8s_work_dir }}/ssl/admin.pem
    client-key: {{ k8s_work_dir }}/ssl/admin-key.pem

6)roles\master\templates\kube-apiserver.conf.j2

{% set local_ip = inventory_hostname %}
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir={{ k8s_work_dir }}/logs \
--etcd-servers={% for host in groups['etcd'] %}https://{{ hostvars[host].inventory_hostname }}:2379{% if not loop.last %},{% endif %}{% endfor %} \
--bind-address={{ local_ip }} \
--secure-port=6443 \
--advertise-address={{ local_ip }} \
--allow-privileged=true \
--service-cluster-ip-range={{ service_cidr }} \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file={{ k8s_work_dir }}/cfg/token.csv \
--service-node-port-range={{ service_nodeport_range }} \
--kubelet-client-certificate={{ k8s_work_dir }}/ssl/server.pem \
--kubelet-client-key={{ k8s_work_dir }}/ssl/server-key.pem \
--tls-cert-file={{ k8s_work_dir }}/ssl/server.pem \
--tls-private-key-file={{ k8s_work_dir }}/ssl/server-key.pem \
--client-ca-file={{ k8s_work_dir }}/ssl/ca.pem \
--service-account-key-file={{ k8s_work_dir }}/ssl/ca-key.pem \
--etcd-cafile={{ k8s_work_dir }}/ssl/etcd/ca.pem \
--etcd-certfile={{ k8s_work_dir }}/ssl/etcd/server.pem \
--etcd-keyfile={{ k8s_work_dir }}/ssl/etcd/server-key.pem \
--service-account-issuer=api \
--service-account-signing-key-file={{ k8s_work_dir }}/ssl/server-key.pem \
--requestheader-client-ca-file={{ k8s_work_dir }}/ssl/ca.pem \
--proxy-client-cert-file={{ k8s_work_dir }}/ssl/server.pem \
--proxy-client-key-file={{ k8s_work_dir }}/ssl/server-key.pem \
--requestheader-allowed-names=kubernetes \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--enable-aggregator-routing=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path={{ k8s_work_dir }}/logs/k8s-audit.log"

7)roles\master\templates\kube-apiserver.service.j2

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile={{ k8s_work_dir }}/cfg/kube-apiserver.conf
ExecStart={{ k8s_work_dir }}/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

8)roles\master\templates\kube-controller-manager.conf.j2

KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir={{ k8s_work_dir }}/logs \
--leader-elect=true \
--kubeconfig={{ k8s_work_dir }}/cfg/kube-controller-manager.kubeconfig \
--bind-address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr={{ pod_cidr }} \
--service-cluster-ip-range={{ service_cidr }} \
--cluster-signing-cert-file={{ k8s_work_dir }}/ssl/ca.pem \
--cluster-signing-key-file={{ k8s_work_dir }}/ssl/ca-key.pem \
--root-ca-file={{ k8s_work_dir }}/ssl/ca.pem \
--service-account-private-key-file={{ k8s_work_dir }}/ssl/ca-key.pem \
--cluster-signing-duration=87600h0m0s"

9)roles\master\templates\kube-controller-manager.kubeconfig.j2

{% if groups['master'] | length == 1 %}
{% set apiserver = groups['master'][0] + ':' + '6443' %}
{% elif groups['master'] | length > 1 %}
{% set apiserver = vip + ':' + '16443' %}
{% endif %}

apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ k8s_work_dir }}/ssl/ca.pem
    server: https://{{ apiserver }}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-controller-manager
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-controller-manager
  user:
    client-certificate: {{ k8s_work_dir }}/ssl/kube-controller-manager.pem
    client-key: {{ k8s_work_dir }}/ssl/kube-controller-manager-key.pem

10)roles\master\templates\kube-controller-manager.service.j2

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile={{ k8s_work_dir }}/cfg/kube-controller-manager.conf
ExecStart={{ k8s_work_dir }}/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

11)roles\master\templates\kube-scheduler.conf.j2

KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir={{ k8s_work_dir }}/logs \
--leader-elect \
--kubeconfig={{ k8s_work_dir }}/cfg/kube-scheduler.kubeconfig \
--bind-address=127.0.0.1"

12)roles\master\templates\kube-scheduler.kubeconfig.j2

{% if groups['master'] | length == 1 %}
{% set apiserver = groups['master'][0] + ':' + '6443' %}
{% elif groups['master'] | length > 1 %}
{% set apiserver = vip + ':' + '16443' %}
{% endif %}

apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ k8s_work_dir }}/ssl/ca.pem
    server: https://{{ apiserver }}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-scheduler
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-scheduler
  user:
    client-certificate: {{ k8s_work_dir }}/ssl/kube-scheduler.pem
    client-key: {{ k8s_work_dir }}/ssl/kube-scheduler-key.pem

13)roles\master\templates\kube-scheduler.service.j2

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile={{ k8s_work_dir }}/cfg/kube-scheduler.conf
ExecStart={{ k8s_work_dir }}/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target

3.6 Deploy the K8S Nodes (node)

1)roles\node\tasks\main.yml

---
- name: Create the working directories
  file: dest={{ k8s_work_dir }}/{{ item }} state=directory
  with_items:
    - bin
    - cfg
    - ssl
    - logs

- name: Create the cni plugin directories
  file: dest={{ item }} state=directory
  with_items:
    - /opt/cni/bin
    - /etc/cni/net.d

- name: Create the temporary directory
  file: dest={{ tmp_dir }} state=directory

- name: Distribute and unpack the k8s binary package (this may take a while...)
  unarchive: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "{{ software_dir }}/kubernetes-*.tar.gz"

- name: Distribute and unpack the cni plugin binary package
  unarchive: src={{ item }} dest=/opt/cni/bin
  with_fileglob:
    - "{{ software_dir }}/cni-plugins-*.tgz"

- name: Move the k8s node binaries
  shell: cp -rf {{ tmp_dir }}/kubernetes/server/bin/{kubelet,kube-proxy} {{ k8s_work_dir }}/bin

- name: Distribute the k8s certificates
  copy: src=k8s_cert/{{ item }} dest={{ k8s_work_dir }}/ssl
  with_items:
    - ca.pem
    - kube-proxy.pem
    - kube-proxy-key.pem

- name: Distribute the k8s config files
  template: src={{ item }} dest={{ k8s_work_dir }}/cfg/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - bootstrap.kubeconfig.j2
    - kubelet.conf.j2
    - kubelet-config.yml.j2
    - kube-proxy.kubeconfig.j2
    - kube-proxy.conf.j2
    - kube-proxy-config.yml.j2

- name: Distribute the service files
  template: src={{ item }} dest=/usr/lib/systemd/system/{{ item.split('.')[:-1]|join('.') }}
  with_items:
    - kubelet.service.j2
    - kube-proxy.service.j2

- name: Start the k8s node components
  systemd: name={{ item }} state=restarted enabled=yes daemon_reload=yes
  with_items:
    - kubelet
    - kube-proxy

- name: Distribute the pre-pulled images (this may take a while...)
  copy: src={{ software_dir }}/image.tar.gz dest={{ tmp_dir }}

- name: Load the images (this may take a while...)
  shell: cd {{ tmp_dir }} && tar zxf image.tar.gz && for image in $(ls *.tar);do docker load < $image;done
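
image.tar.gz is expected to be a tarball of docker save dumps, one .tar per image. A hypothetical way to build it on an Internet-connected machine (the pause image name comes from kubelet.conf.j2 below; add whatever other images your nodes need):

docker pull lizhenliang/pause-amd64:3.0
docker save lizhenliang/pause-amd64:3.0 > pause.tar
tar zcf image.tar.gz *.tar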

2)roles\node\templates\bootstrap.kubeconfig.j2

{# A single host in the master group means a single-master architecture #}
{% if groups['master'] | length == 1 %}
{% set apiserver = groups['master'][0] + ':' + '6443' %}
{% elif groups['master'] | length > 1 %}
{% set apiserver = vip + ':' + '16443' %}
{% endif %}

apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ k8s_work_dir }}/ssl/ca.pem
    server: https://{{ apiserver }}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940   # Must match roles/master/files/token.csv

3)roles\node\templates\kube-proxy-config.yml.j2

kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: {{ inventory_hostname }}:10249
clientConnection:
  kubeconfig: {{ k8s_work_dir }}/cfg/kube-proxy.kubeconfig
hostnameOverride: {{ node_name }}
clusterCIDR: {{ pod_cidr }}
#mode: ipvs
#ipvs:
#  scheduler: "rr"

4)roles\node\templates\kube-proxy.conf.j2

KUBE_PROXY_OPTS="--logtostderr=false \
--v=4 \
--log-dir={{ k8s_work_dir }}/logs \
--config={{ k8s_work_dir }}/cfg/kube-proxy-config.yml"

5)roles\node\templates\kube-proxy.kubeconfig.j2

{% if groups['master'] | length == 1 %}
{% set apiserver = groups['master'][0] + ':' + '6443' %}
{% elif groups['master'] | length > 1 %}
{% set apiserver = vip + ':' + '16443' %}
{% endif %}

apiVersion: v1
clusters:
- cluster:
    certificate-authority: {{ k8s_work_dir }}/ssl/ca.pem
    server: https://{{ apiserver }}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: {{ k8s_work_dir }}/ssl/kube-proxy.pem
    client-key: {{ k8s_work_dir }}/ssl/kube-proxy-key.pem

6)roles\node\templates\kube-proxy.service.j2

[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile={{ k8s_work_dir }}/cfg/kube-proxy.conf
ExecStart={{ k8s_work_dir }}/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

7)roles\node\templates\kubelet-config.yml.j2

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- {{ cluster_dns }}
clusterDomain: {{ cluster_domain }}
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: {{ k8s_work_dir }}/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110

8)roles\node\templates\kubelet.conf.j2

KUBELET_OPTS="--logtostderr=false \
--v=4 \
--log-dir={{ k8s_work_dir }}/logs \
--hostname-override={{ node_name }} \
--network-plugin=cni \
--kubeconfig={{ k8s_work_dir }}/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig={{ k8s_work_dir }}/cfg/bootstrap.kubeconfig \
--config={{ k8s_work_dir }}/cfg/kubelet-config.yml \
--cert-dir={{ k8s_work_dir }}/ssl \
--pod-infra-container-image=lizhenliang/pause-amd64:3.0"

9)roles\node\templates\kubelet.service.j2

[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile={{ k8s_work_dir }}/cfg/kubelet.conf
ExecStart={{ k8s_work_dir }}/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

3.7 Deploy the Nginx Load Balancer with High Availability (ha)

1)roles\ha\files\check_nginx.sh

#!/bin/bash
# Exit non-zero when no nginx process is running, so keepalived marks this node as failed
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

if [ "$count" -eq 0 ];then
    exit 1
else
    exit 0
fi

2)roles\ha\tasks\main.yml

---
- name: Create the temporary directory
  file: dest={{ tmp_dir }} state=directory

- name: Copy the nginx and keepalived packages
  unarchive: src={{ software_dir }}/ha.tar.gz dest={{ tmp_dir }}

- name: Install the keepalived high-availability software
  yum: name={{ tmp_dir }}/{{ item }} state=present
  with_items:
    - "net-snmp-libs-5.7.2-43.el7.x86_64.rpm"
    - "net-snmp-agent-libs-5.7.2-43.el7.x86_64.rpm"
    - "keepalived-1.3.5-16.el7.x86_64.rpm"

- name: Install the nginx load balancer
  yum: name={{ tmp_dir }}/nginx-1.16.1-1.el7.ngx.x86_64.rpm state=present

- name: Copy the nginx config file
  template: src=nginx.conf.j2 dest=/etc/nginx/nginx.conf

- name: Copy the keepalived config file
  template: src=keepalived.conf.j2 dest=/etc/keepalived/keepalived.conf

- name: Copy the nginx health-check script
  copy: src=check_nginx.sh dest=/etc/keepalived/ mode=u+x

- name: Start the services
  systemd: name={{ item }} state=restarted enabled=yes daemon_reload=yes
  with_items:
    - nginx
    - keepalived

3)roles\ha\templates\keepalived.conf.j2

{% if lb_name == 'lb-master' %}
{% set role = 'MASTER' %}
{% set priority = 100 %}
{% elif lb_name == 'lb-backup' %}
{% set role = 'BACKUP' %}
{% set priority = 90 %}
{% endif %}

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id NGINX_{{ role }}
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state {{ role }}
    interface {{ nic }}
    virtual_router_id 51   # VRRP router ID; must be unique per instance
    priority {{ priority }}   # Priority; the backup server uses 90
    advert_int 1   # VRRP advertisement interval; defaults to 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        {{ vip }}/24
    }
    track_script {
        check_nginx
    }
}
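
A quick way to verify failover once both load balancers are up: stop nginx on the MASTER and watch the VIP move to the backup (nic is ens33 in group_vars/all.yml; adjust to your environment):

# On lb-master
systemctl stop nginx
# On lb-backup, the VIP should appear within a few seconds
ip addr show ens33 | grep 192.168.5.10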

4)roles\ha\templates\nginx.conf.j2


user nginx;
worker_processes 4;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

stream {

    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';

    access_log /var/log/nginx/k8s-access.log main;

    # Round-robin across all master apiservers
    upstream k8s-apiserver {
{% for ip in groups['master'] %}
        server {{ hostvars[ip].inventory_hostname }}:6443;
{% endfor %}
    }

    server {
        listen 16443;
        proxy_pass k8s-apiserver;
    }
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}

3.8 Deploy the Add-ons (addons)

Download the following manifests into roles/addons/files:

1)roles\addons\files\calico.yaml

wget https://cunqi0105-1300757323.cos.ap-shanghai.myqcloud.com/configuration-file/calico.yaml

2)roles\addons\files\coredns.yaml

wget https://cunqi0105-1300757323.cos.ap-shanghai.myqcloud.com/configuration-file/coredns.yaml

3)roles\addons\files\ingress-controller.yaml

wget https://cunqi0105-1300757323.cos.ap-shanghai.myqcloud.com/configuration-file/ingress-controller.yaml

4)roles\addons\files\kubernetes-dashboard.yaml

wget https://cunqi0105-1300757323.cos.ap-shanghai.myqcloud.com/configuration-file/kubernetes-dashboard.yaml

5)roles\addons\tasks\main.yml

---
#- name: Approve pending node CSRs manually (not needed with auto-approval enabled)
#  ignore_errors: yes
#  shell: kubectl certificate approve $(kubectl get csr |awk 'NR!=1{print $1}')

- name: Copy the YAML manifests to the master
  copy: src={{ item }} dest={{ tmp_dir }}
  with_fileglob:
    - "*.yaml"

- name: Deploy Calico, Dashboard, CoreDNS and Ingress
  ignore_errors: yes
  shell: |
    cd {{ tmp_dir }}
    for yaml in $(ls *.yaml);do kubectl apply -f $yaml;done

- name: Check pod status
  shell: kubectl get all --all-namespaces
  register: getall
- debug: var=getall.stdout_lines

- name: Create the Dashboard admin service account
  ignore_errors: yes
  shell: |
    kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
    kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin

- name: Fetch the Dashboard admin token
  ignore_errors: yes
  shell: |
    web='https://NodeIP:30001'
    token=$(kubectl describe secrets -n kubernetes-dashboard \
      $(kubectl get secret -n kubernetes-dashboard | awk '/dashboard-admin/{print $1}') |awk '/^token/{print $2}')
    echo "Dashboard URL ---> $web"
    echo "Token ---> $token"
  register: ui
- name: Kubernetes Dashboard login info
  debug: var=ui.stdout_lines

4 Run the Installation

4.1 Single-Master Deployment

# -k prompts for the SSH password; it can be dropped once key-based login is in place
ansible-playbook -i inventory.cfg single-master-deploy.yml -uroot -k

4.2 Multi-Master Deployment

ansible-playbook -i inventory.cfg multi-master-deploy.yml -uroot -k

4.3 Node Scale-Out

ansible-playbook -i inventory.cfg add-node.yml -uroot -k
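
After a playbook completes, a quick sanity check from any master (the master role writes the kubectl config to /root/.kube/config):

kubectl get nodes -o wide
kubectl get pods --all-namespaces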

 

 
