Kubernetes (k8s) deployment

Set the hostname
hostnamectl --static set-hostname k8s-master01

Install dependency packages
yum install -y conntrack ntpdate ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

Set the firewall to iptables and flush its rules
systemctl stop firewalld && systemctl disable firewalld

yum install -y iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

Disable the postfix service
systemctl stop postfix.service && systemctl disable postfix.service

Turn off swap
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

Disable SELinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
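Optional quick check that both changes took effect:
getenforce   # prints Permissive now, and Disabled after a reboot
free -m      # the Swap line should show 0 total once swap is off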

Tune kernel parameters
Option 1:
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Do not use swap space; only allow it when the system is OOM (out of memory)
vm.swappiness=0
# Do not check whether enough physical memory is available
vm.overcommit_memory=1
# Do not panic on OOM (keep the OOM killer enabled)
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf

Option 2 (minimal set):
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
vm.swappiness = 0
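A sketch of how the minimal set could be applied (the file name k8s-min.conf is arbitrary; br_netfilter is also loaded again later in these notes):
modprobe br_netfilter    # the bridge-nf sysctls require this module to be loaded
cat > /etc/sysctl.d/k8s-min.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
EOF
sysctl -p /etc/sysctl.d/k8s-min.conf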

Set the system time zone
timedatectl set-timezone Asia/Shanghai

Write the current time to the hardware clock as UTC
timedatectl set-local-rtc 0

Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond

Configure rsyslogd and systemd journald
mkdir -p /var/log/journal   # directory for persistent log storage
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Maximum disk space the journal may use
SystemMaxUse=10G

# Maximum size of a single log file
SystemMaxFileSize=200M

# Keep logs for two weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF

systemctl restart systemd-journald
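Optional check that journald is now writing to disk:
journalctl --disk-usage   # reports the space used by the persistent journal under /var/log/journal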

Upgrade the system kernel to 4.4 (long-term branch)
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

yum --enablerepo=elrepo-kernel install -y kernel-lt

Set the system to boot from the new kernel by default (the exact menu entry title depends on the installed kernel version)
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
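If unsure of the exact menu entry title, it can be listed first (standard CentOS 7 grub2 paths assumed):
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg   # list boot entry titles
grub2-editenv list                                      # show the currently saved default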


Prerequisites for enabling IPVS mode in kube-proxy

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum update -y && yum install -y docker-ce

mkdir /etc/docker

cat > /etc/docker/daemon.json <<EOF
{
    "exec-opts":["native.cgroupdrive=systemd"],
    "log-driver":"json-file",
    "log-opts":{
       "max-size":"100m"
    }
}
EOF

The cgroup driver is changed to systemd in order to get rid of this kubeadm warning:
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/

mkdir -p /etc/systemd/system/docker.service.d

systemctl daemon-reload && systemctl restart docker && systemctl enable docker
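Quick check that Docker picked up the systemd cgroup driver after the restart:
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd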

Install kubeadm
Option 1:
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1
systemctl enable kubelet.service

Option 2 (with GPG checking enabled):
[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

[...]  the text in square brackets is the repository id; it must be unique and identifies the repository
name  repository name, free-form
baseurl  repository URL
enabled  whether the repository is enabled; 1 (the default) means enabled
gpgcheck  whether to verify the signatures of packages from this repository; 1 means verify
repo_gpgcheck  whether to verify the repository metadata (the package list); 1 means verify
gpgkey=URL  location of the public key used for signature verification; required when gpgcheck=1, unnecessary when gpgcheck=0

Refresh the yum cache
[root@master ~]# yum clean all
[root@master ~]# yum -y makecache
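Optionally confirm that the kubernetes repo is active:
yum repolist enabled | grep -i kubernetes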

Initialize the master node

kubeadm config print init-defaults > kubeadm-config.yaml
Edit the generated kubeadm-config.yaml and set the following fields:
localAPIEndpoint:
    advertiseAddress: 192.168.125.132
kubernetesVersion: v1.15.1
networking:
  podSubnet: "10.244.0.0/16"
  serviceSubnet: "10.96.0.0/12"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
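For reference, a sketch of what the edited kubeadm-config.yaml can end up looking like (the advertise address is the example IP used above; apiVersion v1beta2 is what kubeadm 1.15 prints by default):
cat > kubeadm-config.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.125.132
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  podSubnet: "10.244.0.0/16"
  serviceSubnet: "10.96.0.0/12"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
EOF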

kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

Automatic certificate upload is only available in newer releases (after 1.13.x) and is only needed for high-availability setups:
--experimental-upload-certs

View the init log
vim kubeadm-init.log

View the generated certificates
cd /etc/kubernetes/pki

Join additional master nodes and the worker nodes
Run the join command printed in the install log (kubeadm-init.log) on each node.

To use kubectl on the worker nodes as well: kubectl authenticates as kubernetes-admin, so copy /etc/kubernetes/admin.conf from the master node to the same path on each worker node.
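For illustration, the join command in kubeadm-init.log roughly follows the shape below (token, hash and the node01 host name are placeholders; always copy the exact command from your own log), and the kubeconfig steps are the ones kubeadm prints after init:
# On the master: make kubectl usable for the current user
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# On each worker: run the join command from kubeadm-init.log (placeholders shown)
kubeadm join 192.168.125.132:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>

# Copy admin.conf to a worker (node01 is a hypothetical host name) so kubectl works there too
scp /etc/kubernetes/admin.conf root@node01:/etc/kubernetes/admin.conf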

Deploy the pod network (flannel)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

mkdir install-k8s
cd install-k8s
mkdir plugin
cd plugin
mkdir flannel
cd flannel
Option 1 (latest from master):
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Option 2 (pinned to a specific commit):
wget https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

The Network value in kube-flannel.yml must match podSubnet in kubeadm-config.yaml:
networking:
  podSubnet: "10.244.0.0/16"
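A quick way to check (and, if needed, change) the value, assuming the manifest has been downloaded as kube-flannel.yml:
grep -A 6 'net-conf.json' kube-flannel.yml            # "Network" should read 10.244.0.0/16
# only if it differs -- a sketch of an in-place fix:
sed -i 's#"Network": ".*"#"Network": "10.244.0.0/16"#' kube-flannel.yml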

kubectl create -f kube-flannel.yml

kubectl get pod -n kube-system
-n selects the kube-system namespace; without -n the default namespace is used.
All system components are installed into kube-system by default, so the namespace has to be specified.

kubectl get pod -n kube-system -w  # -w watches and updates the output as pods change

kubectl get node 


kube-flannel-ds-amd64-92whm       0/1     Init:ImagePullBackOff   0          8h
If the error above appears, delete the affected flannel pod; it will be recreated automatically:
kubectl delete  pod kube-flannel-ds-amd64-92whm  -n kube-system
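If the pull keeps failing, the usual next steps are to look at the pod events and pull the image by hand (the image name should be read from the manifest; the pod name here is the one from the example above):
kubectl describe pod kube-flannel-ds-amd64-92whm -n kube-system   # events at the bottom show the failing image
grep 'image:' kube-flannel.yml                                    # see which flannel image is referenced
docker pull <image-from-the-manifest>                             # placeholder: pull it manually on the affected node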

Image download script (pulls the required images from the Aliyun mirror and retags them as k8s.gcr.io):
#!/bin/bash
# Pull the kubeadm images from the Aliyun mirror and retag them as k8s.gcr.io
url=registry.cn-hangzhou.aliyuncs.com/google_containers
version=v1.15.1
images=(`kubeadm config images list --kubernetes-version=$version | awk -F '/' '{print $2}'`)
for imagename in ${images[@]} ; do
  docker pull $url/$imagename                       # pull from the mirror
  docker tag $url/$imagename k8s.gcr.io/$imagename  # retag to the name kubeadm expects
  docker rmi -f $url/$imagename                     # drop the mirror tag
done
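Usage, assuming the script above was saved as pull-images.sh (hypothetical file name):
bash pull-images.sh
docker images | grep k8s.gcr.io   # the retagged images should now be listed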


kubectl run nginx-deployment --image=nginx --port=80 --replicas=1

Show detailed pod information
kubectl get pod -o wide

kubectl get deployment

kubectl scale --replicas=3 deployment/nginx-deployment

kubectl get rs
Expose the deployment; service port 30000 forwards to the container's port 80
kubectl expose deployment nginx-deployment --port=30000 --target-port=80

kubectl get svc 

kubectl edit svc nginx-deployment  # "nginx-deployment" is the service name, shown by kubectl get svc
Change the TYPE of the nginx-deployment service to NodePort so it can be reached from a browser via a node's real IP.
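After the change, the node port assigned by Kubernetes can be read from kubectl get svc and tested with curl (node IP and port below are placeholders):
kubectl get svc nginx-deployment    # PORT(S) now looks like 30000:3xxxx/TCP; the second number is the node port
curl http://<node-ip>:<node-port>   # should return the nginx welcome page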

ipvsadm -Ln

 
