Kubernetes deployment
[Automatic installation]
1. Operating environment
Stop the firewall and disable SELinux on both machines:
#systemctl stop firewalld && systemctl disable firewalld
#setenforce 0
#vim /etc/selinux/config
SELINUX=disabled
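The same SELinux change can be made non-interactively; a minimal sketch (run as root, assuming the stock /etc/selinux/config layout):
# Switch SELinux to disabled in the config file and confirm the result
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
grep '^SELINUX=' /etc/selinux/config   # should print: SELINUX=disabled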
Add both hosts to /etc/hosts on each machine:
192.168.50.128 k8s-master
192.168.50.135 k8s-node1
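A small sketch to append the entries (run as root on both machines; it does not guard against duplicates):
# Register the cluster host names
cat >> /etc/hosts <<'EOF'
192.168.50.128 k8s-master
192.168.50.135 k8s-node1
EOF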
2. Installation and deployment
2.1 Pre-installation preparation
Synchronize the clocks on all nodes (my physical host runs an NTP server):
ntpdate -u 192.168.2.68
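ntpdate is a one-shot sync; to keep the clocks aligned you can schedule it, for example via cron (a sketch, assuming the NTP server above stays reachable):
# Re-sync every 30 minutes
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate -u 192.168.2.68') | crontab -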
Install redhat-ca.crt on the node(s):
yum install *rhsm* -y
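These packages provide the Red Hat CA certificate that pulling the pod-infrastructure image (configured for the kubelet below) depends on; a quick check that it landed (assuming the stock package layout):
# The kubelet's pod-infra-container image pull fails if this file is missing
ls -l /etc/rhsm/ca/redhat-uep.pem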
2.2 etcd cluster configuration
Master node configuration
#yum -y install kubernetes-master etcd
#vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.50.128:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.50.128:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.50.128:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.50.128:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.50.128:2380,etcd2=http://192.168.50.135:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
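For repeatable setups the same edits can be scripted; a minimal sketch (run as root, assuming the stock etcd.conf shipped by the etcd RPM and the IPs used in this walkthrough):
# Apply the master's etcd settings in place; \? also matches the
# commented-out defaults (GNU sed)
MASTER_IP=192.168.50.128
NODE_IP=192.168.50.135
sed -i \
  -e "s|^#\?ETCD_LISTEN_PEER_URLS=.*|ETCD_LISTEN_PEER_URLS=\"http://${MASTER_IP}:2380\"|" \
  -e "s|^#\?ETCD_LISTEN_CLIENT_URLS=.*|ETCD_LISTEN_CLIENT_URLS=\"http://${MASTER_IP}:2379,http://127.0.0.1:2379\"|" \
  -e "s|^#\?ETCD_NAME=.*|ETCD_NAME=\"etcd1\"|" \
  -e "s|^#\?ETCD_INITIAL_ADVERTISE_PEER_URLS=.*|ETCD_INITIAL_ADVERTISE_PEER_URLS=\"http://${MASTER_IP}:2380\"|" \
  -e "s|^#\?ETCD_ADVERTISE_CLIENT_URLS=.*|ETCD_ADVERTISE_CLIENT_URLS=\"http://${MASTER_IP}:2379\"|" \
  -e "s|^#\?ETCD_INITIAL_CLUSTER=.*|ETCD_INITIAL_CLUSTER=\"etcd1=http://${MASTER_IP}:2380,etcd2=http://${NODE_IP}:2380\"|" \
  /etc/etcd/etcd.conf
On the node, the same script applies with the listen/advertise URLs pointing at 192.168.50.135 and ETCD_NAME set to "etcd2".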
Node configuration
#yum -y install kubernetes-node etcd flannel docker
#vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://192.168.50.135:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.50.135:2379,http://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.50.135:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.50.135:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.50.128:2380,etcd2=http://192.168.50.135:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
Start the etcd cluster (on both machines):
#systemctl start etcd.service
#systemctl status etcd.service
Check the etcd cluster status:
[root@k8s-master ~]# etcdctl cluster-health
member 272e2ecbe3d84558 is healthy: got healthy result from http://192.168.50.128:2379
member 94b5d90215d70e1e is healthy: got healthy result from http://192.168.50.135:2379
cluster is healthy
Key configuration options:
[member]
ETCD_NAME: the name of this etcd node.
ETCD_DATA_DIR: the etcd data storage directory.
ETCD_SNAPSHOT_COUNT: the number of committed transactions that triggers a snapshot.
ETCD_HEARTBEAT_INTERVAL: the heartbeat interval between etcd nodes, in milliseconds.
ETCD_ELECTION_TIMEOUT: the maximum election timeout for this node, in milliseconds.
ETCD_LISTEN_PEER_URLS: the list of addresses this node listens on for communication with other nodes; multiple addresses are comma-separated, in the form scheme://IP:PORT, where scheme can be http or https.
ETCD_LISTEN_CLIENT_URLS: the list of addresses this node listens on for client communication.
[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS: the peer addresses this member advertises to the rest of the cluster; cluster data is transferred over these addresses, so they must be reachable by every member.
ETCD_INITIAL_CLUSTER: the addresses of all members of the cluster, in the form ETCD_NAME=ETCD_INITIAL_ADVERTISE_PEER_URLS; multiple entries are comma-separated.
ETCD_ADVERTISE_CLIENT_URLS: the client addresses this member advertises to the rest of the cluster.
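A quick way to confirm the cluster actually replicates data is to write a key on one member and read it from the other (etcd v2 API, as used throughout; /test/hello is an arbitrary key):
# On the master: write a test key
etcdctl set /test/hello world
# On the node: read it back, then clean up
etcdctl get /test/hello   # should print: world
etcdctl rm /test/hello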
2.3 Kubernetes cluster configuration
Master node configuration
1. Configure the apiserver
#vi /etc/kubernetes/apiserver
# The address on the local server to listen to.
#KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"
# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"
# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.50.128:2379,http://192.168.50.135:2379"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
# default admission control policies
#KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
# Add your own!
KUBE_API_ARGS=""
2. Start the services
systemctl start kube-apiserver.service
systemctl start kube-controller-manager.service
systemctl start kube-scheduler.service
systemctl enable kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl enable kube-scheduler.service
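Before moving on to the nodes it is worth confirming that the control plane answers on the insecure port configured above; a sketch:
# Query the version endpoint and component health directly
curl -s http://127.0.0.1:8080/version
kubectl -s http://127.0.0.1:8080 get componentstatuses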
Node configuration
1. Configure /etc/kubernetes/config
cat /etc/kubernetes/config
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"
# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.50.128:8080"
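Every node-side daemon finds the apiserver through KUBE_MASTER, so check that the node can actually reach it (a sketch, run on the node):
# /healthz returns "ok" when the apiserver is reachable and healthy
curl -s http://192.168.50.128:8080/healthz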
2. Configure the kubelet
cat /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=127.0.0.1"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.50.135"
# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.50.128:8080"
# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
# Add your own!
KUBELET_ARGS=""
3. Network configuration
cat /etc/sysconfig/flanneld
# Flanneld configuration options
# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.50.128:2379"
# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
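Flannel reads its subnet configuration from etcd under the prefix above, so that key must exist before flanneld starts; a sketch (172.17.0.0/16 is an assumption, pick any range that does not collide with your LAN):
# Write flannel's network config into etcd (v2 API), then bring up the
# overlay and restart docker so containers land on the flannel subnet
etcdctl set /atomic.io/network/config '{ "Network": "172.17.0.0/16" }'
systemctl start flanneld && systemctl enable flanneld
systemctl restart docker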
Start the related services:
systemctl start kubelet && systemctl start kube-proxy
systemctl enable kubelet && systemctl enable kube-proxy
Test from k8s-master:
[root@k8s-master ~]# kubectl get nodes
NAME             STATUS    AGE
192.168.50.135   Ready     21h
[root@k8s-master ~]# etcdctl member list
272e2ecbe3d84558: name=etcd1 peerURLs=http://192.168.50.128:2380 clientURLs=http://192.168.50.128:2379 isLeader=true
94b5d90215d70e1e: name=etcd2 peerURLs=http://192.168.50.135:2380 clientURLs=http://192.168.50.135:2379 isLeader=false
[root@k8s-master ~]# etcdctl cluster-health
member 272e2ecbe3d84558 is healthy: got healthy result from http://192.168.50.128:2379
member 94b5d90215d70e1e is healthy: got healthy result from http://192.168.50.135:2379
cluster is healthy
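As a final smoke test, schedule a pod and watch it land on the node (a sketch; assumes the node can pull images, and uses nginx as an arbitrary example):
# In this Kubernetes generation, kubectl run creates a deployment
kubectl run nginx --image=nginx --port=80
kubectl get pods -o wide   # the pod should end up on 192.168.50.135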