Setting up a Kubernetes cluster

1. Environment preparation

name     ip address        system
master   192.168.211.130   CentOS 7 64-bit
node1    192.168.211.131   CentOS 7 64-bit
node2    192.168.211.132   CentOS 7 64-bit
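If the machines need to resolve each other by hostname, it can help to add the addresses above to /etc/hosts on all three machines. A minimal sketch (the hostnames master/node1/node2 are assumptions taken from the table, not required by the later steps):

cat >> /etc/hosts <<EOF
192.168.211.130 master
192.168.211.131 node1
192.168.211.132 node2
EOF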

2. Installation and deployment preparation

Synchronize the time on all three machines (master/node1/node2)

yum install -y ntp
ntpdate -u cn.pool.ntp.org
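ntpdate performs a one-off adjustment; to keep the clocks synchronized afterwards you can optionally enable the ntpd service that the ntp package ships with (a small sketch, not part of the original steps):

systemctl enable ntpd
systemctl start ntpd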

Install the redhat-ca (rhsm) packages on the node machines (node1/node2)

yum install *rhsm* -y

Configure the certificate shipped in the python-rhsm-certificates package (node1/node2)

wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem
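This certificate is what later lets the nodes pull the pod-infrastructure image from registry.access.redhat.com. As a quick sanity check that it was written:

ls -l /etc/rhsm/ca/redhat-uep.pem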

3. Install the etcd cluster

  • master node
yum -y install kubernetes-master etcd
vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.211.130:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.211.130:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.211.130:2380,etcd2=http://192.168.211.131:2380,etcd3=http://192.168.211.132:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
  • node1, node2 nodes (the config below is node1's; on node2 set ETCD_NAME="etcd3" and use 192.168.211.132 in the advertise URLs)
yum -y install kubernetes-node etcd flannel docker
vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.211.131:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.211.131:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.211.130:2380,etcd2=http://192.168.211.131:2380,etcd3=http://192.168.211.132:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
  • Start the etcd cluster (master, node1, node2)
systemctl start etcd.service
systemctl status etcd.service
  • Check the cluster health (master)
etcdctl cluster-health
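Besides cluster-health, a simple write/read through the etcd v2 API is a quick way to confirm the cluster actually accepts data (the key /test is just an example and can be removed afterwards):

etcdctl set /test "hello"
etcdctl get /test
etcdctl rm /test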

4. Kubernetes cluster configuration

  • master node
vim /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#

# The address on the local server to listen to.
# KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.211.130:2379,http://192.168.211.131:2379,http://192.168.211.132:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
# KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""
systemctl enable kube-apiserver          # enable at boot
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl start kube-apiserver           # start now
systemctl start kube-controller-manager
systemctl start kube-scheduler
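With the three services running, a quick sanity check that the apiserver is answering on the insecure port configured above (run on the master):

kubectl get componentstatuses
curl http://127.0.0.1:8080/version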
  • node1, node2 nodes (the kubelet config below uses node2's address 192.168.211.132; on node1 use 192.168.211.131 instead)
vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.211.130:8080"
vim /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=192.168.211.132"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.211.132"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.211.130:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
vim /etc/sysconfig/flanneld


# Flanneld configuration options  

# etcd url location.  Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.211.130:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"    # ***** Important: this key must be created manually on the master, as shown below *****

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""

 ## Because Linux also has iptables underneath, run the following on each node: ##
 ## Note: shut down the firewall, since flannel needs some ports to communicate between hosts ##

 iptables -P INPUT ACCEPT
 iptables -P FORWARD ACCEPT
 iptables -F
 iptables -L -n
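If firewalld is running it will keep rewriting these rules, so for this lab setup the simplest approach is to stop it on the nodes, as the note above suggests:

systemctl stop firewalld
systemctl disable firewalld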

Create the flannel network configuration in etcd on the master. The two commands below are alternative examples (mk fails if the key already exists); use one of them:
etcdctl mk /atomic.io/network/config '{"Network":"10.2.0.0/16"}'      # Backend Type can be host-gw or vxlan
etcdctl set /atomic.io/network/config '{"Network":"172.17.0.0/16", "SubnetMin": "172.17.1.0", "SubnetMax": "172.17.254.0", "Backend":{"Type":"host-gw"}}'
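The key can be read back from any etcd member to confirm that flannel will find it (the path is FLANNEL_ETCD_PREFIX plus /config):

etcdctl get /atomic.io/network/config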
systemctl enable kubelet
systemctl enable kube-proxy
systemctl enable flanneld
systemctl enable docker
systemctl start kubelet
systemctl start kube-proxy
systemctl start flanneld
systemctl start docker
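Once flanneld and docker are up, each node should have been assigned a subnet from the network configured above. One way to check (the subnet.env path is the flannel default on these packages, so treat it as an assumption):

cat /run/flannel/subnet.env     # subnet assigned to this node
ip addr show docker0            # the docker bridge should sit inside that subnet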
  • Check the cluster status (master):
kubectl get nodes
etcdctl  member list
etcdctl cluster-health

5. Kubernetes management

  • Create pods
kubectl run my-nginx --image=nginx --replicas=2 --port=80
kubectl create -f deployment-nginx.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 4
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.12.2
        ports:
        - containerPort: 80
  • View pod information
kubectl get pods -o wide
  • Delete a pod
kubectl delete pods my-nginx-379829228-bkt21
  • Enter a specified pod and run bash
kubectl exec -it my-nginx-379829228-462dl bash
  • Map a pod port to a host port
kubectl port-forward my-nginx-379829228-ng2jb 80:80    # forwards on the master host; only reachable via http://127.0.0.1:80
kubectl expose deployment nginx-deployment --type=NodePort  # exposes on the nodes (node1, node2); **recommended**, see the check below
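After exposing with --type=NodePort, Kubernetes picks a port in the 30000-32767 range and opens it on every node; it shows up in the PORT(S) column of the service listing, and the service is then reachable through any node IP (the <nodeport> placeholder stands for whatever port was assigned):

kubectl get svc nginx-deployment
curl http://192.168.211.131:<nodeport>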
  • Change the number of pods (controller types include: rc [Replication Controller], rs [ReplicaSet], deployment, job)
kubectl scale rc my-nginx --replicas=4
  • Delete pods (by file or by name; files may only be json or yaml)
kubectl delete deployment --all
kubectl delete -f rc_nginx.yaml
  • Upgrade the image of pods created through a deployment
kubectl set image deployment nginx-deployment nginx=nginx:1.13
  • View the history of image upgrades
kubectl rollout history deployment nginx-deployment
  • Roll back an image upgrade (see the revision note below)
kubectl rollout undo deployment nginx-deployment
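By default undo goes back one revision; to return to a specific revision from the history shown above, the --to-revision flag can be used (revision 1 here is only an example):

kubectl rollout undo deployment nginx-deployment --to-revision=1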
  • View nodes
kubectl get node -o wide
  • Create a service

    The kubectl expose command creates a service for our pods so that they can be accessed from outside. A service can also be created from a yaml file, or discovered through DNS (which requires an add-on); a yaml sketch follows the commands below.

    There are three main service types: ClusterIP, NodePort, LoadBalancer.

kubectl get svc  # list services
kubectl expose pods pod-nginx    # creates a ClusterIP service; the cluster IP stays fixed while the pod IPs behind it can change.
                                 # Requests to the cluster IP + port are load-balanced automatically.
kubectl expose pods pod-nginx --type=NodePort  # creates a NodePort service; --type defaults to ClusterIP
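For completeness, the same kind of NodePort service can also be written as a yaml file and created with kubectl create -f, mirroring the deployment above (the service name and nodePort value are illustrative assumptions):

apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx          # matches the labels of the deployment above
  ports:
  - port: 80            # service (cluster IP) port
    targetPort: 80      # container port
    nodePort: 30080     # port opened on every node; must be in the default 30000-32767 range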

 

Original article: Centos7下Kubernetes集群安装部署
