Deploy the EFK add-on on Kubernetes
Notes
- The images used by EFK must already be present locally on every node in the cluster, otherwise the pods will end up in ImagePullBackOff (a quick check is sketched below).
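A minimal check sketch; node1/node2 are placeholder hostnames, replace them with your own nodes:
for node in node1 node2; do
  # list the EFK-related images on each node
  ssh root@$node "docker images | grep -E 'elasticsearch|fluentd|kibana|alpine'"
done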
Official docs: the fluentd-elasticsearch add-on (kubernetes/cluster/addons/fluentd-elasticsearch)
wget https://storage.googleapis.com/kubernetes-release/release/v1.14.1/kubernetes-server-linux-amd64.tar.gz
mkdir ~/efk
tar -zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes && tar -zxvf kubernetes-src.tar.gz
cp cluster/addons/fluentd-elasticsearch/*.yaml ~/efk/
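Alternatively (a sketch, assuming the add-on manifests are still published under the v1.14.1 tag on GitHub), the yaml files can be fetched directly instead of unpacking the whole server tarball:
mkdir -p ~/efk && cd ~/efk
base=https://raw.githubusercontent.com/kubernetes/kubernetes/v1.14.1/cluster/addons/fluentd-elasticsearch
for f in es-service.yaml es-statefulset.yaml fluentd-es-configmap.yaml fluentd-es-ds.yaml kibana-deployment.yaml kibana-service.yaml; do
  wget "$base/$f"
done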
[root@master efk]# ls
es-service.yaml es-statefulset.yaml fluentd-es-configmap.yaml fluentd-es-ds.yaml kibana-deployment.yaml kibana-service.yaml
[root@master ~]# grep -rn image ./*
./efk/es-statefulset.yaml:76: - image: gcr.io/fluentd-elasticsearch/elasticsearch:v6.6.1
./efk/es-statefulset.yaml:106: - image: alpine:3.6
./efk/fluentd-es-configmap.yaml:205: # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
./efk/fluentd-es-ds.yaml:80: image: k8s.gcr.io/fluentd-elasticsearch:v2.4.0
./efk/kibana-deployment.yaml:24: image: docker.elastic.co/kibana/kibana-oss:6.6.1
[root@master ~]# docker pull docker.elastic.co/elasticsearch/elasticsearch:6.6.1
[root@master ~]# docker pull alpine:3.6
[root@master ~]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/fluentd-elasticsearch:v2.4.0
[root@master ~]# docker pull docker.elastic.co/kibana/kibana-oss:6.6.1
docker tag docker.elastic.co/elasticsearch/elasticsearch:6.6.1 gcr.io/fluentd-elasticsearch/elasticsearch:v6.6.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/fluentd-elasticsearch:v2.4.0 k8s.gcr.io/fluentd-elasticsearch:v2.4.0
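Since every node needs these images locally (see the note above), one option is to export them from the master and load them on each worker. This is a sketch; node1/node2 stand in for your worker hostnames:
images="gcr.io/fluentd-elasticsearch/elasticsearch:v6.6.1 alpine:3.6 k8s.gcr.io/fluentd-elasticsearch:v2.4.0 docker.elastic.co/kibana/kibana-oss:6.6.1"
for node in node1 node2; do   # placeholder worker hostnames
  for img in $images; do
    # stream each image (with its retagged name) to the worker's docker daemon
    docker save "$img" | ssh root@$node "docker load"
  done
done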
[root@master efk]# kubectl apply -f .
service/elasticsearch-logging created
serviceaccount/elasticsearch-logging created
clusterrole.rbac.authorization.k8s.io/elasticsearch-logging created
clusterrolebinding.rbac.authorization.k8s.io/elasticsearch-logging created
statefulset.apps/elasticsearch-logging created
configmap/fluentd-es-config-v0.2.0 created
serviceaccount/fluentd-es created
clusterrole.rbac.authorization.k8s.io/fluentd-es created
clusterrolebinding.rbac.authorization.k8s.io/fluentd-es created
daemonset.apps/fluentd-es-v2.4.0 created
deployment.apps/kibana-logging created
service/kibana-logging created
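Before moving on, verify that the EFK pods come up; the resource names below match the apply output above:
kubectl -n kube-system get pods | grep -E 'elasticsearch-logging|fluentd-es|kibana-logging'
kubectl -n kube-system rollout status statefulset/elasticsearch-logging
kubectl -n kube-system rollout status daemonset/fluentd-es-v2.4.0
kubectl -n kube-system rollout status deployment/kibana-logging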
[root@master ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.2:6443
Elasticsearch is running at https://192.168.1.2:6443/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy
Kibana is running at https://192.168.1.2:6443/api/v1/namespaces/kube-system/services/kibana-logging/proxy
KubeDNS is running at https://192.168.1.2:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Accessing the Elasticsearch or Kibana proxy URLs above directly (e.g. https://192.168.1.2:6443/api/v1/namespaces/kube-system/services/kibana-logging/proxy) fails with an error like:
services "elasticsearch-logging" is forbidden: User "system:anonymous" cannot proxy resource
The anonymous user has no permission to access the apiserver.
Set up a proxy for access instead (kubectl proxy exposes the apiserver over plain HTTP):
kubectl proxy --address='192.168.1.2' --port=5601 --accept-hosts='^*$'
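With the proxy running, Kibana (and Elasticsearch) are reachable over plain HTTP on the proxy port, for example:
# open in a browser, or curl it:
curl http://192.168.1.2:5601/api/v1/namespaces/kube-system/services/kibana-logging/proxy
# quick Elasticsearch sanity check via the same proxy:
curl http://192.168.1.2:5601/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_cat/indices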