Geek Time Ops Advanced Training Camp — Week 15 Assignment

1. Add two or more users authenticated by static tokens, for example tom and jerry, and authenticate them against Kubernetes;

#### Generate tokens
root@k8s-master01:~# echo "$(openssl rand -hex 3).$(openssl rand -hex 8)"
d40f7e.e7d89c0d87fb06da
root@k8s-master01:~# echo "$(openssl rand -hex 3).$(openssl rand -hex 8)"
002479.396965c9d6802b54
root@k8s-master01:~# echo "$(openssl rand -hex 3).$(openssl rand -hex 8)"
d953d0.61cac36af895c0f5
#### Create the token file (one entry per line: token,user,uid,"group1,group2")
root@k8s-master01:~# touch /etc/kubernetes/authfiles/tokens.csv
root@k8s-master01:/etc/kubernetes/authfiles# cat tokens.csv
d40f7e.e7d89c0d87fb06da,tom,1001,"kubeusers,kubeadmin"
002479.396965c9d6802b54,jerry,1002,"kubeusers"
d953d0.61cac36af895c0f5,trump,1003,"kubeadmin"

#### Edit the kube-apiserver configuration
```bash
root@k8s-master01:/etc/kubernetes/authfiles# cp  /etc/kubernetes/manifests/kube-apiserver.yaml{,.bak}
root@k8s-master01:/etc/kubernetes/authfiles# cp /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/

root@k8s-master01:/etc/kubernetes/manifests# cat kube-apiserver.yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.56.161:6443
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=192.168.56.161
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --token-auth-file=/etc/kubernetes/authfiles/tokens.csv
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
    - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
    - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
    - --etcd-servers=https://127.0.0.1:2379
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-issuer=https://kubernetes.default.svc.cluster.local
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
    - --service-cluster-ip-range=10.96.0.0/12
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: registry.aliyuncs.com/google_containers/kube-apiserver:v1.26.1
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 192.168.56.161
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    name: kube-apiserver
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 192.168.56.161
        path: /readyz
        port: 6443
        scheme: HTTPS
      periodSeconds: 1
      timeoutSeconds: 15
    resources:
      requests:
        cpu: 250m
    startupProbe:
      failureThreshold: 24
      httpGet:
        host: 192.168.56.161
        path: /livez
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/ca-certificates
      name: etc-ca-certificates
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
    - mountPath: /usr/local/share/ca-certificates
      name: usr-local-share-ca-certificates
      readOnly: true
    - mountPath: /usr/share/ca-certificates
      name: usr-share-ca-certificates
      readOnly: true
    - mountPath: /etc/kubernetes/authfiles
      name: authfiles
      readOnly: true
  hostNetwork: true
  priorityClassName: system-node-critical
  securityContext:
    seccompProfile:
      type: RuntimeDefault
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/ca-certificates
      type: DirectoryOrCreate
    name: etc-ca-certificates
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
  - hostPath:
      path: /usr/local/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-local-share-ca-certificates
  - hostPath:
      path: /usr/share/ca-certificates
      type: DirectoryOrCreate
    name: usr-share-ca-certificates
  - hostPath:
      path: /etc/kubernetes/authfiles
      type: DirectoryOrCreate
    name: authfiles
status: {}

# Apply the manifest
root@k8s-master01:~# cp /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/
# Verify after a short wait
root@k8s-master01:~# kubectl  get pods -n kube-system
NAME                                              READY   STATUS    RESTARTS         AGE
coredns-5bbd96d687-8jx6m                          1/1     Running   207 (129m ago)   36d
coredns-5bbd96d687-fznlm                          1/1     Running   204 (129m ago)   36d
csi-nfs-controller-998f5bbd6-vrz2z                3/3     Running   39 (32s ago)     13d
csi-nfs-node-2485p                                3/3     Running   27 (129m ago)    13d
csi-nfs-node-49z52                                3/3     Running   21 (129m ago)    13d
csi-nfs-node-8vgrb                                3/3     Running   24 (129m ago)    13d
csi-nfs-node-v6hvx                                3/3     Running   21 (129m ago)    13d
etcd-k8s-master01.magedu.com                      1/1     Running   9 (129m ago)     36d
kube-apiserver-k8s-master01.magedu.com            1/1     Running   1 (17s ago)      27m
kube-controller-manager-k8s-master01.magedu.com   1/1     Running   24 (35s ago)     36d
kube-proxy-6p9vb                                  1/1     Running   13 (129m ago)    36d
kube-proxy-78jf6                                  1/1     Running   10 (129m ago)    36d
kube-proxy-czdmd                                  1/1     Running   9 (129m ago)     36d
kube-proxy-glf2m                                  1/1     Running   12 (129m ago)    36d
kube-scheduler-k8s-master01.magedu.com            1/1     Running   25 (36s ago)     36d

# Verify
kubectl -s https://kubeapi.magedu.com:6443 --token="d40f7e.e7d89c0d87fb06da"  --certificate-authority='/etc/kubernetes/pki/ca.crt' get pods
E0407 21:19:12.674881   25582 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
E0407 21:19:12.680975   25582 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
E0407 21:19:12.690975   25582 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
E0407 21:19:12.724766   25582 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
E0407 21:19:12.736352   25582 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
error: You must be logged in to the server (the server has asked for the client to provide credentials)

 curl -k -H "Authorization: Bearer d953d0.61cac36af895c0f5"  -k  https://kubeapi.magedu.com:6443/api/v1/namespaces/default/pods/
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "Unauthorized",
  "reason": "Unauthorized",
  "code": 401
}
```

# Something is wrong here; the cause has not been identified yet
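As an additional check, the static token can also be loaded into a dedicated kubeconfig. A minimal sketch, assuming the file name token-users.conf (the server address, CA file and token are the ones used above):

```bash
# Sketch: load tom's static token into a separate kubeconfig and query the API with it
kubectl config set-cluster kube-test --server="https://kubeapi.magedu.com:6443" \
  --certificate-authority=/etc/kubernetes/pki/ca.crt --embed-certs=true \
  --kubeconfig=$HOME/.kube/token-users.conf
kubectl config set-credentials tom --token="d40f7e.e7d89c0d87fb06da" \
  --kubeconfig=$HOME/.kube/token-users.conf
kubectl config set-context tom@kube-test --cluster=kube-test --user=tom \
  --kubeconfig=$HOME/.kube/token-users.conf
kubectl get pods --kubeconfig=$HOME/.kube/token-users.conf --context='tom@kube-test'
```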

Add two or more users that authenticate to Kubernetes with X509 certificates, for example mason and magedu, and add their credentials to a kubeconfig file so they can be loaded.

# Inspect the existing certificates
root@k8s-master01:/etc/kubernetes/pki# ls
apiserver.crt              apiserver.key                 ca.crt  front-proxy-ca.crt      front-proxy-client.key
apiserver-etcd-client.crt  apiserver-kubelet-client.crt  ca.key  front-proxy-ca.key      sa.key
apiserver-etcd-client.key  apiserver-kubelet-client.key  etcd    front-proxy-client.crt  sa.pub
# Create a private key
root@k8s-master01:/etc/kubernetes/pki# (umask 077; openssl genrsa -out ./mason.key 2048)
Generating RSA private key, 2048 bit long modulus (2 primes)
.......+++++
........................+++++
e is 65537 (0x010001)
# Create certificate signing requests

(umask 077; openssl genrsa -out ./john.key 2048)

root@k8s-master01:/etc/kubernetes/pki# openssl req -new -key ./mason.key -out ./mason.csr -subj "/CN=mason/O=kubeadmins"
 openssl req -new -key ./john.key -out ./john.csr -subj "/CN=john/O=kubeadmins"
# Sign the certificates with the cluster CA
root@k8s-master01:/etc/kubernetes/pki#  openssl x509 -req -days 365 -CA ./ca.crt  -CAkey ./ca.key -CAcreateserial  -in ./mason.csr -out ./mason.crt
Signature ok
subject=CN = mason, O = kubeadmins
Getting CA Private Key

openssl x509 -req -days 365 -CA ./ca.crt  -CAkey ./ca.key -CAcreateserial  -in ./john.csr -out ./john.crt

# Copy the certificates to node1
root@k8s-master01:/etc/kubernetes/pki# scp -p mason.crt mason.key 192.168.56.166:/etc/kubernetes/pki/

root@k8s-master01:/etc/kubernetes/pki# scp -p john.crt john.key 192.168.56.166:/etc/kubernetes/pki
# Verify on node1
root@k8s-node01:/etc/kubernetes/pki# kubectl  -s https://kubeapi.magedu.com:6443 --client-certificate=/etc/kubernetes/pki/mason.crt --client-key=/etc/kubernetes/pki/mason.key --certificate-authority=/etc/kubernetes/pki/ca.crt get pods
Error from server (Forbidden): pods is forbidden: User "mason" cannot list resource "pods" in API group "" in the namespace "default"
root@k8s-node01:~# kubectl  -s https://kubeapi.magedu.com:6443 --client-certificate=/etc/kubernetes/pki/john.crt --client-key=/etc/kubernetes/pki/john.key --certificate-authority=/etc/kubernetes/pki/ca.crt get pods
Error from server (Forbidden): pods is forbidden: User "john" cannot list resource "pods" in API group "" in the namespace "default"

# Build a kubeconfig for the certificate users

root@k8s-master01:/etc/kubernetes/pki# kubectl config set-cluster kube-test --embed-certs=true --certificate-authority=/etc/kubernetes/pki/ca.crt --server="https://kubeapi.magedu.com:6443" --kubeconfig=$HOME/.kube/kubeusers.conf
Cluster "kube-test" set.

root@k8s-master01:~# kubectl config view --kubeconfig=kubeusers.conf
apiVersion: v1
clusters: null
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null
# Note: the view above is empty because --kubeconfig=kubeusers.conf resolves relative to the current directory (~), not to the $HOME/.kube/kubeusers.conf file written by the set-cluster command

# Add the certificate user mason to the kubeconfig
kubectl config set-credentials mason --embed-certs=true --client-certificate=/etc/kubernetes/pki/mason.crt --client-key=/etc/kubernetes/pki/mason.key --kubeconfig=$HOME/.kube/kubeusers.conf

# Add the certificate user john to the kubeconfig
kubectl config set-credentials john --embed-certs=true --client-certificate=/etc/kubernetes/pki/john.crt --client-key=/etc/kubernetes/pki/john.key --kubeconfig=$HOME/.kube/kubeusers.conf


root@k8s-master01:~/.kube# kubectl config set-context mason@kube-test --cluster=kube-test --user=mason  --kubeconfig=$HOME/.kube/kubeusers.conf
Context "mason@kube-test" created.

root@k8s-master01:~/.kube# kubectl config set-context john@kube-test --cluster=kube-test --user=john  --kubeconfig=$HOME/.kube/kubeusers.conf


root@k8s-master01:~/.kube# kubectl  get pods --kubeconfig=kubeusers.conf --context='mason@kube-test'
root@k8s-master01:~/.kube# kubectl get pods --context='mason@kube-test'
Error from server (Forbidden): pods is forbidden: User "mason" cannot list resource "pods" in API group "" in the namespace "default"

2. Create a ServiceAccount from a resource manifest and attach an imagePullSecrets entry.

root@k8s-master01:~/test# kubectl create secret docker-registry mydcokrhub --docker-username=USER --docker-password=PASSWORD -n test
secret/mydcokrhub created
root@k8s-master01:~/test# kubectl  get secret -n test
NAME         TYPE                             DATA   AGE
mydcokrhub   kubernetes.io/dockerconfigjson   1      19s

root@k8s-master01:~/test# cat mysa.yaml
apiVersion: v1
kind: ServiceAccount
imagePullSecrets:
- name: mydcokrhub
metadata:
  name: mysa
  namespace: test
root@k8s-master01:~/test# kubectl  apply -f mysa.yaml
serviceaccount/mysa created
root@k8s-master01:~/test# kubectl  get   sa -n test
NAME      SECRETS   AGE
default   0         14h
mysa      0         21s

      # reference the ServiceAccount mysa created above from the Pod template spec of a workload
      serviceAccountName: mysa
      schedulerName: default-scheduler

or reference the pull secret directly in the Pod spec:
        imagePullSecrets:
        - name: myregistrykey
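To see the pull secret take effect, the ServiceAccount can be referenced from a workload. A minimal sketch, where the pod name sa-demo and the demoapp image are illustrative assumptions:

```bash
# Sketch: a Pod in the test namespace that runs under the mysa ServiceAccount,
# so image pulls go through the attached mydcokrhub secret
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: sa-demo
  namespace: test
spec:
  serviceAccountName: mysa
  containers:
  - name: demo
    image: ikubernetes/demoapp:v1.0
EOF
```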

3. Grant tom permission to manage the blog namespace; grant jerry permission to manage the entire cluster; grant mason read-only access to cluster resources.

# Grant admin permissions on the test namespace
## Create the Role
kubectl create role manager-ns-test --verb=* --resource=pods,deployments,daemonsets,replicasets,statefulsets,jobs,cronjobs,ingresses,events,configmaps,endpoints,services -n test
## Bind the Role
kubectl create rolebinding john-as-manage-ns-test --role=manager-ns-test --user=john -n test
## Verify
kubectl  get pods -n test --kubeconfig=kubeusers.conf --context='john@kube-test'
## Remove the permission
root@k8s-master01:~/.kube# kubectl  get rolebinding -n test
NAME                     ROLE                   AGE
john-as-manage-ns-test   Role/manager-ns-test   2m26s
root@k8s-master01:~/.kube# kubectl  delete rolebinding john-as-manage-ns-test -n test
rolebinding.rbac.authorization.k8s.io "john-as-manage-ns-test" deleted
root@k8s-master01:~/.kube# kubectl  get pods -n test --kubeconfig=kubeusers.conf --context='john@kube-test'
Error from server (Forbidden): pods is forbidden: User "john" cannot list resource "pods" in API group "" in the namespace "test"
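The same pattern covers the tom/blog pairing from the task statement. A sketch, assuming the blog namespace already exists and reusing the built-in admin ClusterRole instead of a hand-written Role:

```bash
# Sketch: make tom an administrator of the blog namespace via the built-in "admin" ClusterRole
kubectl create rolebinding tom-as-ns-admin --clusterrole=admin --user=tom -n blog
# Verify with tom's static token from task 1
kubectl -s https://kubeapi.magedu.com:6443 --token="d40f7e.e7d89c0d87fb06da" \
  --certificate-authority=/etc/kubernetes/pki/ca.crt get pods -n blog
```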

# Grant cluster-admin permissions, then remove them
## Grant cluster-admin permissions
kubectl create clusterrolebinding mason-as-cluster-admin --user=mason --clusterrole=cluster-admin
kubectl  get pods --kubeconfig=kubeusers.conf --context='mason@kube-test'

## Remove the permission
kubectl  delete clusterrolebinding mason-as-cluster-admin
kubectl  get pods --kubeconfig=kubeusers.conf --context='mason@kube-test' # the permission is now gone

# Grant mason read-only access to the cluster
kubectl create clusterrolebinding mason--as-view --clusterrole=view --user=mason
root@k8s-master01:~/.kube# kubectl create clusterrolebinding mason--as-view --clusterrole=view --user=mason
clusterrolebinding.rbac.authorization.k8s.io/mason--as-view created
root@k8s-master01:~/.kube# kubectl  get pods --kubeconfig=kubeusers.conf --context='mason@kube-test'
NAME                      READY   STATUS    RESTARTS      AGE
demoapp-75f59c894-9fw9g   1/1     Running   1 (15h ago)   15h
demoapp-75f59c894-sw2vw   1/1     Running   1 (15h ago)   15h
demoapp-75f59c894-x6k6h   1/1     Running   1 (15h ago)   15h

## Remove the permission
kubectl get   clusterrolebinding
kubectl  delete clusterrolebinding  mason--as-view  

root@k8s-master01:~/.kube# kubectl  get pods --kubeconfig=kubeusers.conf --context='mason@kube-test'
Error from server (Forbidden): pods is forbidden: User "mason" cannot list resource "pods" in API group "" in the namespace "default" 
# the permission is now gone

 

4. Deploy Jenkins, Prometheus-Server, and Node-Exporter to the Kubernetes cluster, then expose them outside the cluster with Ingress; Jenkins must be exposed over HTTPS.

# Install Jenkins

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# cat 01-namespace-jenkins.yaml 
# Maintainer: MageEdu <mage@magedu.com>
---
apiVersion: v1
kind: Namespace
metadata:
  name: jenkins
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# cat 02-pvc-jenkins.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-pvc
  namespace: jenkins
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
  storageClassName: nfs-csi
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# cat 03-rbac-jenkins.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins-master
  namespace: jenkins

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: jenkins-master
rules:
  - apiGroups: ["extensions", "apps"]
    resources: ["deployments"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["create","delete","get","list","patch","update","watch"]
  - apiGroups: [""]
    resources: ["pods/exec"]
    verbs: ["create","delete","get","list","patch","update","watch"]
  - apiGroups: [""]
    resources: ["pods/log"]
    verbs: ["get","list","watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: jenkins-master
roleRef:
  kind: ClusterRole
  name: jenkins-master
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: jenkins-master
  namespace: jenkins
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# cat 04-deploy-jenkins.yaml 
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: jenkins 
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      serviceAccountName: jenkins-master
      volumes:
      - name: jenkins-store
        persistentVolumeClaim:
          claimName: jenkins-pvc
      containers:
      - name: jenkins
        image: jenkins/jenkins:jdk11
        #image: jenkins/jenkins:lts-jdk11
        volumeMounts:
        - name: jenkins-store
          mountPath: /var/jenkins_home/
        imagePullPolicy: IfNotPresent
        env:
        - name: JAVA_OPTS
          value: -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85 -Duser.timezone=Asia/Shanghai -Djenkins.install.runSetupWizard=true
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        - containerPort: 50000
          name: agent
          protocol: TCP
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# cat 05-service-jenkins.yaml 
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: jenkins 
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  type: NodePort
  ports:
  - name: http
    port: 8080
    targetPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins-jnlp
  namespace: jenkins 
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  ports:
  - name: agent
    port: 50000
    targetPort: 50000

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl apply -f 01-namespace-jenkins.yaml  -f 02-pvc-jenkins.yaml  -f 03-rbac-jenkins.yaml  -f 04-deploy-jenkins.yaml  -f 05-service-jenkins.yaml
namespace/jenkins created
persistentvolumeclaim/jenkins-pvc created
serviceaccount/jenkins-master created
clusterrole.rbac.authorization.k8s.io/jenkins-master created
clusterrolebinding.rbac.authorization.k8s.io/jenkins-master created
deployment.apps/jenkins created
service/jenkins created
service/jenkins-jnlp created

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  get ns
NAME              STATUS   AGE
blog              Active   6d1h
database          Active   16d
default           Active   42d
jenkins           Active   92s
kube-flannel      Active   42d
kube-node-lease   Active   42d
kube-public       Active   42d
kube-system       Active   42d
nfs               Active   18d
prom              Active   16d
test              Active   25d
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl get sa -n jenkins
NAME             SECRETS   AGE
default          0         106s
jenkins-master   0         106s
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl get pods -n jenkins
NAME                       READY   STATUS              RESTARTS   AGE
root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  get pods jenkins-6c5f8665c8-68w44 -o yaml -n jenkins
  serviceAccountName: jenkins-master

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  get svc -n jenkins
NAME           TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
jenkins        NodePort    10.99.47.31      <none>        8080:30211/TCP   3m59s
jenkins-jnlp   ClusterIP   10.102.197.234   <none>        50000/TCP        3m59s
root@k8s-master01:~/learning-k8s-master/jenkins/deploy#

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  logs jenkins-6c5f8665c8-68w44  -n jenkins
# Log in to verify: http://192.168.56.166:30211/
7bf8f43263c444f2ab685ec7aa2793dc
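The initial admin password can also be read from the Jenkins home volume instead of from the logs; a sketch using the deployment created above:

```bash
# Sketch: read the generated initial admin password from the Jenkins home directory
kubectl -n jenkins exec deploy/jenkins -- cat /var/jenkins_home/secrets/initialAdminPassword
```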

# In addition to the default plugins, install the Kubernetes plugin
Account: admin, password: admin

# Deploy Prometheus
git clone https://github.com/iKubernetes/k8s-prom.git

root@k8s-master01:~/k8s-prom-master# ls prometheus
prometheus-cfg.yaml  prometheus-deploy.yaml  prometheus-rbac.yaml  prometheus-svc.yaml
root@k8s-master01:~/k8s-prom-master# cat prometheus/prometheus-cfg.yaml
---
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    app: prometheus
  name: prometheus-config
  namespace: prom
data:
  prometheus.yml: |
    # A scrape configuration for running Prometheus on a Kubernetes cluster.
    # This uses separate scrape configs for cluster components (i.e. API server, node)
    # and services to allow each to use different authentication configs.
    #
    # Kubernetes labels will be added as Prometheus labels on metrics via the
    # `labelmap` relabeling action.
    #
    # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
    # for the kubernetes-cadvisor job; you will need to edit or remove this job.

    # Scrape config for API servers.
    #
    # Kubernetes exposes API servers as endpoints to the default/kubernetes
    # service so this uses `endpoints` role and uses relabelling to only keep
    # the endpoints associated with the default/kubernetes service using the
    # default named port `https`. This works for single API server deployments as
    # well as HA API server deployments.
    global:
      scrape_interval: 15s
      scrape_timeout: 10s
      evaluation_interval: 1m

    scrape_configs:
    - job_name: 'kubernetes-apiservers'

      kubernetes_sd_configs:
      - role: endpoints

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        # If your node certificates are self-signed or use a different CA to the
        # master CA, then disable certificate verification below. Note that
        # certificate verification is an integral part of a secure infrastructure
        # so this should only be disabled in a controlled environment. You can
        # disable certificate verification by uncommenting the line below.
        #
        # insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      # Keep only the default/kubernetes service endpoints for the https port. This
      # will add targets for each API server which Kubernetes adds an endpoint to
      # the default/kubernetes service.
      relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https

    # Scrape config for nodes (kubelet).
    #
    # Rather than connecting directly to the node, the scrape is proxied though the
    # Kubernetes apiserver.  This means it will work if Prometheus is running out of
    # cluster, or can't connect to nodes for some other reason (e.g. because of
    # firewalling).
    - job_name: 'kubernetes-nodes'

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:
      - role: node

      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics

    # Scrape config for Kubelet cAdvisor.
    #
    # This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
    # (those whose names begin with 'container_') have been removed from the
    # Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
    # retrieve those metrics.
    #
    # In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
    # HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
    # in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
    # the --cadvisor-port=0 Kubelet flag).
    #
    # This job is not necessary and should be removed in Kubernetes 1.6 and
    # earlier versions, or it will cause the metrics to be scraped twice.
    - job_name: 'kubernetes-cadvisor'

      # Default to scraping over https. If required, just disable this or change to
      # `http`.
      scheme: https

      # This TLS & bearer token file config is used to connect to the actual scrape
      # endpoints for cluster components. This is separate to discovery auth
      # configuration because discovery & scraping are two separate concerns in
      # Prometheus. The discovery auth config is automatic if Prometheus runs inside
      # the cluster. Otherwise, more config options have to be provided within the
      # <kubernetes_sd_config>.
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token

      kubernetes_sd_configs:
      - role: node

      relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

    # Scrape config for service endpoints.
    #
    # The relabeling allows the actual service scrape endpoint to be configured
    # via the following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
    # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
    # to set this to `https` & most likely set the `tls_config` of the scrape config.
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: If the metrics are exposed on a different port to the
    # service then set this appropriately.
    - job_name: 'kubernetes-service-endpoints'

      kubernetes_sd_configs:
      - role: endpoints

      relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name

    # Example scrape config for pods
    #
    # The relabeling allows the actual pod scrape endpoint to be configured via the
    # following annotations:
    #
    # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
    # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
    # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
    # pod's declared ports (default is a port-free target if none are declared).
    - job_name: 'kubernetes-pods'
      # if you want to use metrics on jobs, set the below field to
      # true to prevent Prometheus from setting the `job` label
      # automatically.
      honor_labels: false
      kubernetes_sd_configs:
      - role: pod
      # skip verification so you can do HTTPS to pods
      tls_config:
        insecure_skip_verify: true
      # make sure your labels are in order
      relabel_configs:
      # these labels tell Prometheus to automatically attach source
      # pod and namespace information to each collected sample, so
      # that they'll be exposed in the custom metrics API automatically.
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: pod
      # these labels tell Prometheus to look for
      # prometheus.io/{scrape,path,port} annotations to configure
      # how to scrape
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (.+)

    #- job_name: 'Kubernetes'
    #  static_configs:
    #  - targets: ['kube-state-metrics:8080']
root@k8s-master01:~/k8s-prom-master# cat prometheus/prometheus-deploy.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus-server
  namespace: prom
  labels:
    app: prometheus
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
      component: server
    #matchExpressions:
    #- {key: app, operator: In, values: [prometheus]}
    #- {key: component, operator: In, values: [server]}
  template:
    metadata:
      labels:
        app: prometheus
        component: server
      annotations:
        prometheus.io/scrape: 'true'
        prometheus.io/port: '9090'
    spec:
      serviceAccountName: prometheus
      containers:
      - name: prometheus
        image: prom/prometheus:v2.40.5
        imagePullPolicy: Always
        command:
          - prometheus
          - --config.file=/etc/prometheus/prometheus.yml
          - --storage.tsdb.path=/prometheus
          - --storage.tsdb.retention=720h
        ports:
        - containerPort: 9090
          protocol: TCP
        resources:
          limits:
            memory: 2Gi
        volumeMounts:
        - mountPath: /etc/prometheus/prometheus.yml
          name: prometheus-config
          subPath: prometheus.yml
        - mountPath: /prometheus/
          name: prometheus-storage-volume
      volumes:
        - name: prometheus-config
          configMap:
            name: prometheus-config
            items:
              - key: prometheus.yml
                path: prometheus.yml
                mode: 0644
        - name: prometheus-storage-volume
          emptyDir: {}
root@k8s-master01:~/k8s-prom-master# cat prometheus/prometheus-rbac.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - nodes/proxy
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: prom
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: prom
root@k8s-master01:~/k8s-prom-master# cat prometheus/prometheus-svc.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: prom
  annotations:
    prometheus.io/scrape: 'true'
    prometheus.io/port: '9090'
  labels:
    app: prometheus
spec:
  type: NodePort
  ports:
    - port: 9090
      targetPort: 9090
      nodePort: 30090
      protocol: TCP
  selector:
    app: prometheus
    component: server
root@k8s-master01:~/k8s-prom-master#
root@k8s-master01:~/k8s-prom-master# kubectl  apply -f prometheus

root@k8s-master01:~/k8s-prom-master# kubectl  get pods -n prom
NAME                                READY   STATUS    RESTARTS   AGE
prometheus-server-8f7bc69b5-tbnlj   1/1     Running   0          36s
root@k8s-master01:~/k8s-prom-master# kubectl  get svc -n prom
NAME         TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
prometheus   NodePort   10.102.79.209   <none>        9090:30090/TCP   54s

# Deploy Node Exporter

root@k8s-master01:~/k8s-prom-master# ls node_exporter/
node-exporter-ds.yaml  node-exporter-svc.yaml
root@k8s-master01:~/k8s-prom-master# cat node_exporter/node-exporter-ds.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: prometheus-node-exporter
  namespace: prom
  labels:
    app: prometheus
    component: node-exporter
spec:
  selector:
    matchLabels:
      app: prometheus
      component: node-exporter
  template:
    metadata:
      name: prometheus-node-exporter
      labels:
        app: prometheus
        component: node-exporter
    spec:
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      containers:
      - image: prom/node-exporter:v1.5.0
        name: prometheus-node-exporter
        ports:
        - name: prom-node-exp
          containerPort: 9100
          hostPort: 9100
      hostNetwork: true
      hostPID: true
root@k8s-master01:~/k8s-prom-master# cat node_exporter/node-exporter-svc.yaml
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: 'true'
  name: prometheus-node-exporter
  namespace: prom
  labels:
    app: prometheus
    component: node-exporter
spec:
  clusterIP: None
  ports:
    - name: prometheus-node-exporter
      port: 9100
      protocol: TCP
  selector:
    app: prometheus
    component: node-exporter
  type: ClusterIP
root@k8s-master01:~/k8s-prom-master# kubectl  apply -f node_exporter/
daemonset.apps/prometheus-node-exporter created
service/prometheus-node-exporter created

root@k8s-master01:~/k8s-prom-master# kubectl  get pods -n prom
NAME                                READY   STATUS    RESTARTS   AGE
prometheus-node-exporter-cw5sx      1/1     Running   0          42s
prometheus-node-exporter-r4xdn      1/1     Running   0          42s
prometheus-node-exporter-s6wzb      1/1     Running   0          42s
prometheus-server-8f7bc69b5-tbnlj   1/1     Running   0          9m39s
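A quick reachability check; 192.168.56.166 is one node of this environment, and any node should work, since node-exporter uses hostNetwork and Prometheus is published on NodePort 30090:

```bash
# Sketch: node-exporter listens on the host network on port 9100
curl -s http://192.168.56.166:9100/metrics | head -n 5
# Prometheus itself answers on the NodePort published above
curl -s http://192.168.56.166:30090/-/ready
```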

# Expose with Ingress: deploy ingress-nginx
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.6.4/deploy/static/provider/cloud/deploy.yaml


sed -i s'?registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343?dyrnq/kube-webhook-certgen:v20220916-gd32f8c343?g' ingress-nginx.yaml
sed -i s'?registry.k8s.io/ingress-nginx/controller:v1.6.4?registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.6.4?g' ingress-nginx.yaml

root@k8s-master01:~# kubectl  apply -f ingress-nginx.yaml   # the manifest from the URL above, saved locally with the image substitutions
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
serviceaccount/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
configmap/ingress-nginx-controller created
service/ingress-nginx-controller created
service/ingress-nginx-controller-admission created
deployment.apps/ingress-nginx-controller created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created

 kubectl  get pods -n ingress-nginx

root@k8s-master01:~# kubectl  get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.109.133.73    <pending>     80:30657/TCP,443:32749/TCP   78s
ingress-nginx-controller-admission   ClusterIP      10.101.130.146   <none>        443/TCP                      77s

root@k8s-master01:~# kubectl edit svc ingress-nginx-controller -n ingress-nginx
  externalTrafficPolicy: Cluster
  externalIPs:
  - 192.168.56.100

root@k8s-master01:~# kubectl  get svc -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP      PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.109.214.116   192.168.56.100   80:30296/TCP,443:30032/TCP   8m2s
ingress-nginx-controller-admission   ClusterIP      10.99.236.200    <none>           443/TCP                      8m2s
root@k8s-master01:~# \
# Entry point: http://192.168.56.100/


## Create a certificate
### Create a private key
 (umask 077; openssl genrsa -out magedu.key 2048)
### Create a self-signed certificate
openssl req -new -x509 -key magedu.key -out magedu.crt -subj  /C=CN/ST=Beijing/L=Beijing/O=DevOps/CN=cicd.magedu.com
### Create a TLS secret in the jenkins namespace
root@k8s-master01:~/certs# kubectl create secret tls tls-magedu --cert=./magedu.crt --key=./magedu.key  -n jenkins
secret/tls-magedu created
### Generate the Ingress configuration (dry run used as a template)

root@k8s-master01:~/certs# kubectl create ingress tls-demo --rule='demoapp.magedu.com/*=demoapp10:80,tls=tls-magedu' --class=nginx -o yaml --dry-run=client

tee -a 07-ingress-jenkins.yaml << "EOF"
  tls:
  - hosts:
    - cicd.magedu.com
    secretName: tls-magedu
EOF
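The full content of 07-ingress-jenkins.yaml is not reproduced here. A minimal sketch of what it could look like once the tls section is appended (service name and port come from 05-service-jenkins.yaml; the actual manifest also carries a jenkins.magedu.com rule, as the output below shows):

```bash
# Sketch (assumed content): an HTTPS Ingress for Jenkins matching the secret created above
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jenkins
  namespace: jenkins
spec:
  ingressClassName: nginx
  rules:
  - host: cicd.magedu.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: jenkins
            port:
              number: 8080
  tls:
  - hosts:
    - cicd.magedu.com
    secretName: tls-magedu
EOF
```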

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  get secret -n jenkins
NAME         TYPE                DATA   AGE
tls-magedu   kubernetes.io/tls   2      8m12s

root@k8s-master01:~/learning-k8s-master/jenkins/deploy# kubectl  get ingress -n jenkins
NAME      CLASS   HOSTS                                ADDRESS          PORTS     AGE
jenkins   nginx   cicd.magedu.com,jenkins.magedu.com   192.168.56.100   80, 443   16m
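The Prometheus service can be opened up through the same ingress controller over plain HTTP; a sketch, where the host name prom.magedu.com is an assumption:

```bash
# Sketch: publish the prometheus service in the prom namespace via ingress-nginx
kubectl create ingress prometheus --rule='prom.magedu.com/*=prometheus:9090' --class=nginx -n prom
```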

5. Use Helm to deploy a MySQL cluster with primary/replica replication, deploy WordPress, and expose it outside the cluster with Ingress;

# Install Helm

wget https://get.helm.sh/helm-v3.11.0-linux-amd64.tar.gz
tar xf helm-v3.11.0-linux-amd64.tar.gz
 cp linux-amd64/helm  /usr/local/bin/

# Deploy primary/replica MySQL
# Add the repository
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo list
# Deploy MySQL
kubectl  create ns wordpress

helm install mysql  \
    --set auth.rootPassword=MageEdu \
    --set global.storageClass=nfs-csi \
    --set architecture=replication \
    --set auth.database=wpdb \
    --set auth.username=wpuser \
    --set auth.password='magedu.com' \
    --set secondary.replicaCount=1 \
    --set auth.replicationPassword='replpass' \
    bitnami/mysql \
    -n wordpress
##  Check
> Access the primary:
 mysql -h mysql-primary.wordpress.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"
> Access the secondary:
 mysql -h mysql-secondary.wordpress.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"
> Get the password:
  echo Username: root
  MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace wordpress mysql -o jsonpath="{.data.mysql-root-password}" | base64 -d)
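A minimal replication check run through the client pod suggested in the chart notes below; it assumes MYSQL_ROOT_PASSWORD has been exported as shown above:

```bash
# Sketch: confirm the secondary is replicating from the primary
kubectl run mysql-client --rm --tty -i --restart='Never' --namespace wordpress \
  --image docker.io/bitnami/mysql:8.0.32-debian-11-r14 \
  --env MYSQL_ROOT_PASSWORD="$MYSQL_ROOT_PASSWORD" --command -- \
  mysql -h mysql-secondary.wordpress.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD" \
  -e 'SHOW REPLICA STATUS\G'
```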

root@k8s-master01:~/learning-k8s-master/helm/wordpress# helm  list -n wordpress
NAME    NAMESPACE       REVISION        UPDATED                                 STATUS          CHART           APP VERSION
mysql   wordpress       1               2023-03-10 18:45:34.904098748 +0800 CST deployed        mysql-9.6.0     8.0.32
root@k8s-master01:~/learning-k8s-master/helm/wordpress# kubectl  get pods -n wordpress
NAME                READY   STATUS    RESTARTS   AGE
mysql-primary-0     1/1     Running   0          3m17s
mysql-secondary-0   1/1     Running   0          3m17s

root@k8s-master01:~/learning-k8s-master/helm/wordpress# kubectl  get svc -n wordpress
NAME                       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
mysql-primary              ClusterIP   10.98.148.235   <none>        3306/TCP   4m20s
mysql-primary-headless     ClusterIP   None            <none>        3306/TCP   4m20s
mysql-secondary            ClusterIP   10.108.26.62    <none>        3306/TCP   4m20s
mysql-secondary-headless   ClusterIP   None            <none>        3306/TCP   4m20s

Check the status

root@k8s-master01:~/learning-k8s-master/helm/wordpress# helm status mysql -n wordpress
NAME: mysql
LAST DEPLOYED: Fri Mar 10 18:45:34 2023
NAMESPACE: wordpress
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
CHART NAME: mysql
CHART VERSION: 9.6.0
APP VERSION: 8.0.32

** Please be patient while the chart is being deployed **

Tip:

  Watch the deployment status using the command: kubectl get pods -w --namespace wordpress

Services:

  echo Primary: mysql-primary.wordpress.svc.cluster.local:3306
  echo Secondary: mysql-secondary.wordpress.svc.cluster.local:3306

Execute the following to get the administrator credentials:

  echo Username: root
  MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace wordpress mysql -o jsonpath="{.data.mysql-root-password}" | base64 -d)

To connect to your database:

  1. Run a pod that you can use as a client:

      kubectl run mysql-client --rm --tty -i --restart='Never' --image  docker.io/bitnami/mysql:8.0.32-debian-11-r14 --namespace wordpress --env MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD --command -- bash

  2. To connect to primary service (read/write):

      mysql -h mysql-primary.wordpress.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"

  3. To connect to secondary service (read-only):

      mysql -h mysql-secondary.wordpress.svc.cluster.local -uroot -p"$MYSQL_ROOT_PASSWORD"
root@k8s-master01:~/learning-k8s-master/helm/wordpress#
## Deploy WordPress
helm install wordpress \
    --set mariadb.enabled=false \
    --set externalDatabase.host=mysql-primary.wordpress.svc.cluster.local \
    --set externalDatabase.user=wpuser \
    --set externalDatabase.password='magedu.com' \
    --set externalDatabase.database=wpdb \
    --set externalDatabase.port=3306 \
    --set persistence.storageClass=nfs-csi \
    --set ingress.enabled=true \
    --set ingress.ingressClassName=nginx \
    --set ingress.hostname=blog.magedu.com \
    --set ingress.pathType=Prefix \
    --set wordpressUsername=admin \
    --set wordpressPassword='magedu.com' \
    bitnami/wordpress \
    -n wordpress

'''
** Please be patient while the chart is being deployed **

Your WordPress site can be accessed through the following DNS name from within your cluster:

    wordpress.wordpress.svc.cluster.local (port 80)

To access your WordPress site from outside the cluster follow the steps below:

1. Get the WordPress URL and associate WordPress hostname to your cluster external IP:

   export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters
   echo "WordPress URL: http://blog.magedu.com/"
   echo "$CLUSTER_IP  blog.magedu.com" | sudo tee -a /etc/hosts

2. Open a browser and access WordPress using the obtained URL.

3. Login with the following credentials below to see your blog:

  echo Username: admin
  echo Password: $(kubectl get secret --namespace wordpress wordpress -o jsonpath="{.data.wordpress-password}" | base64 -d)
  '''

root@k8s-master01:~/learning-k8s-master/helm/wordpress# helm list -n wordpress
NAME            NAMESPACE       REVISION        UPDATED                                 STATUS          CHART                   APP VERSION
mysql           wordpress       1               2023-03-10 18:45:34.904098748 +0800 CST deployed        mysql-9.6.0             8.0.32
wordpress       wordpress       1               2023-03-10 18:57:48.370773007 +0800 CST deployed        wordpress-15.2.51       6.1.1
root@k8s-master01:~/learning-k8s-master/helm/wordpress# kubectl  get pods -n wordpress
NAME                         READY   STATUS    RESTARTS   AGE
mysql-primary-0              1/1     Running   0          14m
mysql-secondary-0            1/1     Running   0          14m
wordpress-66d5c8f868-rq6rw   0/1     Running   0          109s
root@k8s-master01:~/learning-k8s-master/helm/wordpress# kubectl  get svc -n wordpress
NAME                       TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
mysql-primary              ClusterIP      10.98.148.235   <none>        3306/TCP                     14m
mysql-primary-headless     ClusterIP      None            <none>        3306/TCP                     14m
mysql-secondary            ClusterIP      10.108.26.62    <none>        3306/TCP                     14m
mysql-secondary-headless   ClusterIP      None            <none>        3306/TCP                     14m
wordpress                  LoadBalancer   10.100.176.86   <pending>     80:30017/TCP,443:31234/TCP   2m14s
root@k8s-master01:~/learning-k8s-master/helm/wordpress# kubectl get ingress -n wordpress
NAME        CLASS   HOSTS             ADDRESS          PORTS   AGE
wordpress   nginx   blog.magedu.com   192.168.56.100   80      2m45s
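With blog.magedu.com resolving (for example via /etc/hosts) to the ingress external IP, the site can be checked from outside the cluster; a sketch:

```bash
# Sketch: hit the WordPress ingress from outside the cluster via the Host header
curl -s -o /dev/null -w '%{http_code}\n' -H 'Host: blog.magedu.com' http://192.168.56.100/
```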

Deploy Harbor with Helm and verify that an image can be pushed to it;

root@k8s-master01:~/learning-k8s-master/helm/harbor# kubectl  create ns harbor
namespace/harbor created
root@k8s-master01:~/learning-k8s-master/helm/harbor# cat harbor-values.yaml
expose:
  type: ingress
  tls:
    enabled: true
    certSource: auto
  ingress:
    hosts:
      core: hub.magedu.com
      notary: notary.magedu.com
    controller: default
    annotations:
      kubernetes.io/ingress.class: "nginx"

ipFamily:
  ipv4:
    enabled: true
  ipv6:
    enabled: false


externalURL: https://hub.magedu.com

# Persistent storage configuration
persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:        # PVCs for each Harbor component
    registry:          # registry component (persistent volume)
      storageClass: "nfs-csi"           # the StorageClass created earlier; the other components are configured the same way
      accessMode: ReadWriteMany          # access mode for the volume; set to ReadWriteMany
      size: 5Gi
    chartmuseum:     # chartmuseum component (persistent volume)
      storageClass: "nfs-csi"
      accessMode: ReadWriteMany
      size: 5Gi
    jobservice:
      jobLog:
        storageClass: "nfs-csi"
        accessMode: ReadWriteOnce
        size: 1Gi
      scanDataExports:
        storageClass: "nfs-csi"
        accessMode: ReadWriteOnce
        size: 1Gi
    database:        # PostgreSQL database component
      storageClass: "nfs-csi"
      accessMode: ReadWriteMany
      size: 2Gi
    redis:    # Redis cache component
      storageClass: "nfs-csi"
      accessMode: ReadWriteMany
      size: 2Gi
    trivy:         # Trivy vulnerability scanner
      storageClass: "nfs-csi"
      accessMode: ReadWriteMany
      size: 5Gi

harborAdminPassword: "magedu.com"

root@k8s-master01:~/learning-k8s-master/helm/harbor# helm install harbor -f harbor-values.yaml harbor/harbor -n harbor
'''

** Please be patient while the chart is being deployed **

1. Get the Harbor URL:

  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        Watch the status with: 'kubectl get svc --namespace harbor -w harbor'
    export SERVICE_IP=$(kubectl get svc --namespace harbor harbor --template "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}")
    echo "Harbor URL: http://$SERVICE_IP/"

2. Login with the following credentials to see your Harbor application

  echo Username: "admin"
  echo Password: $(kubectl get secret --namespace harbor harbor-core-envvars -o jsonpath="{.data.HARBOR_ADMIN_PASSWORD}" | base64 -d)
  '''

root@k8s-master01:~/learning-k8s-master/helm/harbor# kubectl  get pvc -n harbor
NAME                              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-harbor-redis-0               Bound    pvc-a6a15714-b909-4581-9c7d-1a231e2ee56f   2Gi        RWX            nfs-csi        21s
data-harbor-trivy-0               Bound    pvc-a249ad5b-a8de-401c-8996-41050caa185f   5Gi        RWX            nfs-csi        21s
database-data-harbor-database-0   Bound    pvc-4a4e917f-6506-4e77-bbdd-6b71935d9b92   2Gi        RWX            nfs-csi        21s
harbor-chartmuseum                Bound    pvc-0dbf7775-9c47-462f-979c-5a405691f7f8   5Gi        RWX            nfs-csi        22s
harbor-jobservice                 Bound    pvc-ec05d3a5-4cc1-4316-8325-2d5d86c48297   1Gi        RWO            nfs-csi        22s
harbor-registry                   Bound    pvc-03e33691-50ec-434f-aee9-f2994a32656f   5Gi        RWX            nfs-csi        22s
root@k8s-master01:~/learning-k8s-master/helm/harbor#
root@k8s-master01:~/learning-k8s-master/helm/harbor# kubectl  get pvc -n harbor
NAME                              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-harbor-redis-0               Bound    pvc-a6a15714-b909-4581-9c7d-1a231e2ee56f   2Gi        RWX            nfs-csi        39s
data-harbor-trivy-0               Bound    pvc-a249ad5b-a8de-401c-8996-41050caa185f   5Gi        RWX            nfs-csi        39s
database-data-harbor-database-0   Bound    pvc-4a4e917f-6506-4e77-bbdd-6b71935d9b92   2Gi        RWX            nfs-csi        39s
harbor-chartmuseum                Bound    pvc-0dbf7775-9c47-462f-979c-5a405691f7f8   5Gi        RWX            nfs-csi        40s
harbor-jobservice                 Bound    pvc-ec05d3a5-4cc1-4316-8325-2d5d86c48297   1Gi        RWO            nfs-csi        40s
harbor-registry                   Bound    pvc-03e33691-50ec-434f-aee9-f2994a32656f   5Gi        RWX            nfs-csi        40s
root@k8s-master01:~/learning-k8s-master/helm/harbor# kubectl  get svc -n harbor
NAME                   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
harbor-chartmuseum     ClusterIP   10.106.205.196   <none>        80/TCP              51s
harbor-core            ClusterIP   10.100.218.129   <none>        80/TCP              51s
harbor-database        ClusterIP   10.96.255.30     <none>        5432/TCP            51s
harbor-jobservice      ClusterIP   10.98.9.190      <none>        80/TCP              51s
harbor-notary-server   ClusterIP   10.99.149.180    <none>        4443/TCP            51s
harbor-notary-signer   ClusterIP   10.100.172.152   <none>        7899/TCP            51s
harbor-portal          ClusterIP   10.97.113.248    <none>        80/TCP              51s
harbor-redis           ClusterIP   10.108.17.81     <none>        6379/TCP            51s
harbor-registry        ClusterIP   10.102.10.26     <none>        5000/TCP,8080/TCP   51s
harbor-trivy           ClusterIP   10.109.41.60     <none>        8080/TCP            51s
root@k8s-master01:~/learning-k8s-master/helm/harbor# kubectl get pods -n harbor
NAME                                    READY   STATUS    RESTARTS      AGE
harbor-chartmuseum-7c578d79f4-2mz49     1/1     Running   0             77s
harbor-core-579b5455b9-rtcn8            1/1     Running   0             78s
harbor-database-0                       1/1     Running   0             77s
harbor-jobservice-7867f78ff-bp268       0/1     Running   1 (24s ago)   77s
harbor-notary-server-696bc76fd9-tpxbx   0/1     Running   1 (46s ago)   78s
harbor-notary-signer-5b5b6cd797-lzqdr   1/1     Running   1 (42s ago)   78s
harbor-portal-687956689b-kb6gx          1/1     Running   0             78s
harbor-redis-0                          1/1     Running   0             77s
harbor-registry-5c5f4c99fb-4b6fk        2/2     Running   0             78s
harbor-trivy-0                          1/1     Running   0             77s
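The push itself is not captured above. A sketch, assuming hub.magedu.com resolves to the ingress external IP 192.168.56.100 on the client, the self-signed certificate is trusted (or the registry is marked insecure), and the default library project is used:

```bash
# Sketch: log in to Harbor and push a test image to the default "library" project
docker login hub.magedu.com -u admin -p magedu.com
docker pull nginx:alpine
docker tag nginx:alpine hub.magedu.com/library/nginx:alpine
docker push hub.magedu.com/library/nginx:alpine
```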

Deploy a Redis cluster to Kubernetes with Helm.

root@k8s-master01:~# helm repo add bitnami https://charts.bitnami.com/bitnami
root@k8s-master01:~# helm search repo redis
NAME                    CHART VERSION   APP VERSION     DESCRIPTION
bitnami/redis           17.8.3          7.0.9           Redis(R) is an open source, advanced key-value ...
bitnami/redis-cluster   8.3.10          7.0.9           Redis(R) is an open source, scalable, distribut...
root@k8s-master01:~# mkdir redis
root@k8s-master01:~# cd redis/
root@k8s-master01:~/redis# helm pull bitnami/redis-cluster
root@k8s-master01:~/redis# tar xf redis-cluster-8.3.10.tgz
root@k8s-master01:~/redis# cd redis-cluster/
# Set the parameters
root@k8s-master01:~/redis/redis-cluster# cat values.yaml |grep -Ev "^$|#"|head
global:
  imageRegistry: ""
  imagePullSecrets: []
  storageClass: "nfs-csi"
  redis:
    password: "redis@1234"

root@k8s-master01:~/redis/redis-cluster# kubectl  create ns redis
namespace/redis created
root@k8s-master01:~/redis/redis-cluster# helm  install redis-cluster -f values.yaml  bitnami/redis-cluster -n redis

root@k8s-master01:~/redis/redis-cluster# kubectl  get all -n redis
NAME                  READY   STATUS              RESTARTS   AGE
pod/redis-cluster-0   0/1     ContainerCreating   0          27s
pod/redis-cluster-1   0/1     ContainerCreating   0          27s
pod/redis-cluster-2   0/1     ContainerCreating   0          27s
pod/redis-cluster-3   0/1     ContainerCreating   0          27s
pod/redis-cluster-4   0/1     ContainerCreating   0          27s
pod/redis-cluster-5   0/1     ContainerCreating   0          27s

NAME                             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)              AGE
service/redis-cluster            ClusterIP   10.102.247.110   <none>        6379/TCP             27s
service/redis-cluster-headless   ClusterIP   None             <none>        6379/TCP,16379/TCP   27s

NAME                             READY   AGE
statefulset.apps/redis-cluster   0/6     27s


root@k8s-master01:~/redis/redis-cluster# kubectl  get all -n redis
NAME                  READY   STATUS    RESTARTS      AGE
pod/redis-cluster-0   1/1     Running   0             112s
pod/redis-cluster-1   1/1     Running   1 (55s ago)   112s
pod/redis-cluster-2   1/1     Running   1 (22s ago)   112s
pod/redis-cluster-3   1/1     Running   1 (19s ago)   112s
pod/redis-cluster-4   1/1     Running   0             112s
pod/redis-cluster-5   1/1     Running   1 (53s ago)   112s

NAME                             TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)              AGE
service/redis-cluster            ClusterIP   10.102.247.110   <none>        6379/TCP             112s
service/redis-cluster-headless   ClusterIP   None             <none>        6379/TCP,16379/TCP   112s

NAME                             READY   AGE
statefulset.apps/redis-cluster   6/6     112s


root@k8s-master01:~/redis/redis-cluster# export REDIS_PASSWORD=$(kubectl get secret --namespace "redis" redis-cluster -o jsonpath="{.data.redis-password}" | base64 -d)
root@k8s-master01:~/redis/redis-cluster# echo  "REDIS_PASSWORD"
REDIS_PASSWORD
# Note: the $ was omitted above, so the literal string was printed; use echo "$REDIS_PASSWORD" to show the actual value

# Verify
kubectl run --namespace redis redis-cluster-client --rm --tty -i --restart='Never' \
 --env REDIS_PASSWORD=$REDIS_PASSWORD \
--image docker.io/bitnami/redis-cluster:7.0.9-debian-11-r1 -- bash

redis-cli -c -h redis-cluster -a $REDIS_PASSWORD

redis-cluster:6379> set a helloworld
-> Redirected to slot [15495] located at 10.244.3.128:6379
OK
10.244.3.128:6379> get a
"helloworld"

10.244.3.128:6379> info
# Server
redis_version:7.0.9
redis_git_sha1:00000000
redis_git_dirty:0
redis_build_id:8083125bb2a0d2ac
redis_mode:cluster
os:Linux 5.4.0-144-generic x86_64
arch_bits:64
monotonic_clock:POSIX clock_gettime
multiplexing_api:epoll
atomicvar_api:c11-builtin
gcc_version:10.2.1
process_id:1
process_supervised:no
run_id:04eed170befe14aa0e4b5a5a7da6c79e3b451d74
tcp_port:6379
server_time_usec:1679922206508959
uptime_in_seconds:212
uptime_in_days:0
hz:10
configured_hz:10
lru_clock:2200606
executable:/redis-server
config_file:
io_threads_active:0

# Clients
connected_clients:1
cluster_connections:10
maxclients:10000
client_recent_max_input_buffer:20480
client_recent_max_output_buffer:20504
blocked_clients:0
tracking_clients:0
clients_in_timeout_table:0

# Memory
used_memory:1853088
used_memory_human:1.77M
used_memory_rss:9736192
used_memory_rss_human:9.29M
used_memory_peak:1868352
used_memory_peak_human:1.78M
used_memory_peak_perc:99.18%
used_memory_overhead:1633404
used_memory_startup:1599816
used_memory_dataset:219684
used_memory_dataset_perc:86.74%
allocator_allocated:2038760
allocator_active:2445312
allocator_resident:5070848
total_system_memory:4090572800
total_system_memory_human:3.81G
used_memory_lua:31744
used_memory_vm_eval:31744
used_memory_lua_human:31.00K
used_memory_scripts_eval:0
number_of_cached_scripts:0
number_of_functions:0
number_of_libraries:0
used_memory_vm_functions:32768
used_memory_vm_total:64512
used_memory_vm_total_human:63.00K
used_memory_functions:184
used_memory_scripts:184
used_memory_scripts_human:184B
maxmemory:0
maxmemory_human:0B
maxmemory_policy:noeviction
allocator_frag_ratio:1.20
allocator_frag_bytes:406552
allocator_rss_ratio:2.07
allocator_rss_bytes:2625536
rss_overhead_ratio:1.92
rss_overhead_bytes:4665344
mem_fragmentation_ratio:5.32
mem_fragmentation_bytes:7905992
mem_not_counted_for_evict:128
mem_replication_backlog:20508
mem_total_replication_buffers:20504
mem_clients_slaves:0
mem_clients_normal:1800
mem_cluster_links:10880
mem_aof_buffer:128
mem_allocator:jemalloc-5.2.1
active_defrag_running:0
lazyfree_pending_objects:0
lazyfreed_objects:0

# Persistence
loading:0
async_loading:0
current_cow_peak:0
current_cow_size:0
current_cow_size_age:0
current_fork_perc:0.00
current_save_keys_processed:0
current_save_keys_total:0
rdb_changes_since_last_save:1
rdb_bgsave_in_progress:0
rdb_last_save_time:1679921996
rdb_last_bgsave_status:ok
rdb_last_bgsave_time_sec:0
rdb_current_bgsave_time_sec:-1
rdb_saves:2
rdb_last_cow_size:339968
rdb_last_load_keys_expired:0
rdb_last_load_keys_loaded:0
aof_enabled:1
aof_rewrite_in_progress:0
aof_rewrite_scheduled:0
aof_last_rewrite_time_sec:-1
aof_current_rewrite_time_sec:-1
aof_last_bgrewrite_status:ok
aof_rewrites:0
aof_rewrites_consecutive_failures:0
aof_last_write_status:ok
aof_last_cow_size:0
module_fork_in_progress:0
module_fork_last_cow_size:0
aof_current_size:148
aof_base_size:88
aof_pending_rewrite:0
aof_buffer_length:0
aof_pending_bio_fsync:0
aof_delayed_fsync:0

# Stats
total_connections_received:86
total_commands_processed:385
instantaneous_ops_per_sec:1
total_net_input_bytes:11697
total_net_output_bytes:2286
total_net_repl_input_bytes:0
total_net_repl_output_bytes:694
instantaneous_input_kbps:0.04
instantaneous_output_kbps:0.00
instantaneous_input_repl_kbps:0.00
instantaneous_output_repl_kbps:0.00
rejected_connections:0
sync_full:2
sync_partial_ok:0
sync_partial_err:2
expired_keys:0
expired_stale_perc:0.00
expired_time_cap_reached_count:0
expire_cycle_cpu_milliseconds:3
evicted_keys:0
evicted_clients:0
total_eviction_exceeded_time:0
current_eviction_exceeded_time:0
keyspace_hits:1
keyspace_misses:0
pubsub_channels:0
pubsub_patterns:0
pubsubshard_channels:0
latest_fork_usec:349
total_forks:2
migrate_cached_sockets:0
slave_expires_tracked_keys:0
active_defrag_hits:0
active_defrag_misses:0
active_defrag_key_hits:0
active_defrag_key_misses:0
total_active_defrag_time:0
current_active_defrag_time:0
tracking_total_keys:0
tracking_total_items:0
tracking_total_prefixes:0
unexpected_error_replies:0
total_error_replies:2
dump_payload_sanitizations:0
total_reads_processed:471
total_writes_processed:199
io_threaded_reads_processed:0
io_threaded_writes_processed:0
reply_buffer_shrinks:3
reply_buffer_expands:0

# Replication
role:master
connected_slaves:1
slave0:ip=10.244.1.156,port=6379,state=online,offset=340,lag=0
master_failover_state:no-failover
master_replid:0bd1e2923b98684a44640e736e06e24601ba561f
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:340
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:340

# CPU
used_cpu_sys:0.320259
used_cpu_user:0.211444
used_cpu_sys_children:0.135392
used_cpu_user_children:0.160890
used_cpu_sys_main_thread:0.320565
used_cpu_user_main_thread:0.208367

# Modules

# Errorstats
errorstat_NOAUTH:count=2

# Cluster
cluster_enabled:1

# Keyspace
db0:keys=1,expires=0,avg_ttl=0
10.244.3.128:6379>
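A couple of cluster-level checks can also be run from the same client pod; a sketch using the service name and password from above:

```bash
# Sketch: confirm the cluster state and the slot/replica layout
redis-cli -c -h redis-cluster -a "$REDIS_PASSWORD" cluster info | head -n 3
redis-cli -c -h redis-cluster -a "$REDIS_PASSWORD" cluster nodes
```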

 
