java
export JAVA_HOME=/home/software/jdk1.8.0_202
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib:$CLASSPATH
export JAVA_PATH=${JAVA_HOME}/bin:${JRE_HOME}/bin
export PATH=$PATH:${JAVA_PATH}
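Quick check that the JDK is picked up (assuming the exports above were added to /etc/profile):
source /etc/profile
java -version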
etcd
docker run -d --name etcd-server --publish 2379:2379 --publish 2380:2380 --env ALLOW_NONE_AUTHENTICATION=yes --env ETCD_ADVERTISE_CLIENT_URLS=http://etcd-server:2379 bitnami/etcd:latest
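Sanity check, assuming the container name above (etcdctl ships inside the bitnami image):
docker exec etcd-server etcdctl put /test hello
docker exec etcd-server etcdctl get /test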
grafana
docker run -d -p 3000:3000 --name=grafana grafana/grafana
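Verify grafana is up (default login on first start is admin/admin):
curl http://localhost:3000/api/health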
h2 (version mismatch keeps the web console from connecting)
wget https://h2database.com/h2-2019-10-14.zip --no-check-certificate
nohup java -cp "h2-1.4.199.jar:$H2DRIVERS:$CLASSPATH" org.h2.tools.Server -tcpAllowOthers -webAllowOthers -webPort 8082 "$@" &
zookeeper
docker run --privileged=true -d --name zookeeper --publish 2181:2181 zookeeper:latest
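Check the node is serving (zkServer.sh is on the PATH in the official image):
docker exec zookeeper zkServer.sh status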
mysql
docker run -p 3306:3306 --name mysql -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7
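Verify the instance accepts connections with the root password set above:
docker exec mysql mysql -uroot -p123456 -e "SELECT VERSION();"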
skywalking + es
docker pull elasticsearch:7.5.1
docker pull apache/skywalking-oap-server:6.6.0-es7
docker pull apache/skywalking-ui:6.6.0

docker run -d --name=es7 -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7.5.1

docker run --name oap -d \
    --restart=always \
    -e TZ=Asia/Shanghai \
    -p 12800:12800 \
    -p 11800:11800 \
    --link es7:es7 \
    -e SW_STORAGE=elasticsearch \
    -e SW_STORAGE_ES_CLUSTER_NODES=es7:9200 \
    apache/skywalking-oap-server:6.6.0-es7

docker run -d --name skywalking-ui \
    --restart=always \
    -e TZ=Asia/Shanghai \
    -p 8088:8080 \
    --link oap:oap \
    -e SW_OAP_ADDRESS=oap:12800 \
    apache/skywalking-ui:6.6.0
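Confirm elasticsearch is healthy before the OAP server starts writing to it:
curl http://localhost:9200/_cluster/health?pretty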
prometheus (-v mounts a local directory into the container; the paths passed afterwards refer to the in-container path after mounting)
docker run --name prometheus -d -p 9099:9090 -v /home/conf/prometheus-data/:/prometheus-data 227ae20e1b04 --web.enable-lifecycle --config.file=/prometheus-data/prometheus.yml
----prometheus.yml
global:
  scrape_interval: 15s
  external_labels:
    monitor: 'codelab-monitor'
scrape_configs:
  - job_name: 'prometheus'
    scrape_interval: 5s
    static_configs:
      - targets: ['localhost:9090']
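Since --web.enable-lifecycle is set above, config changes can be hot-reloaded instead of restarting the container (9099 is the host port mapped earlier):
curl -X POST http://localhost:9099/-/reload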
kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
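Verify the binary:
kubectl version --client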
minikube
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
    && chmod +x minikube
sudo cp minikube /usr/local/bin && rm minikube
k8s install and start
minikube start --memory=4096 --cpus=4 --force --driver=docker --kubernetes-version=v1.22.0
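Confirm the node came up Ready:
minikube status
kubectl get nodes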
create an nginx pod and check it in the dashboard
minikube addons enable ingress
kubectl run nginx --image=nginx --port=80
# bring up the dashboard to take a look
minikube dashboard
# open proxy access
kubectl proxy --port=33458 --address='0.0.0.0' --accept-hosts='^.*' &
http://139.224.65.218:33458/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/#/workloads?namespace=default
deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      app: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx
        ports:
        - containerPort: 80
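Apply and wait for the rollout (my-nginx.yaml is an assumed filename for the manifest above):
kubectl apply -f my-nginx.yaml
kubectl rollout status deployment/my-nginx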
service
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx-service
spec:
  type: NodePort
  selector:
    app: my-nginx
  ports:
  - port: 8000
    targetPort: 80
    nodePort: 32500
the ingress admission jobs fail — apply them manually
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-create
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: pre-install,pre-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-create
      labels:
        helm.sh/chart: ingress-nginx-4.0.4
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.0.3
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: create
          image: itworker365/kube-webhook-certgen:latest
          imagePullPolicy: IfNotPresent
          args:
            - create
            - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
            - --namespace=$(POD_NAMESPACE)
            - --secret-name=ingress-nginx-admission
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ingress-nginx-admission-patch
  namespace: ingress-nginx
  annotations:
    helm.sh/hook: post-install,post-upgrade
    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: admission-webhook
spec:
  template:
    metadata:
      name: ingress-nginx-admission-patch
      labels:
        helm.sh/chart: ingress-nginx-4.0.4
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/version: 1.0.3
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: admission-webhook
    spec:
      containers:
        - name: patch
          image: itworker365/kube-webhook-certgen:latest
          imagePullPolicy: IfNotPresent
          args:
            - patch
            - --webhook-name=ingress-nginx-admission
            - --namespace=$(POD_NAMESPACE)
            - --patch-mutating=false
            - --secret-name=ingress-nginx-admission
            - --patch-failure-policy=Fail
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
      restartPolicy: OnFailure
      serviceAccountName: ingress-nginx-admission
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 2000
nginx-ingress-controller fails — apply a patched deployment
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    helm.sh/chart: ingress-nginx-4.0.4
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/version: 1.0.3
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: itworker365/controller:v1.0.3
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller
            - --election-id=ingress-controller-leader
            - --controller-class=k8s.io/ingress-nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --validating-webhook=:8443
            - --validating-webhook-certificate=/usr/local/certificates/cert
            - --validating-webhook-key=/usr/local/certificates/key
          securityContext:
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            runAsUser: 101
            allowPrivilegeEscalation: true
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: webhook
              containerPort: 8443
              protocol: TCP
          volumeMounts:
            - name: webhook-cert
              mountPath: /usr/local/certificates/
              readOnly: true
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 300
      volumes:
        - name: webhook-cert
          secret:
            secretName: ingress-nginx-admission
create an ingress rule to try it out
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress
  namespace: default
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /testpath
        pathType: Prefix
        backend:
          service:
            name: nginx-service
            port:
              number: 80
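Route check through the controller, assuming the minikube ip noted earlier (the ingress listens on the node's port 80):
curl http://192.168.49.2/testpath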
loop access test
kubectl exec -it **** -- /bin/bash
cat >> /usr/share/nginx/html/index.html << EOF
X
EOF
for i in `seq 1 10`; do curl http://192.168.49.2:32500 --silent -w "Status: %{http_code}\n"; done
helm
wget https://get.helm.sh/helm-v3.7.1-linux-amd64.tar.gz
tar -zxvf helm-v3.7.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
istio
wget https://github.com/istio/istio/releases/download/1.11.4/istio-1.11.4-linux-arm64.tar.gz
tar -zxvf istio-1.11.4-linux-arm64.tar.gz
cp istio-1.11.4/bin/istioctl /usr/local/bin/
istioctl install --set profile=demo -y
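Confirm the control plane came up:
kubectl get pods -n istio-system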
if you need the other related components (grafana, jaeger, kiali, prometheus) or the samples
kubectl apply -f samples/addons
kubectl apply -f samples/addons/extras
istio injection management - check status, enable
kubectl get ns -L istio-injection
kubectl label namespace default istio-injection=enabled
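Verify injection took effect on newly created pods — each should list an istio-proxy sidecar next to its app container:
kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{": "}{.spec.containers[*].name}{"\n"}{end}'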
deploy an istio sample app
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
deploy the gateway
kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
check the gateway address
kubectl get svc istio-ingressgateway -n istio-system
kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}'
# from the earlier output, the minikube ip is 192.168.49.2
minikube service --url nginx-service
# combining the above, the app can be reached at
curl http://192.168.49.2:30926/productpage
apisix
kubectl create namespace apisix
helm repo add apisix https://charts.apiseven.com
helm repo update
helm install apisix apisix/apisix --set admin.allow.ipList="{0.0.0.0/0}" --set etcd.enabled=false --set etcd.host={http://139.224.65.218:2379} --namespace apisix
helm install apisix-dashboard apisix/apisix-dashboard --set config.conf.etcd.endpoints={http://139.224.65.218:2379} --namespace apisix
helm install apisix-ingress-controller apisix/apisix-ingress-controller --namespace apisix --set config.apisix.baseURL=http://apisix-admin:9180/apisix/admin --set config.apisix.adminKey=edd1c9f034335f136f87ad84b625c8f1
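Sanity check the Admin API via a port-forward (apisix-admin is the service name referenced in the baseURL above; the key is the chart default used in the install):
kubectl port-forward svc/apisix-admin 9180:9180 -n apisix &
curl http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'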
------
kiali - external access to these services all requires switching them to NodePort
apiVersion: v1
kind: Service
metadata:
  name: kiali
  namespace: istio-system
  labels:
    helm.sh/chart: kiali-server-1.42.0
    app: kiali
    app.kubernetes.io/name: kiali
    app.kubernetes.io/instance: kiali
    version: "v1.42.0"
    app.kubernetes.io/version: "v1.42.0"
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/part-of: "kiali"
  annotations:
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: kiali
    app.kubernetes.io/instance: kiali
  ports:
  - name: http
    protocol: TCP
    port: 20001
    targetPort: 20001
    nodePort: 32501
  - name: http-metrics
    protocol: TCP
    port: 9090
    targetPort: 9090
    nodePort: 32502
then forward through nginx
/usr/local/conf/nginx.conf

upstream nginxsvctest {
    server 192.168.49.2:32500;
}
upstream test32501 {
    server 192.168.49.2:32501;
}
upstream test32502 {
    server 192.168.49.2:32502;
}
server {
    listen 32500;
    location / {
        proxy_pass http://nginxsvctest;
    }
}
server {
    listen 32501;
    location / {
        proxy_pass http://test32501;
    }
}
server {
    listen 32502;
    location / {
        proxy_pass http://test32502;
    }
}
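Validate the config and reload without dropping connections:
nginx -t -c /usr/local/conf/nginx.conf
nginx -s reload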
if kiali complains that grafana isn't configured, add it to the kiali config and restart
external_services:
  custom_dashboards:
    enabled: true
  grafana:
    url: "http://10.105.56.167:3000"
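Restart kiali so it picks up the config change:
kubectl rollout restart deployment/kiali -n istio-system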
apisix
helm repo add apisix https://charts.apiseven.com helm repo update
# install apisix
helm install apisix apisix/apisix --set admin.allow.ipList="{0.0.0.0/0}" --set etcd.enabled=false --set etcd.host={http://139.xxx.xxx.218:2379} --namespace default
export NODE_PORT=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
export NODE_IP=$(kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
http://192.168.49.2:32535

# install apisix-dashboard
helm install apisix-dashboard apisix/apisix-dashboard --set config.conf.etcd.endpoints={http://139.xxx.xxx.218:2379} --namespace default
export POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=apisix-dashboard,app.kubernetes.io/instance=apisix-dashboard" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace default $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace default port-forward $POD_NAME 8080:$CONTAINER_PORT

# install apisix-ingress-controller
helm install apisix-ingress-controller apisix/apisix-ingress-controller --namespace default --set config.apisix.baseURL=http://apisix-admin:9180/apisix/admin --set config.apisix.adminKey=edd1c9f034335f136f87ad84b625c8f1
other services
apiVersion: apps/v1
kind: Deployment
metadata:
  name: etcd
  namespace: etcd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: etcd
  template:
    metadata:
      labels:
        app: etcd
    spec:
      containers:
        - name: gateway
          image: quay.io/coreos/etcd:v3.4.13
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCDCTL_API
              value: "3"
            - name: ETCD_LISTEN_CLIENT_URLS
              value: "http://0.0.0.0:2379"
            - name: ETCD_ADVERTISE_CLIENT_URLS
              value: "http://0.0.0.0:2379"
          ports:
            - containerPort: 2379
              protocol: TCP
              name: tcp01
            - containerPort: 2380
              protocol: TCP
              name: tcp02
          volumeMounts:
            - mountPath: /etcd
              name: etcd-etcd
            - mountPath: /etcd-server.etcd
              name: etcd-data
      volumes:
        - name: etcd-etcd
          persistentVolumeClaim:
            claimName: etcd-etcd
        - name: etcd-data
          persistentVolumeClaim:
            claimName: etcd-data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-etcd
  namespace: etcd
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  storageClassName: disk
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-data
  namespace: etcd
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 40Gi
  storageClassName: disk
---
kind: Service
apiVersion: v1
metadata:
  name: etcd
  namespace: etcd
  labels:
    name: etcd
spec:
  ports:
    - name: etcd-01
      protocol: TCP
      targetPort: 2379
      port: 2379
    - name: etcd-02
      protocol: TCP
      targetPort: 2380
      port: 2380
  selector:
    app: etcd
helm repo add skywalking https://apache.jfrog.io/artifactory/skywalking-helm
helm install "skywalking" skywalking/skywalking -n "skywalking" \
    --set oap.image.tag=9.2.0 \
    --set oap.storageType=elasticsearch \
    --set ui.image.tag=9.2.0
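Reach the UI once the pods are ready — skywalking-ui is an assumed service name, it may differ by chart version (check with kubectl get svc -n skywalking):
kubectl port-forward svc/skywalking-ui 8080:80 -n skywalking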
exec the schema sql first: https://github.com/xuxueli/xxl-job/blob/2.0.2/doc/db/tables_xxl_job.sql
apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1
kind: Deployment
metadata:
  name: xxl-job-admin
  labels:
    app: xxl-job-admin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: xxl-job-admin
  template:
    metadata:
      labels:
        app: xxl-job-admin
    spec:
      containers:
        - name: xxl-job-admin
          image: xuxueli/xxl-job-admin:2.0.2
          ports:
            - containerPort: 8080
          env:
            - name: PARAMS
              value: "--spring.datasource.url=jdbc:mysql://139.196.230.xxx:3306/xxl_job?Unicode=true&characterEncoding=UTF-8&useSSL=false --spring.datasource.username=root --spring.datasource.password=123456"
          resources:
            limits:
              cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
  name: xxl-job-admin
  labels:
    app: xxl-job-admin
spec:
  ports:
    - port: 8080
      protocol: TCP
      name: http
  selector:
    app: xxl-job-admin
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce docker-ce-cli containerd.io
curl-format.txt
-------------------------------\n
time_namelookup: %{time_namelookup}\n
time_connect: %{time_connect}\n
time_appconnect: %{time_appconnect}\n
time_redirect: %{time_redirect}\n
time_pretransfer: %{time_pretransfer}\n
time_starttransfer: %{time_starttransfer}\n
-------------------------------\n
time_total: %{time_total}\n
\n

usage:
#!/bin/bash
while true
do
    sleep 1
    curl -w "@curl-format.txt" -o /dev/null -s -L ip:port/test/sleep?sleep=100
done
for i in {1..300}; do jstack 6 >> /home/${ip}_$(date +%Y%m%d_%T).tdump; sleep 1; done
docker run -itd --name redis-test -p 6379:6379 redis
docker run -e PARAMS="--spring.datasource.url=jdbc:mysql://139.196.xxx.xxx:3306/xxl_job?Unicode=true&characterEncoding=UTF-8 --spring.datasource.username=root --spring.datasource.password=123456" -p 8899:8080 -v /tmp:/data/applogs --name xxl-job-admin xuxueli/xxl-job-admin:2.3.0

docker run -e PARAMS="--spring.datasource.url=jdbc:mysql://139.196.xxx.xxx:3306/xxl-job?Unicode=true&characterEncoding=UTF-8 --spring.datasource.username=xxl-job --spring.datasource.password=123456 --xxl.job.login.password=123456" -p 33333:8080 -v /tmp:/data/applogs --name xxl-job-admin202 --privileged=true -d xuxueli/xxl-job-admin:2.0.2
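The admin console should answer on the mapped port under the /xxl-job-admin context path (default login admin/123456):
curl -I http://localhost:8899/xxl-job-admin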
CentOS
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y
systemctl start docker
systemctl enable docker
docker info
apiVersion: apps/v1
kind: Deployment
metadata:
name: sptestv1
labels:
app: sptestv1
spec:
replicas: 2
selector:
matchLabels:
app: sptestv1
template:
metadata:
labels:
app: sptestv1
spec:
containers:
- name: sptestv1
image: itworker365/sptest:v2023
ports:
- containerPort: 7777
resources:
limits:
cpu: "500m"
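Deploy and hit the container directly — sptestv1.yaml is an assumed filename for the manifest above, and the root path is just a probe:
kubectl apply -f sptestv1.yaml
kubectl port-forward deployment/sptestv1 7777:7777 &
curl http://127.0.0.1:7777/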