SkyWalking (Part 4): Collecting Traces from Dubbo Microservices in a Kubernetes Environment

1. Deploy ZooKeeper

1.1 Build the image

Prepare the Java base image

docker pull elevy/slim_java:8
docker tag elevy/slim_java:8 harbor.chu.net/baseimages/slim_java:8
docker push harbor.chu.net/baseimages/slim_java:8

Write the Dockerfile

FROM harbor.chu.net/baseimages/slim_java:8

ENV ZK_VERSION 3.4.14

ADD repositories /etc/apk/repositories

# Download Zookeeper
COPY zookeeper-3.4.14.tar.gz /tmp/zk.tgz
COPY zookeeper-3.4.14.tar.gz.asc /tmp/zk.tgz.asc
COPY KEYS /tmp/KEYS

RUN apk add --no-cache --virtual .build-deps \
      ca-certificates \
      gnupg \
      tar \
      wget && \
    #
    # Install dependencies
    apk add --no-cache \
      bash && \
    #
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /

RUN chmod a+x /zookeeper/bin/zkReady.sh /entrypoint.sh

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010
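The entrypoint.sh copied above is not listed in this post. A minimal sketch of what it has to do, under the assumption that the MYID and SERVERS environment variables drive the node id and the ensemble definition (the real script may differ):

#!/bin/bash
# Sketch only: write this node's id and append the ensemble members to zoo.cfg.
echo "${MYID}" > /zookeeper/data/myid
id=1
for server in $(echo "${SERVERS}" | tr ',' ' '); do
    echo "server.${id}=${server}:2888:3888" >> /zookeeper/conf/zoo.cfg
    id=$((id + 1))
done
exec "$@"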

Run the build

docker build -t harbor.chu.net/web/zookeeper:v3.4.14 .
docker push harbor.chu.net/web/zookeeper:v3.4.14

Test the image

bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower

1.2 Create PVs

Create the data directories on the NFS server (10.0.0.101)

mkdir -p /data/k8sdata/web/zookeeper-datadir-1
mkdir -p /data/k8sdata/web/zookeeper-datadir-2
mkdir -p /data/k8sdata/web/zookeeper-datadir-3
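The directories also need to be exported by the NFS server; assuming a simple export of the parent directory to the cluster network (adjust the path, options, and allowed subnet to your environment):

echo '/data/k8sdata *(rw,no_root_squash)' >> /etc/exports
exportfs -r
exportfs -v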

Write the YAML file

apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/zookeeper-datadir-1
  mountOptions:
    - nfsvers=3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/zookeeper-datadir-2
  mountOptions:
    - nfsvers=3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/zookeeper-datadir-3
  mountOptions:
    - nfsvers=3
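Apply the PV manifest (the file name here is illustrative):

kubectl apply -f zookeeper-persistentvolume.yaml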

Check

[root@k8s-master2 pv]#kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
zookeeper-datadir-pv-1 20Gi RWO Retain Available 9s
zookeeper-datadir-pv-2 20Gi RWO Retain Available 9s
zookeeper-datadir-pv-3 20Gi RWO Retain Available 9s

1.3 Create PVCs

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: myserver
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: myserver
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: myserver
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 10Gi

Check

[root@k8s-master2 pv]#kubectl get pvc -n myserver
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
zookeeper-datadir-pvc-1 Bound zookeeper-datadir-pv-1 20Gi RWO 7s
zookeeper-datadir-pvc-2 Bound zookeeper-datadir-pv-2 20Gi RWO 7s
zookeeper-datadir-pvc-3 Bound zookeeper-datadir-pv-3 20Gi RWO 7s

1.4 Create the ZooKeeper cluster

apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: myserver
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: myserver
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: myserver
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: myserver
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
          #emptyDir:
          #  medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-1
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
          #emptyDir:
          #  medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
          #emptyDir:
          #  medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
            - mountPath: "/zookeeper/data"
              name: zookeeper-datadir-pvc-3
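Apply the manifest to create the Services and Deployments (the file name is illustrative):

kubectl apply -f zookeeper.yaml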

1.5 Verify cluster status

Check the Services

[root@k8s-master2 zookeeper]#kubectl get svc -n myserver
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
zookeeper ClusterIP 10.100.163.107 <none> 2181/TCP 6s
zookeeper1 NodePort 10.100.213.46 <none> 2181:32181/TCP,2888:52709/TCP,3888:54540/TCP 6s
zookeeper2 NodePort 10.100.245.126 <none> 2181:32182/TCP,2888:45550/TCP,3888:37673/TCP 6s
zookeeper3 NodePort 10.100.200.136 <none> 2181:32183/TCP,2888:36956/TCP,3888:36589/TCP 6s

Check the Pods, logs, and related status

[root@k8s-master2 zookeeper]#kubectl get pod -n myserver
NAME READY STATUS RESTARTS AGE
zookeeper1-56679f8f44-cgf5q 1/1 Running 0 69s
zookeeper2-5cd9f77979-xq5qn 1/1 Running 0 69s
zookeeper3-5b75f6546b-r8l2z 1/1 Running 0 69s

Check the ZooKeeper container status

# Enter a container and check the cluster role
[root@k8s-master2 zookeeper]#kubectl exec -it zookeeper1-56679f8f44-cgf5q -n myserver bash
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower    # cluster role: leader (primary) or follower
bash-4.3# cat /zookeeper/conf/zoo.cfg
tickTime=10000    # basic heartbeat interval in milliseconds; 10000 ms is the value used here
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
quorumListenOnAllIPs=true
server.1=zookeeper1:2888:3888
server.2=zookeeper2:2888:3888
server.3=zookeeper3:2888:3888
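Because the client port is also published as a NodePort, the ensemble can be checked from outside the cluster with the four-letter-word commands (assuming 10.0.0.41 is a reachable node IP, as used by the client script below, and that the 4lw commands are enabled, which is the 3.4.x default):

echo stat | nc 10.0.0.41 32181
echo ruok | nc 10.0.0.41 32182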

Write test data

#!/usr/bin/env python
# kazoo 2.9.0 is problematic; install the pinned version kazoo==2.8.0
from kazoo.client import KazooClient
# ZooKeeper server address and port (node IP + NodePort)
zk = KazooClient(hosts='10.0.0.41:32183')
# open the connection
zk.start()
# makepath=True creates parent nodes recursively
zk.create('/web/kafka/nodes/id-1', b'10.0.0.41', makepath=True)
# list the child nodes under the root path
node = zk.get_children('/')
print(node)
# close the connection
zk.stop()
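To run the script (the file name is illustrative; kazoo is pinned to 2.8.0 as noted in the comment):

pip install kazoo==2.8.0
python zk_write_test.py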

Log in with the ZooInspector GUI tool to check

View the written data

2. Deploy the Dubbo provider

2.1 Prepare the image

dubbo-provider-2.1.5 and the SkyWalking Java agent

# dubbo-provider directory contents
[root@k8s-master2 provider]#tree -L 2
.
├── Dockerfile
├── apache-skywalking-java-agent-9.0.0.tgz
├── build-command.sh
├── dubbo-server.jar
├── dubbo-server.jar.bak
├── run_java.sh
└── skywalking-agent
├── LICENSE
├── NOTICE
├── activations
├── bootstrap-plugins
├── config
├── licenses
├── logs
├── optional-plugins
├── optional-reporter-plugins
├── plugins
└── skywalking-agent.jar
# Adjust the ZooKeeper registry address in config.properties inside dubbo-server.jar to match your environment
......
dubbo.registry=zookeeper://zookeeper1.myserver.svc.cluster.local:2181?backup=zookeeper2.myserver.svc.cluster.local:2181,zookeeper3.myserver.svc.cluster.local:2181
dubbo.port=8080
# Update the SkyWalking agent settings
cat skywalking-agent/config/agent.config
...
agent.service_name=${SW_AGENT_NAME:k8s-dubbo-provider}
agent.namespace=${SW_AGENT_NAMESPACE:k8s-dubbo}
# Backend service addresses: the skywalking-oap server; 11800 is the gRPC port, 12800 the HTTP port
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:10.0.0.91:11800}
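Because agent.config uses the ${ENV_NAME:default} placeholder syntax, the same settings can also be overridden with environment variables at start-up instead of editing the file baked into the image; an illustrative manual invocation:

export SW_AGENT_NAME=k8s-dubbo-provider
export SW_AGENT_NAMESPACE=k8s-dubbo
export SW_AGENT_COLLECTOR_BACKEND_SERVICES=10.0.0.91:11800
java -javaagent:/skywalking-agent/skywalking-agent.jar -jar /apps/dubbo/provider/dubbo-server.jar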

The run_java.sh startup script

#!/bin/bash
su - user1 -c "java -javaagent:/skywalking-agent/skywalking-agent.jar -jar /apps/dubbo/provider/dubbo-server.jar"
# tail -f /etc/hosts

Write the Dockerfile

For how the JDK base image is built, see the footnote at the end of this post.

#Dubbo provider
FROM harbor.chu.net/baseimages/jdk-base:v8.212
RUN yum install file nc -y && useradd user1 -u 2000
RUN mkdir -p /apps/dubbo/provider/bin
ADD dubbo-server.jar /apps/dubbo/provider/
ADD run_java.sh /apps/dubbo/provider/bin/
ADD skywalking-agent/ /skywalking-agent/
RUN chown user1:user1 -R /apps /skywalking-agent
RUN chmod a+x /apps/dubbo/provider/bin/*.sh
CMD ["/apps/dubbo/provider/bin/run_java.sh"]

Build the image

# Build script
[root@k8s-deploy provider]#cat build-command.sh
#!/bin/bash
#TAG=$1-`date +%Y%m%d%H%M%S`
TAG=$1
docker build -t harbor.chu.net/web/dubbo-provider:${TAG} .
sleep 3
docker push harbor.chu.net/web/dubbo-provider:${TAG}
# Run the build
[root@k8s-deploy provider]#bash build-command.sh v1-skywalking
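A quick optional sanity check that the agent was baked into the image (the image has no ENTRYPOINT, so the command simply overrides CMD):

docker run --rm harbor.chu.net/web/dubbo-provider:v1-skywalking ls -l /skywalking-agent/skywalking-agent.jar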

2.2 Run the provider service

Write the YAML file

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-provider
  name: web-provider-deployment
  namespace: myserver
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web-provider
  template:
    metadata:
      labels:
        app: web-provider
    spec:
      containers:
        - name: web-provider-container
          image: harbor.chu.net/web/dubbo-provider:v1-skywalking
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
              protocol: TCP
              name: http
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-provider
  name: web-provider-spec
  namespace: myserver
spec:
  type: NodePort
  ports:
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
      nodePort: 38800
  selector:
    app: web-provider

Verify status

# Create the provider service
kubectl apply -f provider.yaml
[root@k8s-master2 provider]#kubectl get pod -n myserver
NAME READY STATUS RESTARTS AGE
myserver-provider-deployment-588bccc9d-46zsk 1/1 Running 0 17s
myserver-provider-deployment-588bccc9d-sx4f6 1/1 Running 0 17s
myserver-provider-deployment-588bccc9d-vkb9x 1/1 Running 0 17s
zookeeper1-56679f8f44-cgf5q 1/1 Running 0 27m
zookeeper2-5cd9f77979-xq5qn 1/1 Running 0 27m
zookeeper3-5b75f6546b-r8l2z 1/1 Running 0 27m
[root@k8s-master2 provider]#kubectl get svc -n myserver
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
myserver-provider-spec NodePort 10.100.88.147 <none> 8080:38800/TCP 30s
zookeeper ClusterIP 10.100.163.107 <none> 2181/TCP 28m
zookeeper1 NodePort 10.100.213.46 <none> 2181:32181/TCP,2888:52709/TCP,3888:54540/TCP 28m
zookeeper2 NodePort 10.100.245.126 <none> 2181:32182/TCP,2888:45550/TCP,3888:37673/TCP 28m
zookeeper3 NodePort 10.100.200.136 <none> 2181:32183/TCP,2888:36956/TCP,3888:36589/TCP 28m
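At this point the provider should have registered itself under the /dubbo path in ZooKeeper; a quick check from one of the ZooKeeper pods (pod name taken from the earlier output):

kubectl exec -it zookeeper1-56679f8f44-cgf5q -n myserver -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /dubbo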

3. Deploy the Dubbo consumer

3.1 Prepare the image

The dubbo-consumer-2.1.5 files

[root@k8s-deploy consumer]#tree dubbo-consumer -L 2
.
├── Dockerfile
├── build-command.sh
├── dubbo-client.jar
├── dubbo-client.jar.bak
├── run_java.sh
└── skywalking-agent
├── LICENSE
├── NOTICE
├── activations
├── bootstrap-plugins
├── config
├── licenses
├── logs
├── optional-plugins
├── optional-reporter-plugins
├── plugins
└── skywalking-agent.jar
# Adjust the ZooKeeper registry address in config.properties inside dubbo-client.jar to match your environment
......
dubbo.registry=zookeeper://zookeeper1.myserver.svc.cluster.local:2181?backup=zookeeper2.myserver.svc.cluster.local:2181,zookeeper3.myserver.svc.cluster.local:2181
# Update the SkyWalking agent settings
cat skywalking-agent/config/agent.config
...
agent.service_name=${SW_AGENT_NAME:k8s-dubbo-consumer}
agent.namespace=${SW_AGENT_NAMESPACE:k8s-dubbo}
# Backend service addresses: the skywalking-oap server; 11800 is the gRPC port, 12800 the HTTP port
collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:10.0.0.91:11800}

The run_java.sh startup script

#!/bin/bash
su - user1 -c "java -javaagent:/skywalking-agent/skywalking-agent.jar -jar /apps/dubbo/consumer/dubbo-client.jar"

Write the Dockerfile

#Dubbo consumer
FROM harbor.chu.net/baseimages/jdk-base:v8.212
RUN yum install file -y && useradd user1 -u 2000
RUN mkdir -p /apps/dubbo/consumer/bin
ADD dubbo-client.jar /apps/dubbo/consumer/
ADD run_java.sh /apps/dubbo/consumer/bin/
ADD skywalking-agent/ /skywalking-agent/
RUN chown user1:user1 -R /apps /skywalking-agent
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh
CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

Build the image

# Build script
[root@k8s-deploy consumer]#cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t harbor.chu.net/web/dubbo-consumer:${TAG} .
sleep 1
docker push harbor.chu.net/web/dubbo-consumer:${TAG}
# Run the build
[root@k8s-deploy consumer]#bash build-command.sh v1-skywalking

3.2 Run the consumer service

Write the YAML file

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-consumer
  template:
    metadata:
      labels:
        app: web-consumer
    spec:
      containers:
        - name: web-consumer-container
          image: harbor.chu.net/web/dubbo-consumer:v1-skywalking
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
              protocol: TCP
              name: http
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-server
  namespace: myserver
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 8080
      nodePort: 30001
  selector:
    app: web-consumer

Verify status

[root@k8s-deploy consumer]#kubectl apply -f consumer.yaml
[root@k8s-deploy consumer]#kubectl get pod -n myserver
NAME READY STATUS RESTARTS AGE
myserver-consumer-deployment-6b9f5ccbd9-jwhs5 1/1 Running 0 9s
myserver-provider-deployment-588bccc9d-46zsk 1/1 Running 0 25m
myserver-provider-deployment-588bccc9d-sx4f6 1/1 Running 0 25m
myserver-provider-deployment-588bccc9d-vkb9x 1/1 Running 0 25m
zookeeper1-56679f8f44-cgf5q 1/1 Running 0 97m
zookeeper2-5cd9f77979-xq5qn 1/1 Running 0 97m
zookeeper3-5b75f6546b-r8l2z 1/1 Running 0 97m
[root@k8s-master2 consumer]#kubectl get svc -n myserver
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
myserver-consumer-server NodePort 10.100.244.225 <none> 80:30001/TCP 15s
myserver-provider-spec NodePort 10.100.88.147 <none> 8080:38800/TCP 25m
zookeeper ClusterIP 10.100.163.107 <none> 2181/TCP 97m
zookeeper1 NodePort 10.100.213.46 <none> 2181:32181/TCP,2888:52709/TCP,3888:54540/TCP 97m
zookeeper2 NodePort 10.100.245.126 <none> 2181:32182/TCP,2888:45550/TCP,3888:37673/TCP 97m
zookeeper3 NodePort 10.100.200.136 <none> 2181:32183/TCP,2888:36956/TCP,3888:36589/TCP 97m

3.3 View the services in SkyWalking

4. Verify SkyWalking

4.1 Access the consumer via NodePort 30001
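Generate a few requests against any node IP (10.0.0.41 is used here as in the earlier examples; the exact URL path depends on the demo application):

curl http://10.0.0.41:30001/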

4.2 Verify SkyWalking

  • SkyWalking traces the status of all services

  • Topology

  • Consumer service status

  • Provider service status

  • Consumer instance status

JVM status

  • Provider instances

Status of multiple pod instances

Status of a single pod instance

Pod JVM status

  • Consumer endpoint status

  • Trace of a request

  1. Building the JDK base image

    • Write the Dockerfile
    #JDK Base Image
    FROM harbor.chu.net/baseimages/centos-base:7.9.2009
    ADD jdk-8u212-linux-x64.tar.gz /usr/local/src/
    RUN ln -sv /usr/local/src/jdk1.8.0_212 /usr/local/jdk
    ADD profile /etc/profile
    ENV JAVA_HOME /usr/local/jdk
    ENV JRE_HOME $JAVA_HOME/jre
    ENV CLASSPATH $JAVA_HOME/lib/:$JRE_HOME/lib/
    ENV PATH $PATH:$JAVA_HOME/bin
    • Build the image and push it to the Harbor registry
    nerdctl build -t harbor.chu.net/baseimages/jdk-base:v8.212 .
    nerdctl push harbor.chu.net/baseimages/jdk-base:v8.212
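    • Optionally verify the Java installation in the image (assuming the base image defines no conflicting entrypoint):
    nerdctl run --rm harbor.chu.net/baseimages/jdk-base:v8.212 java -version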

