Part 2: Project practice: ZooKeeper cluster, Dubbo provider/consumer service registration and discovery, and dubboadmin

I. ZooKeeper cluster example

1. For the binary Kubernetes installation and Harbor setup, see: https://www.cnblogs.com/Yuanbangchen/p/17219073.html
                 
2. Pull the JDK image and push it to the harbor.zzhz.com registry. The image's base OS is Alpine.
[root@localhost7C zookeeper]# docker pull elevy/slim_java:8
[root@localhost7C zookeeper]# docker tag  0776147f4957 harbor.zzhz.com/pub-images/slim_java:8
[root@localhost7C zookeeper]# docker push harbor.zzhz.com/pub-images/slim_java:8


3. Configuration files
[root@localhost7C zookeeper]# tree 
├── bin
│   └── zkReady.sh
├── build-command.sh
├── conf
│   ├── log4j.properties
│   └── zoo.cfg
├── Dockerfile
└── entrypoint.sh

----------------------------------------
#Cluster status check script
[root@localhost7C zookeeper]# cat bin/zkReady.sh 
#!/bin/bash

/zookeeper/bin/zkServer.sh status | egrep 'Mode: (standalone|leader|follower|observer|leading|following|observing)'
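
The script exits 0 only when zkServer.sh reports a valid mode, so it can double as a simple readiness check. For example, run inside a running container:
/zookeeper/bin/zkReady.sh && echo "zookeeper ready" || echo "zookeeper not ready"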

----------------------------------------------
#Logging configuration file
[root@localhost7C zookeeper]# cat conf/log4j.properties 
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE, ROLLINGFILE
zookeeper.console.threshold=INFO
zookeeper.log.dir=/zookeeper/log
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=INFO
zookeeper.tracelog.dir=/zookeeper/log
zookeeper.tracelog.file=zookeeper_trace.log

#
# ZooKeeper Logging Configuration
#

# Format is "<default threshold> (, <appender>)+

# DEFAULT: console appender only
log4j.rootLogger=${zookeeper.root.logger}

# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE

# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE

#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

#
# Add ROLLINGFILE to rootLogger to get log file output
#    Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}

# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
log4j.appender.ROLLINGFILE.MaxBackupIndex=5

log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n


#
# Add TRACEFILE to rootLogger to get log file output
#    Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}

log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n

------------------------------
#ZooKeeper cluster configuration template (the server.N entries are appended by entrypoint.sh at startup)
[root@localhost7C zookeeper]# cat conf/zoo.cfg 
tickTime=2000
initLimit=20
syncLimit=20
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
quorumListenOnAllIPs=true

----------------------------------------------------
#Entrypoint script that generates the cluster member list in zoo.cfg
[root@localhost7C zookeeper]# cat entrypoint.sh 
#!/bin/bash

echo ${MYID:-1} > /zookeeper/data/myid

if [ -n "$SERVERS" ]; then
    IFS=\, read -a servers <<<"$SERVERS"
    for i in "${!servers[@]}"; do 
        printf "\nserver.%i=%s:2888:3888" "$((1 + $i))" "${servers[$i]}" >> /zookeeper/conf/zoo.cfg
    done
fi

cd /zookeeper
exec "$@"
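
For reference, the loop above appends one server.N line per entry in SERVERS. A quick local simulation (printing to stdout instead of zoo.cfg), using the same value the Deployments below pass in:
SERVERS="zookeeper1,zookeeper2,zookeeper3"
IFS=\, read -a servers <<<"$SERVERS"
for i in "${!servers[@]}"; do
    printf "server.%i=%s:2888:3888\n" "$((1 + $i))" "${servers[$i]}"
done
# Output:
# server.1=zookeeper1:2888:3888
# server.2=zookeeper2:2888:3888
# server.3=zookeeper3:2888:3888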
------------------------------------------
#The base image OS is Alpine
[root@localhost7C zookeeper]# cat Dockerfile 
FROM harbor.zzhz.com/pub-images/slim_java:8

ENV ZK_VERSION 3.4.14

RUN apk add --no-cache --virtual .build-deps \
      ca-certificates   \
      gnupg             \
      tar               \
      wget &&           \
    #
    # Install dependencies
    apk add --no-cache  \
      bash &&           \
    #
    # Download Zookeeper
    wget -nv -O /tmp/zk.tgz "https://www.apache.org/dyn/closer.cgi?action=download&filename=zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz" && \
    wget -nv -O /tmp/zk.tgz.asc "https://www.apache.org/dist/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz.asc" && \
    wget -nv -O /tmp/KEYS https://dist.apache.org/repos/dist/release/zookeeper/KEYS && \
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010


-------------------------------------------------
[root@localhost7C zookeeper]# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.zzhz.com/linux39/zookeeper:${TAG} .
sleep 1
docker push  harbor.zzhz.com/linux39/zookeeper:${TAG}





Build the zookeeper image
[root@localhost7C zookeeper]# chmod  +x *.sh
[root@localhost7C zookeeper]# chmod a+x bin/*.sh
[root@localhost7C zookeeper]# ./build-command.sh v1


#Test: run a container to verify ZooKeeper starts; the ZooInspector client can also be used to connect:
[root@localhost7C zookeeper]# docker run -it --rm -p 2181:2181 harbor.zzhz.com/linux39/zookeeper:v1  bash
bash-4.3# ./bin/zkServer.sh  start
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... 
STARTED
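
Because port 2181 is published to the Docker host, the server can also be checked from the host with ZooKeeper's four-letter-word commands (enabled by default in 3.4.14; assumes nc/netcat is installed on the host):
echo ruok | nc 127.0.0.1 2181     # prints "imok" if the server is running
echo stat | nc 127.0.0.1 2181     # prints version, mode and client connections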





4. Install an NFS server; PVs and PVCs backed by it provide the storage for the zookeeper cluster
[root@localhost7B ]# cat  /etc/exports
/data/k8sdata  *(rw,no_root_squash)

[root@localhost7B ]# mkdir /data/k8sdata/magedu/zookeeper-datadir-1 -p 
[root@localhost7B ]# mkdir /data/k8sdata/magedu/zookeeper-datadir-2 -p 
[root@localhost7B ]# mkdir /data/k8sdata/magedu/zookeeper-datadir-3 -p 

[root@localhost7B ]# systemctl  restart  nfs-server.service

#Write test
mount -t nfs 192.168.80.110:/data/k8sdata/magedu/zookeeper-datadir-1  /mnt
cp  /etc/passwd  /mnt
umount  /mnt/


#Create the PVs for the zookeeper cluster
[root@localhost7C pv]# cat zookeeper-persistentvolume.yaml 
---
apiVersion: v1
kind: PersistentVolume  
metadata:
  name: zookeeper-datadir-pv-1
  namespace: default          
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce 
  nfs:
    server: 192.168.80.110
    path: /data/k8sdata/magedu/zookeeper-datadir-1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
  namespace: default
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.80.110 
    path: /data/k8sdata/magedu/zookeeper-datadir-2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
  namespace: default
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.80.110  
    path: /data/k8sdata/magedu/zookeeper-datadir-3 

#Create the PVCs for the zookeeper cluster
[root@localhost7C pv]# cat zookeeper-persistentvolumeclaim.yaml 
apiVersion: v1
kind: PersistentVolumeClaim            # the kind is PVC
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: default
spec:
  accessModes:                          # access modes
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1    # bind to this PV by name
  resources:                               # requested storage size for the PVC
    requests:
      storage: 2Gi              # should be no larger than the PV capacity; do not resize casually once the workload is running
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 3Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 4Gi

#Create the PVCs and PVs
[root@localhost7C pv]# kubectl apply  -f zookeeper-persistentvolumeclaim.yaml  -f zookeeper-persistentvolume.yaml

Check:
[root@localhost7C pv]# kubectl get persistentvolumes
[root@localhost7C pv]# kubectl get persistentvolumeclaims 
[root@localhost7C pv]# kubectl  describe  persistentvolume zookeeper-datadir-pv-1
[root@localhost7C pv]# kubectl  describe  persistentvolumeclaims  zookeeper-datadir-pvc-1


5. Run the ZooKeeper service on Kubernetes: deploy the zookeeper cluster into the k8s environment with the YAML files below
[root@localhost7C namespaces]# cat magedu-ns.yaml 
apiVersion: v1
kind: Namespace
metadata: 
  name: magedu
[root@localhost7C namespaces]# kubectl apply -f magedu-ns.yaml 
namespace/magedu created


#ZooKeeper Kubernetes resource manifests: Deployments and Services
[root@localhost7C zookeeper]# cat zookeeper.yaml 
#In-cluster client access to all zookeeper nodes
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: magedu
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---

#External access to zookeeper1
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32181 #
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
#External access to zookeeper2
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32182 #
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
#External access to zookeeper3
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  type: NodePort        
  ports:
    - name: client
      port: 2181
      nodePort: 32183 #
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory   # write-ahead log data kept in memory
        - name: zookeeper-datadir-pvc-1   # mounted at /zookeeper/data below
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
      containers:
        - name: server
          image: harbor.zzhz.com/linux39/zookeeper:v1
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx1G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-1 
---
#zookeeper2 Deployment
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2   # mounted at /zookeeper/data below
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
      containers:
        - name: server
          image: harbor.zzhz.com/linux39/zookeeper:v1
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx1G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-2 
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3   # mounted at /zookeeper/data below
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
      containers:
        - name: server
          image: harbor.zzhz.com/linux39/zookeeper:v1
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx1G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-3

[root@localhost7C zookeeper]# kubectl  apply  -f zookeeper.yaml
[root@localhost7C zookeeper]# kubectl get pod  -A   -o wide

magedu                 zookeeper1-565bb9fc8b-cn8m2                  1/1     Running   0          3m49s   10.20.5.37       192.168.80.160   <none>           <none>
magedu                 zookeeper2-5559dfb959-s9tnd                  1/1     Running   0          3m49s   10.20.6.27       192.168.80.150   <none>           <none>
magedu                 zookeeper3-64b877777b-fl6qm                  1/1     Running   0          3m49s   10.20.5.36       192.168.80.160   <none>           <none>
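
Optionally confirm that entrypoint.sh wrote the expected myid and server list (pod name taken from the listing above):
[root@localhost7C zookeeper]# kubectl -n magedu exec zookeeper1-565bb9fc8b-cn8m2 -- cat /zookeeper/data/myid
[root@localhost7C zookeeper]# kubectl -n magedu exec zookeeper1-565bb9fc8b-cn8m2 -- cat /zookeeper/conf/zoo.cfg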


[root@localhost7C zookeeper]# kubectl exec  -it  -n magedu  zookeeper1-565bb9fc8b-cn8m2 bash
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader

[root@localhost7C zookeeper]# kubectl exec  -it  -n magedu  zookeeper2-5559dfb959-s9tnd bash 
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower

[root@localhost7C zookeeper]# kubectl exec  -it  -n magedu  zookeeper3-64b877777b-fl6qm bash 
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower
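
A further check is that data written through one member is readable through another; for example, with zkCli.sh in single-command mode (the znode /test1 is only an example):
[root@localhost7C zookeeper]# kubectl -n magedu exec zookeeper1-565bb9fc8b-cn8m2 -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 create /test1 "hello"
[root@localhost7C zookeeper]# kubectl -n magedu exec zookeeper3-64b877777b-fl6qm -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 get /test1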


#Check on any worker node
[root@localhost7h ~]# netstat -anltp | grep 218
tcp6       0      0 :::32183                :::*                    LISTEN      1251/kube-proxy     
tcp6       0      0 :::32181                :::*                    LISTEN      1251/kube-proxy     
tcp6       0      0 :::32182                :::*                    LISTEN      1251/kube-proxy   



Test: connect to ZooKeeper with the ZooInspector client (e.g. via any node IP on NodePort 32181/32182/32183).


II. Dubbo: provider and consumer service registration and discovery, and dubboadmin

[root@localhost7C dubbo]# ls
consumer  dubboadmin  provider


1. Building the provider image
[root@localhost7C dubbo]# ll provider/

-rwxr-xr-x 1 root root      144 3月  31 16:35 build-command.sh
-rw-r--r-- 1 root root      411 3月  31 16:22 Dockerfile
drwxr-xr-x 5 root root       40 4月   7 2020 dubbo-demo-provider-2.1.5
-rw-r--r-- 1 root root 10281793 4月   7 2020 dubbo-demo-provider-2.1.5-assembly.tar.gz
-rwxr-xr-x 1 root root      313 3月  31 16:22 run_java.sh


#Configuration file
[root@localhost7C provider]# cat dubbo-demo-provider-2.1.5/conf/dubbo.properties 
##
dubbo.container=log4j,spring
dubbo.application.name=demo-provider
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper2.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper3.magedu.svc.zzhz.local:2181
dubbo.monitor.protocol=registry
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
dubbo.log4j.file=logs/dubbo-demo-provider.log
dubbo.log4j.level=WARN
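
The registry addresses are the ZooKeeper Service names created earlier, fully qualified with this cluster's DNS domain (zzhz.local). Once the provider pod is running, resolution can be checked from inside it; getent is used here because the CentOS-based image may not ship nslookup (pod name taken from the later listing):
[root@localhost7C provider]# kubectl -n magedu exec magedu-provider-deployment-75bb794f4-9pfh7 -- getent hosts zookeeper1.magedu.svc.zzhz.local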

#Startup script
[root@localhost7C provider]# cat run_java.sh 
#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  &
su - tomcat -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts


#Dockerfile; the base image already has the JDK installed, see: https://www.cnblogs.com/Yuanbangchen/p/17264215.html
[root@localhost7C provider]# cat Dockerfile 
#Dubbo provider
FROM harbor.zzhz.com/pub-images/jdk-base:v8.212  
MAINTAINER zhangshijie "zhangshijie@magedu.net"

RUN yum install file nc -y
RUN mkdir -p /apps/dubbo/provider && useradd tomcat
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown tomcat.tomcat /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]


#Build script
[root@localhost7C provider]# cat build-command.sh 
#!/bin/bash
docker build -t harbor.zzhz.com/linux39/dubbo-demo-provider:v1 .
sleep 3
docker push harbor.zzhz.com/linux39/dubbo-demo-provider:v1


[root@localhost7C provider]# chmod  +x ./*.sh

#Provider manifest
[root@localhost7C provider]# cat provider.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-provider
  name: magedu-provider-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-provider
  template:
    metadata:
      labels:
        app: magedu-provider
    spec:
      containers:
      - name: magedu-provider-container
        image: harbor.zzhz.com/linux39/dubbo-demo-provider:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-provider
  name: magedu-provider-spec
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 20880
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: magedu-provider

[root@localhost7C provider]# kubectl apply  -f provider.yaml 




2. Building the consumer image
[root@localhost7C dubbo]# ll consumer/
-rwxr-xr-x 1 root root      145 3月  31 16:44 build-command.sh
-rw-r--r-- 1 root root      409 3月  31 16:43 Dockerfile
drwxr-xr-x 5 root root       40 4月   7 2020 dubbo-demo-consumer-2.1.5
-rw-r--r-- 1 root root 10282529 4月   7 2020 dubbo-demo-consumer-2.1.5-assembly.tar.gz
-rwxr-xr-x 1 root root      313 4月   7 2020 run_java.sh



#Configuration file
[root@localhost7C consumer]# cat dubbo-demo-consumer-2.1.5/conf/dubbo.properties 
dubbo.container=log4j,spring
dubbo.application.name=demo-consumer
dubbo.application.owner=
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper2.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper3.magedu.svc.zzhz.local:2181
dubbo.monitor.protocol=registry
dubbo.log4j.file=logs/dubbo-demo-consumer.log
dubbo.log4j.level=WARN

#Startup script
[root@localhost7C consumer]# cat run_java.sh 
#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  &
su - tomcat -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts

#Dockerfile; the base image already has the JDK installed, see: https://www.cnblogs.com/Yuanbangchen/p/17264215.html
[root@localhost7C consumer]# cat Dockerfile 
#Dubbo consumer#
FROM harbor.zzhz.com/pub-images/jdk-base:v8.212   
MAINTAINER zhangshijie "zhangshijie@magedu.net"

RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer && useradd tomcat
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown tomcat.tomcat /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh
CMD ["/apps/dubbo/consumer/bin/run_java.sh"]


#Build script
[root@localhost7C consumer]# cat build-command.sh 
#!/bin/bash
docker build -t harbor.zzhz.com/linux39/dubbo-demo-consumer:v1  .
sleep 3
docker push harbor.zzhz.com/linux39/dubbo-demo-consumer:v1

#Consumer manifest
[root@localhost7C consumer]# cat consumer.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-consumer
  name: magedu-consumer-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-consumer
  template:
    metadata:
      labels:
        app: magedu-consumer
    spec:
      containers:
      - name: magedu-consumer-container
        image: harbor.zzhz.com/linux39/dubbo-demo-consumer:v1 
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-consumer
  name: magedu-consumer-server
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 20880
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: magedu-consumer


[root@localhost7C consumer]# kubectl apply  -f consumer.yaml  

[root@localhost7C consumer]# kubectl get pod  -n magedu 
NAME                                          READY   STATUS    RESTARTS   AGE
magedu-consumer-deployment-85bc85ffb5-djrs2   1/1     Running   0          23m
magedu-provider-deployment-75bb794f4-9pfh7    1/1     Running   0          35m
zookeeper1-565bb9fc8b-cn8m2                   1/1     Running   0          147m
zookeeper2-5559dfb959-s9tnd                   1/1     Running   0          147m
zookeeper3-64b877777b-fl6qm                   1/1     Running   0          147m
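
Dubbo registers providers and consumers under the /dubbo path in ZooKeeper, so registration can also be verified from one of the zookeeper pods (zkCli.sh in single-command mode; pod name from the listing above):
[root@localhost7C consumer]# kubectl -n magedu exec zookeeper1-565bb9fc8b-cn8m2 -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181 ls /dubbo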




Test: connect to ZooKeeper with the ZooInspector client to confirm the provider and consumer have registered.




3. Building the dubboadmin image
[root@localhost7C dubbo]# unzip  dubboadmin.war 
[root@localhost7C dubbo]# ll dubboadmin
-rwxr-xr-x 1 root root      161 4月   9 2020 build-command.sh
-rwxr-xr-x 1 root root    22201 4月   7 2020 catalina.sh     # copied from another Tomcat installation
-rw-r--r-- 1 root root      585 4月   9 2020 Dockerfile
drwxr-xr-x 8 root root      132 4月   7 2020 dubboadmin   
-rw-r--r-- 1 root root 27777982 4月   9 2020 dubboadmin.war  
-rw-r--r-- 1 root root     3436 4月   7 2020 logging.properties
-rwxr-xr-x 1 root root      101 4月   7 2020 run_tomcat.sh
-rw-r--r-- 1 root root     6427 4月   7 2020 server.xml     # copied from another Tomcat installation, with the paths adjusted


#dubboadmin configuration file
[root@localhost7C dubboadmin]# cat dubboadmin/WEB-INF/dubbo.properties 
dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper2.magedu.svc.zzhz.local:2181 | zookeeper://zookeeper3.magedu.svc.zzhz.local:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest


#Copied from another Tomcat installation, with the paths adjusted
[root@localhost7C dubboadmin]# cat server.xml  # excerpt
<Host appBase="/data/tomcat/webapps" autoDeploy="true" name="localhost" unpackWARs="true">
  <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" pattern="%h %l %u %t &quot;%r&quot; %s %b" prefix="localhost_access_log" suffix=".txt"/>
  <Context docBase="dubboadmin" path="/" reloadable="true" source="org.eclipse.jst.jee.server:dubboadmin"/>




#Service startup script
[root@localhost7C dubboadmin]# cat run_tomcat.sh 
#!/bin/bash
su - tomcat -c "/apps/tomcat/bin/catalina.sh start"
su - tomcat -c "tail -f /etc/hosts"


#Logging configuration file
[root@localhost7C dubboadmin]# cat logging.properties 

handlers = 1catalina.org.apache.juli.AsyncFileHandler, 2localhost.org.apache.juli.AsyncFileHandler, 3manager.org.apache.juli.AsyncFileHandler, 4host-manager.org.apache.juli.AsyncFileHandler, java.util.logging.ConsoleHandler
.handlers = 1catalina.org.apache.juli.AsyncFileHandler, java.util.logging.ConsoleHandler

############################################################
# Handler specific properties.
# Describes specific configuration info for Handlers.
############################################################

1catalina.org.apache.juli.AsyncFileHandler.level = FINE
1catalina.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
1catalina.org.apache.juli.AsyncFileHandler.prefix = catalina.

2localhost.org.apache.juli.AsyncFileHandler.level = FINE
2localhost.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs 
2localhost.org.apache.juli.AsyncFileHandler.prefix = localhost.

3manager.org.apache.juli.AsyncFileHandler.level = FINE
3manager.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
3manager.org.apache.juli.AsyncFileHandler.prefix = manager.

4host-manager.org.apache.juli.AsyncFileHandler.level = FINE
4host-manager.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
4host-manager.org.apache.juli.AsyncFileHandler.prefix = host-manager.

java.util.logging.ConsoleHandler.level = FINE
java.util.logging.ConsoleHandler.formatter = org.apache.juli.OneLineFormatter


############################################################
# Facility specific properties.
# Provides extra control for each logger.
############################################################

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.AsyncFileHandler

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.AsyncFileHandler

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.AsyncFileHandler

# For example, set the org.apache.catalina.util.LifecycleBase logger to log
# each component that extends LifecycleBase changing state:
#org.apache.catalina.util.LifecycleBase.level = FINE

# To see debug messages in TldLocationsCache, uncomment the following line:
#org.apache.jasper.compiler.TldLocationsCache.level = FINE


#Dockerfile
#The base image already has the JDK installed and the Tomcat paths set up; see: https://www.cnblogs.com/Yuanbangchen/p/17264215.html
[root@localhost7C dubboadmin]# cat Dockerfile 
#Dubbo dubboadmin
FROM harbor.zzhz.com/pub-images/tomcat-base:v8.5.43 
MAINTAINER zhangshijie "zhangshijie@magedu.net"
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.war  /data/tomcat/webapps/dubboadmin.war
#RUN yum install unzip -y  
#RUN cd /data/tomcat/webapps && unzip dubboadmin.war && rm -rf dubboadmin.war && chown -R tomcat.tomcat /data /apps
ADD dubboadmin /data/tomcat/webapps/dubboadmin
RUN chown -R tomcat.tomcat /data  /apps
EXPOSE 8080 8443
CMD ["/apps/tomcat/bin/run_tomcat.sh"]


Build script
[root@localhost7C dubboadmin]# chmod +x ./*.sh
[root@localhost7C dubboadmin]# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.zzhz.com/linux39/dubboadmin:${TAG}  .
sleep 3
docker push  harbor.zzhz.com/linux39/dubboadmin:${TAG}


[root@localhost7C dubboadmin]# ./build-command.sh  v1


#Manifest
[root@localhost7C dubboadmin]# cat dubboadmin.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: magedu-dubboadmin
  name: magedu-dubboadmin-deployment
  namespace: magedu
spec:
  replicas: 1
  selector:
    matchLabels:
      app: magedu-dubboadmin
  template:
    metadata:
      labels:
        app: magedu-dubboadmin
    spec:
      containers:
      - name: magedu-dubboadmin-container
        image: harbor.zzhz.com/linux39/dubboadmin:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: magedu-dubboadmin
  name: magedu-dubboadmin-service
  namespace: magedu
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30080
  selector:
    app: magedu-dubboadmin



[root@localhost7C /]# kubectl get pod  -A  
magedu                 magedu-consumer-deployment-85bc85ffb5-kr9tt   1/1     Running   0          5m54s
magedu                 magedu-dubboadmin-deployment-77bc9ff9-mrjbj   1/1     Running   0          59m
magedu                 magedu-provider-deployment-75bb794f4-j685f    1/1     Running   0          6m59s
magedu                 zookeeper1-565bb9fc8b-m8nh4                   1/1     Running   0          46h
magedu                 zookeeper2-5559dfb959-s9tnd                   1/1     Running   0          2d20h
magedu                 zookeeper3-64b877777b-7nnlt                   1/1     Running   0          46h




[root@localhost7C /]# kubectl exec  -it -n magedu magedu-provider-deployment-75bb794f4-j685f  bash
[root@magedu-provider-deployment-75bb794f4-j685f /]# netstat  -antlp 
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:20880           0.0.0.0:*               LISTEN      -       

[root@magedu-provider-deployment-75bb794f4-j685f /]# ps aux
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  0.0  0.0  11688  1408 ?        Ss   11:15   0:00 /bin/bash /apps/dubbo/provider/bin/run_java.sh
root          7  0.1  0.0  81980  2060 ?        S    11:15   0:00 su - tomcat -c /apps/dubbo/provider/bin/start.sh
tomcat        8  0.1  0.0  11164  1484 ?        Ss   11:15   0:00 /bin/bash /apps/dubbo/provider/bin/start.sh
tomcat       57  2.7  6.2 4614500 146780 ?      Sl   11:15   0:09 java -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -server -Xmx1g -Xms1g -Xmn256m -XX:PermSize=128m -Xss256k -XX
root       1371  0.5  0.0  11828  1852 pts/0    Ss   11:21   0:00 bash
tomcat     1391  0.0  0.0   5936   356 ?        S    11:21   0:00 sleep 1
root       1392  0.0  0.0  51752  1732 pts/0    R+   11:21   0:00 ps aux

            

[root@localhost7C /]# kubectl exec  -it -n magedu magedu-consumer-deployment-85bc85ffb5-kr9tt  bash
[root@magedu-consumer-deployment-85bc85ffb5-kr9tt /]# netstat -antlp 
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 10.20.5.43:54966        10.10.115.171:2181      ESTABLISHED -                   
tcp        0      0 10.20.5.43:60018        10.10.107.121:2181      ESTABLISHED -                   
tcp        0      0 10.20.5.43:58876        10.20.5.42:20880        ESTABLISHED -                   
tcp        0      0 10.20.5.43:60872        10.10.231.109:2181      ESTABLISHED -                   

[root@magedu-consumer-deployment-85bc85ffb5-kr9tt /]# ps aux
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  0.0  0.0  11688  1408 ?        Ss   11:16   0:00 /bin/bash /apps/dubbo/consumer/bin/run_java.sh
tomcat       52  1.7  5.2 4497068 122508 ?      Sl   11:16   0:05 java -Djava.awt.headless=true -Djava.net.preferIPv4Stack=true -server -Xmx1g -Xms1g -Xmn256m -XX:PermSize=128m -Xss256k -XX
root         78  0.0  0.0   4408   360 ?        S    11:16   0:00 tail -f /etc/hosts
root         95  0.1  0.0  11828  1984 pts/0    Ss   11:20   0:00 bash
root        111  0.0  0.0  51752  1740 pts/0    R+   11:21   0:00 ps aux
[root@magedu-consumer-deployment-85bc85ffb5-kr9tt /]# 
[root@magedu-consumer-deployment-85bc85ffb5-kr9tt /]# 
[root@magedu-consumer-deployment-85bc85ffb5-kr9tt /]# exit
exit


Test: connect to ZooKeeper with the ZooInspector client.


Test in a browser: http://192.168.80.170:30080/dubboadmin/ (log in with root/root or guest/guest, as configured in dubbo.properties).
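
A quick smoke test from the command line (the old dubboadmin console normally uses HTTP Basic auth; the credentials are the root/guest passwords set in dubbo.properties above):
curl -u root:root http://192.168.80.170:30080/dubboadmin/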


An alternative to Section I, based on a CentOS image.
CentOS-based Dockerfile for the zookeeper image:
root@master1:/dockerfile/project/zookeeper# cat Dockerfile
FROM harbor.cncf.net/baseimages/jdk:1.8.191 
 
ENV ZK_VERSION 3.5.10
COPY apache-zookeeper-3.5.10-bin.tar.gz /usr/local
 
RUN mkdir -p /usr/local/zookeeper/data /usr/local/zookeeper/wal /usr/local/zookeeper/log
RUN cd /usr/local/ &&  \
tar xf apache-zookeeper-3.5.10-bin.tar.gz -C /usr/local/zookeeper/ --strip-components=1 && \
rm -rf apache-zookeeper-3.5.10-bin.tar.gz
 
COPY conf /usr/local/zookeeper/conf/
COPY bin/zkReady.sh /usr/local/zookeeper/bin/
COPY entrypoint.sh /
 
ENV PATH=/usr/local/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/usr/local/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010
 
ENTRYPOINT [ "/entrypoint.sh" ]
CMD [ "zkServer.sh", "start-foreground" ]
EXPOSE 2181 2888 3888 9010
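
No build script is shown for this variant; a possible build-and-test sequence, with an illustrative image name under the harbor.cncf.net registry used above, would be:
docker build -t harbor.cncf.net/baseimages/zookeeper:v3.5.10 .
docker push  harbor.cncf.net/baseimages/zookeeper:v3.5.10
docker run -it --rm -p 2181:2181 harbor.cncf.net/baseimages/zookeeper:v3.5.10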
----------------------------------------

Entrypoint script that generates the cluster member list:
root@master1:/dockerfile/project/zookeeper# cat entrypoint.sh
#!/bin/bash
  
echo ${MYID:-1} > /usr/local/zookeeper/data/myid
 
#Kubernetes Service names of the zookeeper cluster
servers=(zookeeper1 zookeeper2 zookeeper3)
 
if [ -n "$servers" ]; then
   for list in "${!servers[@]}"; do
      printf "\nserver.%s = %s:2888:3888" "$((1 + $list))" "${servers[$list]}" >> /usr/local/zookeeper/conf/zoo.cfg
   done
fi
 
exec "$@"
---------------------------------------------

Reference blog:
https://www.cnblogs.com/punchlinux/p/16575844.html
