一、项目实战之kubernetes实战案例之Nginx+Tomcat+NFS实现动静分离Web站点

基于Nginx+Tomcat+NFS实现通过域名转发动态请求到Tomcat Pod的动静分离架构,要求能够通过负载均衡的VIP访问到k8s集群中运行的Nginx+Tomcat+NFS中的web页面。
基于NFS实现动静分离:图片的上传由后端服务器tomcat完成,图片的读取由前端的nginx响应,就需要nginx与tomcat的数据保持一致性,
因此需要将数据保存到k8s环境外部的存储服务器,然后再挂载到各nginx与tomcat 的容器中进行相应的操作。(nginx和tomcat的yaml定义的挂载指定同一个NFS)

 

1.k8s二进制安装和harbor安装参考文档: https://www.cnblogs.com/Yuanbangchen/p/17219073.html   
                  

2.下载初始镜像,上传镜像到harbor.zzhz.com服务器
[root@localhost7C centos]# docker pull  centos:7.6.1810
[root@localhost7C centos]# docker tag  f1cb7c7d58b7   harbor.zzhz.com/baseimages/centos:7.6.1810
[root@localhost7C centos]# docker push harbor.zzhz.com/baseimages/centos:7.6.1810 



3.对初始镜像进行基础软件安装,此镜像为nginx tomcat jdk的基本镜像
[root@localhost7C centos]# ll
-rw-r--r-- 1 root root      174 4月   7 2020 build-command.sh
-rw-r--r-- 1 root root      503 4月   9 2020 Dockerfile
-rw-r--r-- 1 root root 24694569 4月   7 2020 filebeat-7.6.1-x86_64.rpm


[root@localhost7C centos]# cat Dockerfile 
#自定义Centos 基础镜像
FROM harbor.zzhz.com/baseimages/centos:7.6.1810 
LABEL MAINTAINER="2973707860@qq.com"
ADD filebeat-7.6.1-x86_64.rpm /tmp
RUN yum install -y /tmp/filebeat-7.6.1-x86_64.rpm vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel \
openssl openssl-devel iproute net-tools iotop &&  rm -rf /etc/localtime /tmp/filebeat-7.6.1-x86_64.rpm \
&& ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && useradd  nginx -u 2019 && useradd www -u 2020

#harbor先创建/baseimages项目
[root@localhost7C centos]# cat build-command.sh 
#!/bin/bash
docker build -t  harbor.zzhz.com/baseimages/magedu-centos-base:7.6.1810  .

docker push harbor.zzhz.com/baseimages/magedu-centos-base:7.6.1810

#执行构建
[root@localhost7C centos]# chmod  +x build-command.sh 
[root@localhost7C centos]# ./build-command.sh
4.Nginx 基础镜像制作:制作一个通用的Ningx镜像,此时的镜像没有业务。
[root@localhost7C nginx-base]# ll
-rw-r--r-- 1 root root     144 3月  28 11:28 build-command.sh
-rw-r--r-- 1 root root     470 3月  28 11:27 Dockerfile
-rw-r--r-- 1 root root 1015384 4月   7 2020 nginx-1.14.2.tar.gz

[root@localhost7C nginx-base]# cat Dockerfile 
FROM harbor.zzhz.com/baseimages/magedu-centos-base:7.6.1810
MAINTAINER  zhangshijie@magedu.net

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
ADD nginx-1.14.2.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.14.2 && ./configure  && make && make install && ln -sv  /usr/local/nginx/sbin/nginx /usr/sbin/nginx  && rm -rf /usr/local/src/nginx-1.14.2.tar.gz 

#harbor先创建pub-images项目
[root@localhost7C nginx-base]# cat build-command.sh 
#!/bin/bash
docker build -t harbor.zzhz.com/pub-images/nginx-base:v1.14.2  .
sleep 1
docker push  harbor.zzhz.com/pub-images/nginx-base:v1.14.2



#执行构建
[root@localhost7C nginx-base]# chmod  +x build-command.sh 
[root@localhost7C nginx-base]# ./build-command.sh




5.基于Nginx基础镜像,制作N个不同服务的Nginx业务镜像:
[root@localhost7C nginx]# ls
-rw-r--r-- 1 root root  265 4月   7 2020 app1.tar.gz   
-rw-r--r-- 1 root root  226 3月  28 14:17 build-command.sh
-rw-r--r-- 1 root root  354 3月  28 14:25 Dockerfile
-rw-r--r-- 1 root root   14 3月  28 14:53 index.html
-rw-r--r-- 1 root root 1608 3月  28 14:18 nginx.conf


#文件信息
[root@localhost7C nginx]# cat Dockerfile 
#Nginx 1.14.2
FROM harbor.zzhz.com/pub-images/nginx-base:v1.14.2 

ADD nginx.conf /usr/local/nginx/conf/nginx.conf
ADD app1.tar.gz  /usr/local/nginx/html/webapp/
ADD index.html  /usr/local/nginx/html/index.html

#静态资源挂载路径
RUN mkdir -p /usr/local/nginx/html/webapp/static /usr/local/nginx/html/webapp/images

EXPOSE 80 443

CMD ["nginx"] 


[root@localhost7C nginx]# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.zzhz.com/linux39/nginx-web1:${TAG} .
echo "镜像构建完成,即将上传到harbor"
sleep 1
docker push harbor.zzhz.com/linux39/nginx-web1:${TAG}
echo "镜像上传到harbor完成"


[root@localhost7C nginx]# cat nginx.conf 
user  nginx nginx;
worker_processes  auto;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;
#pid        logs/nginx.pid;

daemon off;  ##关闭后台运行

events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;

    keepalive_timeout  65;

    #gzip  on;

#先关掉
#    upstream  tomcat_webserver {
#        server   linux39-tomcat-app1-service.linux39.svc.zzhz.local:80;
#    }

    server {
        listen       80;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        location /webapp {
            root   html;
            index  index.html index.htm;
        }
#先关掉
#        location /myapp {
#             proxy_pass  http://tomcat_webserver;
#             proxy_set_header   Host    $host;
#             proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
#             proxy_set_header X-Real-IP $remote_addr;
#        }

        # redirect server error pages to the static page /50x.html
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

[root@localhost7C nginx]# cat index.html 
nginx web1 v1

#执行构建
[root@localhost7C nginx]# chmod  +x build-command.sh 
[root@localhost7C nginx]# bash build-command.sh  v1


#测试nginx业务镜像可以启动为容器
[root@localhost7C nginx]# docker run  -it  --rm  -p 8801:80  harbor.zzhz.com/linux39/nginx-web1:v1 

[root@localhost7K ~]# curl  192.168.80.120:8801
nginx web1 v1

[root@localhost7K ~]# curl  192.168.80.120:8801/webapp/index.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
       <title>马哥教育</title>
        </head>
     <body>
      <h2>linux39 web1 v1 </h2>
     </body>
</html>
6.安装nfs服务器 :
基于NFS实现动静分离:图片的上传由后端服务器tomcat完成,图片的读取由前端的nginx响应,就需要nginx与tomcat的数据保持一致性,
因此需要将数据保存到k8s环境外部的存储服务器,然后再挂载到各nginx与tomcat 的容器中进行相应的操作。(nginx和tomcat的yaml定义的挂载指定同一个NFS)

[root@localhost7B ~]# yum install nfs-utils.x86_64

[root@localhost7B ~]# cat /etc/exports
/data/linux39/  *(rw,no_root_squash)

[root@localhost7B ~]# mkdir /data/linux39/images  -p
[root@localhost7B ~]# mkdir /data/linux39/static  -p
[root@localhost7B ~]# echo "nginx static page" > /data/linux39/static/nginx.file
[root@localhost7B ~]# echo "nginx images page" > /data/linux39/images/index.html

[root@localhost7B ]# systemctl  restart  nfs-server.service

#写数据测试
mount -t nfs 192.168.80.110:/data/linux39/static  /mnt

[root@localhost7F k8s]# cat  /mnt/nginx.file 
nginx static page

[root@localhost7F k8s]# umount  /mnt/
7.在k8s环境创建nginx业务pod
#需要提前创建好yaml文件,并创建好pod运行所需要的namespace、yaml文件等资源
[root@localhost7C yaml]# cd namespaces/
[root@localhost7C namespaces]# ls
linux39-ns.yaml 

[root@localhost7C namespaces]# cat linux39-ns.yaml 
apiVersion: v1
kind: Namespace
metadata: 
  name: linux39


[root@localhost7C yaml]# cd linux39/nginx/
[root@localhost7C nginx]# cat nginx.yaml 
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: linux39-nginx-deployment-label
  name: linux39-nginx-deployment
  namespace: linux39
spec:
  replicas: 1
  selector:
    matchLabels:
      app: linux39-nginx-selector
  template:
    metadata:
      labels:
        app: linux39-nginx-selector
    spec:
      containers:
      - name: linux39-nginx-container
        image: harbor.zzhz.com/linux39/nginx-web1:v1
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        #imagePullPolicy: None
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "20"
        resources:
          limits:
            cpu: 1
            memory: 512Mi
          requests:
            cpu: 200m
            memory: 246Mi

        volumeMounts:
        - name: linux39-images
          mountPath: /usr/local/nginx/html/webapp/images 
          readOnly: false
        - name: linux39-static
          mountPath: /usr/local/nginx/html/webapp/static 
          readOnly: false
      volumes:
      - name: linux39-images
        nfs:
          server: 192.168.80.110
          path: /data/linux39/images 
      - name: linux39-static
        nfs:
          server: 192.168.80.110
          path: /data/linux39/static
      #nodeSelector:
      #  group: linux39

    
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: linux39-nginx-service-label
  name: linux39-nginx-service
  namespace: linux39
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30080
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30443
  selector:
    app: linux39-nginx-selector



[root@localhost7C nginx]# kubectl apply -f ../../namespaces/linux39-ns.yaml    -f nginx.yaml 
[root@localhost7C nginx]# kubectl  get pod   -A  -o wide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP               NODE             NOMINATED NODE   READINESS GATES
kube-system            kube-dns-69979c4b84-2h6d2                    3/3     Running   0          30h     10.20.5.4        192.168.80.160   <none>           <none>
kube-system            kube-flannel-ds-amd64-2262m                  1/1     Running   0          30h     192.168.80.150   192.168.80.150   <none>           <none>
kube-system            kube-flannel-ds-amd64-69qjr                  1/1     Running   0          30h     192.168.80.160   192.168.80.160   <none>           <none>
kube-system            kube-flannel-ds-amd64-6bsnm                  1/1     Running   0          30h     192.168.80.140   192.168.80.140   <none>           <none>
kube-system            kube-flannel-ds-amd64-6cq5q                  1/1     Running   0          30h     192.168.80.120   192.168.80.120   <none>           <none>
kube-system            kube-flannel-ds-amd64-ckmzs                  1/1     Running   0          30h     192.168.80.170   192.168.80.170   <none>           <none>
kube-system            kube-flannel-ds-amd64-xddjr                  1/1     Running   0          30h     192.168.80.130   192.168.80.130   <none>           <none>
kube-system            metrics-server-ccccb9bb6-h4wdk               1/1     Running   0          25h     10.20.4.13       192.168.80.170   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-74bbb59f48-t6jj2   1/1     Running   0          5h20m   10.20.6.12       192.168.80.150   <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-bc4695695-zkr9x         1/1     Running   0          5h20m   10.20.4.14       192.168.80.170   <none>           <none>
linux39                linux39-nginx-deployment-5dff5b5cb7-gxhh2    1/1     Running   0          59s     10.20.4.17       192.168.80.170   <none>           <none>




#复制文件到NFS文件中测试,或在pod创建文件测试。
[root@localhost7K ~]# curl  192.168.80.170:30080/index.html
nginx web1 v1
[root@localhost7K ~]# 
[root@localhost7K ~]# 
[root@localhost7K ~]# curl  192.168.80.170:30080/webapp/index.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
       <title>马哥教育</title>
        </head>
     <body>
      <h2>linux39 web1 v1 </h2>
     </body>
</html>
[root@localhost7K ~]# curl  192.168.80.170:30080/webapp/static/nginx.file
nginx static page
7.基于基础的centos镜像 harbor.zzhz.com/baseimages/magedu-centos-base:7.6.1810,制作公司内部基础镜像--jdk镜像

[root@localhost7C jdk-1.8.212]# ll
-rw-r--r-- 1 root root       138 3月  28 15:38 build-command.sh
-rw-r--r-- 1 root root       388 3月  28 15:36 Dockerfile
-rw-r--r-- 1 root root 195013152 4月   7 2020 jdk-8u212-linux-x64.tar.gz
-rw-r--r-- 1 root root      2128 3月  28 15:39 profile

[root@localhost7C jdk-1.8.212]# cat Dockerfile 
#JDK Base Image
FROM harbor.zzhz.com/baseimages/magedu-centos-base:7.6.1810
MAINTAINER zhangshijie "zhangshijie@magedu.net"

ADD jdk-8u212-linux-x64.tar.gz /usr/local/src/
RUN ln -sv /usr/local/src/jdk1.8.0_212 /usr/local/jdk 
ADD profile /etc/profile

ENV JAVA_HOME /usr/local/jdk
ENV JRE_HOME $JAVA_HOME/jre
ENV CLASSPATH $JAVA_HOME/lib/:$JRE_HOME/lib/
ENV PATH $PATH:$JAVA_HOME/bin



[root@localhost7C jdk-1.8.212]# cat build-command.sh 
#!/bin/bash
docker build -t harbor.zzhz.com/pub-images/jdk-base:v8.212  .
sleep 1
docker push  harbor.zzhz.com/pub-images/jdk-base:v8.212


#profile文件的来源:使用docker 运行一个临时容器复制出来。
[root@localhost7C jdk-1.8.212]# cat profile 
 ....
 ....
 ....
unset i
unset -f pathmunge
export LANG=en_US.UTF-8
export HISTTIMEFORMAT="%F %T `whoami` "

#主要是这几行。
export JAVA_HOME=/usr/local/jdk
export TOMCAT_HOME=/apps/tomcat
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$TOMCAT_HOME/bin:$PATH
export CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar


#执行构建
[root@localhost7C jdk-1.8.212]# chmod  +x build-command.sh 
[root@localhost7C jdk-1.8.212]# ./build-command.sh

#测试
[root@localhost7C jdk-1.8.212]# docker run  -it --rm  harbor.zzhz.com/pub-images/jdk-base:v8.212 bash
[root@5cd43e965b82 /]# java  -version
java version "1.8.0_212"
Java(TM) SE Runtime Environment (build 1.8.0_212-b10)
Java HotSpot(TM) 64-Bit Server VM (build 25.212-b10, mixed mode)


8.基于jdk镜像制作tomcat基础镜像
[root@localhost7C tomcat-base-8.5.43]# ll
-rw-r--r-- 1 root root 9717059 4月   7 2020 apache-tomcat-8.5.43.tar.gz
-rw-r--r-- 1 root root     146 3月  28 15:48 build-command.sh
-rw-r--r-- 1 root root     341 3月  28 15:48 Dockerfile


[root@localhost7C tomcat-base-8.5.43]# cat Dockerfile 
#Tomcat 8.5.43基础镜像
FROM harbor.zzhz.com/pub-images/jdk-base:v8.212 
MAINTAINER zhangshijie "zhangshijie@magedu.net"
RUN mkdir /apps /data/tomcat/webapps /data/tomcat/logs -pv 
ADD apache-tomcat-8.5.43.tar.gz  /apps
RUN useradd tomcat -u 2021 && ln -sv /apps/apache-tomcat-8.5.43 /apps/tomcat && chown -R nginx.nginx /apps /data


[root@localhost7C tomcat-base-8.5.43]# cat build-command.sh 
#!/bin/bash
docker build -t harbor.zzhz.com/pub-images/tomcat-base:v8.5.43  .
sleep 3
docker push  harbor.zzhz.com/pub-images/tomcat-base:v8.5.43


[root@localhost7C tomcat-base-8.5.43]# chmod +x build-command.sh 
[root@localhost7C tomcat-base-8.5.43]# ./build-command.sh 


#测试访问tomcat基础镜像启动为容器:
[root@localhost7C tomcat-base-8.5.43]# docker run -it --rm -p 8801:8080  harbor.zzhz.com/pub-images/tomcat-base:v8.5.43  bash
[root@cfb5972b39ac /]# /apps/tomcat/bin/catalina.sh  start
Using CATALINA_BASE:   /apps/tomcat
Using CATALINA_HOME:   /apps/tomcat
Using CATALINA_TMPDIR: /apps/tomcat/temp
Using JRE_HOME:        /usr/local/jdk/jre
Using CLASSPATH:       /apps/tomcat/bin/bootstrap.jar:/apps/tomcat/bin/tomcat-juli.jar
Tomcat started.

[root@cfb5972b39ac /]# ps aux
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  0.0  0.1  11824  1924 pts/0    Ss   15:52   0:00 bash
root         23 63.4  4.9 3999372 91544 pts/0   Sl   15:53   0:03 /usr/local/jdk/jre/bin/java -Djava.util.logging.config.file=/apps/tomcat/conf/logging.properties -Djava.util.logging.manage
root         73  0.0  0.0  51748  1732 pts/0    R+   15:53   0:00 ps aux


#测试
[root@localhost7K ~]# curl 192.168.80.120:8801


9.tomcat业务镜像:tomcat业务镜像app1制作,后期按此步骤制作app2、appN镜像
[root@localhost7C linux39]# cd tomcat-app1/
[root@localhost7C tomcat-app1]# ll
-rwxr-xr-x 1 root root   165 4月   7 2020 build-command.sh
-rwxr-xr-x 1 root root 23611 4月   7 2020 catalina.sh  #文件的来源:使用docker 运行一个临时容器复制出来。
-rw-r--r-- 1 root root   548 4月   9 2020 Dockerfile
-rw-r--r-- 1 root root   416 4月   9 2020 filebeat.yml
-rw-r--r-- 1 root root    12 4月   7 2020 index.html
-rw-r--r-- 1 root root   266 4月   9 2020 myapp.tar.gz
-rwxr-xr-x 1 root root   556 4月   9 2020 run_tomcat.sh
-rw-r--r-- 1 root root  6460 4月   7 2020 server.xml    #文件的来源:使用docker 运行一个临时容器复制出来。



[root@localhost7C tomcat-app1]# cat Dockerfile 
#tomcat web1
FROM harbor.zzhz.com/pub-images/tomcat-base:v8.5.43 
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD server.xml /apps/tomcat/conf/server.xml
#ADD myapp/* /data/tomcat/webapps/myapp/
ADD myapp.tar.gz /data/tomcat/webapps/myapp/
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD filebeat.yml /etc/filebeat/filebeat.yml 
RUN mkdir /data/tomcat/webapps/myapp/images  /data/tomcat/webapps/myapp/static -p
RUN chown  -R nginx.nginx /data/ /apps/ 
EXPOSE 8080 8443
CMD ["/apps/tomcat/bin/run_tomcat.sh"]


[root@localhost7C tomcat-app1]# cat index.html 
tomcat app1



#后期测试,可以不用添加
[root@localhost7C tomcat-app1]# cat filebeat.yml 
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /apps/tomcat/logs/catalina.out
  fields:
    type: k8s-tomcat-catalina

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1

output.redis:
  hosts: ["srv-devops-redis.magedu.svc.magedu.local:6379"]
  key: "k8s-linux39-myapp"
  db: 1
  timeout: 5
  password: "123456"



[root@localhost7C tomcat-app1]# cat run_tomcat.sh 
#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
#后期测试,可以不用添加
/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
su - nginx -c "/apps/tomcat/bin/catalina.sh start"
tail -f /etc/hosts



[root@localhost7C tomcat-app1]# cat server.xml 
其它省略
<Host name="localhost"  appBase="/data/tomcat/webapps"  unpackWARs="true" autoDeploy="true">



[root@localhost7C tomcat-app1]# cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t  harbor.zzhz.com/linux39/tomcat-app1:${TAG} .
sleep 3
docker push  harbor.zzhz.com/linux39/tomcat-app1:${TAG}




#重点,所有脚本要执行权限。
[root@localhost7C tomcat-app1]# chmod +x *.sh

[root@localhost7C tomcat-app1]# ./build-command.sh  2022-02-22


#测试访问tomcat基础镜像启动为容器:
[root@localhost7C tomcat-app1]# docker run -it --rm -p 8801:8080 harbor.zzhz.com/linux39/tomcat-app1:2022-02-22

[root@localhost7K ~]# curl  192.168.80.120:8801/myapp/index.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>马哥教育</title>
</head>
<body>
    <h1>linux39 myapp v1</h1>
    <h1>linux39 myapp v2</h1>
</body>
</html>



10.在k8s环境创建tomcat业务pod
[root@localhost7C tomcat-app1]# cat tomcat-app1.yaml 
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: linux39-tomcat-app1-deployment-label
  name: linux39-tomcat-app1-deployment
  namespace: linux39
spec:
  replicas: 1
  selector:
    matchLabels:
      app: linux39-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: linux39-tomcat-app1-selector
    spec:
      containers:
      - name: linux39-tomcat-app1-container
        image: harbor.zzhz.com/linux39/tomcat-app1:2022-02-22
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        resources:
          limits:
            cpu: 1
            memory: "512Mi"
          requests:
            cpu: 500m
            memory: "512Mi"
        volumeMounts:
        - name: linux39-images
          mountPath: /data/tomcat/webapps/myapp/images
          readOnly: false
        - name: linux39-static
          mountPath: /data/tomcat/webapps/myapp/static
          readOnly: false
      volumes:
      - name: linux39-images
        nfs:
          server: 192.168.80.110
          path: /data/linux39/images
      - name: linux39-static
        nfs:
          server: 192.168.80.110
          path: /data/linux39/static
      #nodeSelector:
      #  project: linux39
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: linux39-tomcat-app1-service-label
  name: linux39-tomcat-app1-service
  namespace: linux39
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30003
  selector:
    app: linux39-tomcat-app1-selector
[root@localhost7C tomcat-app1]# kubectl apply  -f tomcat-app1.yaml 



[root@localhost7C tomcat-app1]# kubectl get services  -A
NAMESPACE              NAME                          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
default                kubernetes                    ClusterIP   10.10.0.1       <none>        443/TCP                      31h
kube-system            kube-dns                      ClusterIP   10.10.0.2       <none>        53/UDP,53/TCP                31h
kube-system            metrics-server                ClusterIP   10.10.63.205    <none>        443/TCP                      30h
kubernetes-dashboard   dashboard-metrics-scraper     ClusterIP   10.10.211.255   <none>        8000/TCP                     6h32m
kubernetes-dashboard   kubernetes-dashboard          NodePort    10.10.239.72    <none>        443:30002/TCP                6h32m
linux39                linux39-nginx-service         NodePort    10.10.8.56      <none>        80:30080/TCP,443:30443/TCP   72m
linux39                linux39-tomcat-app1-service   NodePort    10.10.153.163   <none>        80:30003/TCP                 2m34s

[root@localhost7C tomcat-app1]# kubectl get pod  -A -o wide
NAMESPACE              NAME                                              READY   STATUS    RESTARTS   AGE     IP               NODE             NOMINATED NODE   READINESS GATES
linux39                linux39-nginx-deployment-5dff5b5cb7-gxhh2         1/1     Running   0          72m     10.20.4.17       192.168.80.170   <none>           <none>
linux39                linux39-tomcat-app1-deployment-85fd8d966d-cpxp8   1/1     Running   0          2m38s   10.20.6.13       192.168.80.150   <none>           <none>



#nginx测试
[root@localhost7K ~]# curl http://192.168.80.170:30080/webapp/index.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
       <title>马哥教育</title>
        </head>
     <body>
      <h2>linux39 web1 v1 </h2>
     </body>
</html>
[root@localhost7K ~]# curl http://192.168.80.170:30080/index.html
nginx web1 v1

#tomcat测试
[root@localhost7K ~]# curl http://192.168.80.150:30003/myapp/index.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>马哥教育</title>
</head>
<body>
    <h1>linux39 myapp v1</h1>
    <h1>linux39 myapp v2</h1>
</body>
</html>
11.k8s中nginx+tomcat实现动静分离:
实现一个通用的nginx+tomcat动静分离web架构,即用户访问的静态页面和图片在由nginx直接响应,而动态请求则基于location转发至tomcat。
重点:Nginx基于tomcat的service name转发用户请求到tomcat业务app

重构第5步的nginx业务镜像到版本V2
[root@localhost7C k8s-data]# ls dockerfile/linux39/nginx/
app1.tar.gz  build-command.sh  Dockerfile  index.html  nginx.conf  webapp

[root@localhost7C nginx]# cat nginx.conf 
user  nginx nginx;
worker_processes  auto;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;
daemon off;

events {
    worker_connections  1024;
}

http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;
    #开启,修改部分
    upstream  tomcat_webserver {
        server   linux39-tomcat-app1-service.linux39.svc.zzhz.local:80;
    }

    server {
        listen       80;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        location /webapp {
            root   html;
            index  index.html index.htm;
        }
        #开启,修改部分
        location /myapp {
             proxy_pass  http://tomcat_webserver;
             proxy_set_header   Host    $host;
             proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header X-Real-IP $remote_addr;
        }


        # redirect server error pages to the static page /50x.html
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

#升级v2版本
[root@localhost7C nginx]# ./build-command.sh v2

#修改第七步中的镜像地址 image: harbor.zzhz.com/linux39/nginx-web1:v2

[root@localhost7C nginx]# kubectl apply  -f nginx.yaml  #重新部署nginx业务pod
[root@localhost7C nginx]# kubectl get  pod  -A  -o wide
NAMESPACE              NAME                                              READY   STATUS    RESTARTS   AGE     IP               NODE             NOMINATED NODE   READINESS GATES
linux39                linux39-nginx-deployment-7894d5cc98-f29zt         1/1     Running   0          3m7s    10.20.5.13       192.168.80.160   <none>           <none>
linux39                linux39-tomcat-app1-deployment-85fd8d966d-cpxp8   1/1     Running   0          29m     10.20.6.13       192.168.80.150   <none>           <none>

#再次测试
[root@localhost7K ~]# curl http://192.168.80.160:30080/index.html 
[root@localhost7K ~]# curl http://192.168.80.160:30080/webapp/index.html 
[root@localhost7K ~]# curl http://192.168.80.160:30080/myapp/index.html 
[root@localhost7K ~]# curl http://192.168.80.160:30080/myapp/static/nginx.file
12.基于 haproxy 和 keepalived 实现高可用的反向代理,并访问到运行在 kubernetes集群中业务 Pod。

#同步时间服务
ntpdate   time1.aliyun.com && hwclock  -w
#开启路由转发
echo 1 > /proc/sys/net/ipv4/ip_forward   
#下载YUM源
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

#安装软件
yum install keepalived  haproxy
 
   
#配置keepalived
[root@localhost7B ~]# cat /etc/keepalived/keepalived.conf 

global_defs {
   notification_email {
     root@localhost
   }
   notification_email_from root@localhost
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id localhost7B
   vrrp_iptables
   vrrp_garp_interval 0
   vrrp_gna_interval 0
   vrrp_mcast_group4 224.0.0.18
}
vrrp_instance zzhz {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 95
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass centos
    }
    virtual_ipaddress {
        192.168.80.222/24 dev eth0 label eth0:1
    }
}

#配置haproxy
[root@localhost7B ~]# cat /etc/haproxy/haproxy.cfg 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats

defaults
    mode                    http

    option                  httplog
    option                  dontlognull
    option http-server-close
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

listen stats
   mode http
   bind 0.0.0.0:9999
   stats enable
   log global
   stats uri /haproxy-status
   stats auth haadmin:12345

listen k8s-nginx
   bind 192.168.80.222:80
   mode tcp
   balance roundrobin
    server 192.168.80.150 192.168.80.150:30080 check inter 2s fall 3 rise 5
    server 192.168.80.160 192.168.80.160:30080 check inter 2s fall 3 rise 5
    server 192.168.80.170 192.168.80.170:30080 check inter 2s fall 3 rise 5
    

#启动服务
systemctl enable  keepalived.service   haproxy.service 
systemctl start keepalived.service 
systemctl status  keepalived.service 
systemctl start haproxy.service 
systemctl status haproxy.service


测试:
[root@localhost7K ~]# curl http://192.168.80.222/index.html 
[root@localhost7K ~]# curl http://192.168.80.222/webapp/index.html 
[root@localhost7K ~]# curl http://192.168.80.222/myapp/index.html 
[root@localhost7K ~]# curl http://192.168.80.222/myapp/static/nginx.file
http://192.168.80.222:9999/haproxy-status

 

posted @ 2023-03-28 10:37  yuanbangchen  阅读(80)  评论(0编辑  收藏  举报