k8s+jenkins 构建全部流程

k8s-jenkins 构建全部流程

初始化

主机ip 服务
192.168.23.201 docker-ce ; gitlab
192.168.23.202 docker-ce; harbor ; mysql
192.168.23.203 docker-ce; k8s-master
192.168.23.204 docker-ce; k8s-node1
192.168.23.205 docker-ce; k8s-node2

先各自配置主机名

hostnamectl set-hostname gitlab
hostnamectl set-hostname harbor
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2

关闭5台服务器的firewalld 和selinux

systemctl stop firewalld
systemctl disable firewalld

setenforce 0
sed -i '/^SELINUX/ s/enforcing/disabled/' /etc/selinux/config


cat >> /etc/sysctl.conf << 'EOF'
net.ipv4.ip_forward=1
EOF

sysctl -p
systemctl restart network

安装gitlab

先上传gitlab软件包

yum -y install policycoreutils openssh-server openssh-clients postfix

systemctl enable sshd && systemctl start sshd
systemctl enable postfix && systemctl start postfix


ls gitlab-ce-12.4.2-ce.0.el6.x86_64.rpm

rpm -ivh gitlab-ce-12.4.2-ce.0.el6.x86_64.rpm


cp -a /etc/gitlab/gitlab.rb /etc/gitlab/gitlab.rb.bak

sed -i '23 s/gitlab.example.com/192.168.23.201:82/' /etc/gitlab/gitlab.rb
sed -i "1113i nginx['listen_port'] = 82" /etc/gitlab/gitlab.rb


gitlab-ctl reconfigure
gitlab-ctl restart

浏览器访问 192.168.23.201:82

Create a project---->Project name: tensquare_back ---->Create project

windows idea:

先配置远程git: 左边选中项目右击-->git-->repository-->remotes

URL 为 gitlab 项目的 clone -->http

输入对应的用户名和密码



在harbor主机配置安装harbor

将harbor包上传

yum install -y yum-utils  device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum -y install docker-ce

systemctl start docker
systemctl enable docker 
ls docker-compose 

chmod  +x docker-compose 
mv docker-compose /usr/local/bin/
ls /usr/local/bin/docker-compose 
docker-compose --version
ls harbor-offline-installer-v1.9.2.tgz 
tar -zxvf harbor-offline-installer-v1.9.2.tgz 
mv harbor /opt/harbor
cd /opt/harbor/
cp -a harbor.yml  harbor.yml.bak

sed -i '5 s/reg.mydomain.com/192.168.23.202/' harbor.yml

sed -i '10 s/80/85/' harbor.yml

 ./prepare 
 ./install.sh 
 
 
 docker-compose up -d

浏览器访问 192.168.23.202:85

账户: admin / Harbor12345

新建项目--->项目名称: tensquare --->确定

右侧,系统管理--->用户管理--->创建用户--->用户名 tom 密码Abcd1234

右侧,项目---->tensquare--->成员----> +用户 ----> 姓名: tom ,角色:维护人员

cat > /etc/docker/daemon.json  << 'EOF'
{
  "registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.23.202:85"]
}
EOF

systemctl restart docker
docker-compose up -d
 

docker login -u tom -p Abcd1234 192.168.23.202:85


在harbor主机,配置安装mysql

上传mysql 和SonarQube

安装mysql

cd /root/
ls boost_1_59_0.tar.gz  mysql-5.7.17.tar.gz tensquare_gathering.sql  tensquare_user.sql



yum -y install \
ncurses \
ncurses-devel \
bison \
cmake

useradd -s /sbin/nologin  mysql

tar zxvf mysql-5.7.17.tar.gz -C /opt/
tar zxvf boost_1_59_0.tar.gz -C /usr/local/
cd /usr/local/
mv boost_1_59_0 boost

cd /opt/mysql-5.7.17/

cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local/mysql \
-DMYSQL_UNIX_ADDR=/usr/local/mysql/mysql.sock \
-DSYSCONFDIR=/etc \
-DSYSTEMD_PID_DIR=/usr/local/mysql \
-DDEFAULT_CHARSET=utf8  \
-DDEFAULT_COLLATION=utf8_general_ci \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \
-DMYSQL_DATADIR=/usr/local/mysql/data \
-DWITH_BOOST=/usr/local/boost \
-DWITH_SYSTEMD=1

make -j6 && make install 

chown -R mysql.mysql /usr/local/mysql/
cat > /etc/my.cnf <<'EOF'
[client]
port = 3306
default-character-set=utf8
socket = /usr/local/mysql/mysql.sock

[mysql]
port = 3306
default-character-set=utf8
socket = /usr/local/mysql/mysql.sock

[mysqld]
user = mysql
basedir = /usr/local/mysql
datadir = /usr/local/mysql/data
port = 3306
character_set_server=utf8
pid-file = /usr/local/mysql/mysqld.pid
socket = /usr/local/mysql/mysql.sock
server-id = 1

sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,PIPES_AS_CONCAT,ANSI_QUOTES
EOF
chown mysql:mysql /etc/my.cnf

echo 'PATH=/usr/local/mysql/bin:/usr/local/mysql/lib:$PATH' >> /etc/profile
echo 'export PATH' >> /etc/profile
source /etc/profile

cd /usr/local/mysql/

bin/mysqld \
--initialize-insecure \
--user=mysql \
--basedir=/usr/local/mysql \
--datadir=/usr/local/mysql/data

cp /usr/local/mysql/usr/lib/systemd/system/mysqld.service /usr/lib/systemd/system/

systemctl daemon-reload
systemctl start mysqld
netstat -anpt | grep 3306

systemctl enable mysqld


# Set the initial root password. mysqld was initialized with
# --initialize-insecure, so the current password is empty: just press
# Enter at the -p prompt.
mysqladmin -u root -p password "abc123"


# Log in with the password just set. MySQL passwords are case-sensitive,
# so this must be exactly "abc123" (the original used -pABC123, which
# does not match and fails with "Access denied").
mysql -u root -pabc123

 create database tensquare_user;
 use tensquare_user;
 source /root/tensquare_user.sql;
 create database tensquare_gathering;
 use tensquare_gathering;
 source /root/tensquare_gathering.sql;
 
 grant all privileges on *.* to 'root'@'%' identified by 'abc123' with grant option;
 flush privileges;
 
 
 exit


安装三台k8s机器

三台k8s都要操作

cat >>/etc/hosts<<EOF 
192.168.23.203 k8s-master 
192.168.23.204 k8s-node1 
192.168.23.205 k8s-node2 
EOF


yum install -y yum-utils  device-mapper-persistent-data lvm2 
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum -y install docker-ce

systemctl enable docker --now


cat >/etc/docker/daemon.json<<'EOF'
{
  "registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.23.202:85"]
}
EOF

systemctl restart docker

modprobe br_netfilter

cat >/etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1 
vm.swappiness = 0
EOF

sysctl -p /etc/sysctl.d/k8s.conf


cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# Make the ipvs module-load script executable, execute it, and confirm the
# modules are loaded. The original split this across two lines leaving a
# bare `bash` at the end of the first one, which would open an interactive
# shell instead of running the script.
chmod 755 /etc/sysconfig/modules/ipvs.modules \
  && bash /etc/sysconfig/modules/ipvs.modules \
  && lsmod | grep -e ip_vs -e nf_conntrack_ipv4


swapoff  -a

sed -i '/swap/ s/^/#/' /etc/fstab 
mount -a
yum clean all

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

ls /etc/yum.repos.d/
yum install -y kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0

systemctl enable kubelet
kubelet --version




Master 节点操作

cat > /etc/docker/daemon.json << 'EOF'
{
  "registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
  "insecure-registries": ["192.168.23.202:85"],
  "exec-opts":["native.cgroupdriver=systemd"]
}
EOF


[ `hostname` = 'k8s-master' ] && kubeadm init --kubernetes-version=1.17.0 \
--apiserver-advertise-address=192.168.23.203 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16

复制初始化后的命令到记事本

systemctl restart kubelet
systemctl status kubelet

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 
sudo chown $(id -u):$(id -g) $HOME/.kube/config
mkdir k8s
cd k8s

wget --no-check-certificate https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

sed -i 's/192.168.0.0/10.244.0.0/g' calico.yaml

kubectl apply -f calico.yaml

slave 节点需要完成

# 输入初始化时复制的 此条命令
kubeadm join 192.168.23.203:6443 --token......


此时,可以在master 节点,使用  kubectl get nodes 查看到节点



在k8s-master安装和配置NFS

yum install -y nfs-utils

mkdir -p /opt/nfs/{jenkins,maven}

chmod  -R 777 /opt/nfs

cat > /etc/exports <<'EOF'
/opt/nfs/jenkins *(rw,no_root_squash)
/opt/nfs/maven *(rw,no_root_squash)
EOF


systemctl start  nfs
systemctl enable nfs



安装nfs-client

上传nfs-client-provisioner 到 master

 cd /root/
 ls nfs-client.zip 

 unzip nfs-client.zip 

cd nfs-client
sed -i '/192.*/ s/192.*/192.168.23.203/g'  deployment.yaml
kubectl apply -f .

kubectl get pods




安装Jenkins-master

上传jenkins-master 到 master主机

 cd /root/
 ls jenkins-master.zip

unzip jenkins-master.zip
cd jenkins-master/


kubectl create ns kube-ops
kubectl get ns


kubectl create -f .


kubectl get pods -n kube-ops -o wide
kubectl  get service  -n kube-ops

等到jenkins-0 pod 状态为READY 1/1 Running,使用浏览器访问jenkins所在 node 的ip 以及内部8080映射出来的端口


要求输入初始密码,输入下面的,然后tab 一下

cd /opt/nfs/jenkins/kube-ops-jenkins-home-jenkins-0

cd secrets/
cat initialAdminPassword 

获取到初始密码后进入jenkins


选择插件来安装----->无 ------->安装

创建用户: jerry / abc123


配置Jenkins

配置镜像加速

在服务器上,输入下列,然后tab

cd /opt/nfs/jenkins/kube-ops-jenkins-home-jenkins-0-pvc   # tab补全一下

cd updates/

# Point the Jenkins plugin update centre at the Tsinghua mirror and replace
# the google.com connectivity check with baidu.com (reachable from CN).
# Uses '|' as the sed delimiter to avoid escaping every '/' in the URLs.
# NOTE: the original pattern contained a stray space ("jenkins- ci.org"),
# so it never matched and the mirror substitution silently did nothing.
sed -i 's|http://updates.jenkins-ci.org/download|https://mirrors.tuna.tsinghua.edu.cn/jenkins|g' default.json \
  && sed -i 's|http://www.google.com|https://www.baidu.com|g' default.json

在jenkins 的web 界面

在插件下载的 高级选项(Advanced) 界面,Update Site栏,使用下面地址替换

https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json

提交后,重启jenkins


下载必要插件

Localization: Chinese

Git

Pipeline

Extended Choice Parameter

kubernetes

然后重启Jenkins



实现Jenkins与Kubernetes整合

系统管理--->系统配置--->云---> a separate configuration page.--->新建云--->kubernetes

(如果在系统配置里没有 云 ,则是没有下载 kubernetes插件)

Kubernetes 地址: https://kubernetes.default.svc.cluster.local

Kubernetes 命名空间: kube-ops

----> 连接测试。 要能看到 Connected to Kubernetes v1.17.0

Jenkins 地址: http://jenkins.kube-ops.svc.cluster.local:8080

---->保存


构建带有maven到Jenkins镜像


将Jenkins-slave.zip 包上传到master上

cd /root
ls jenkins-slave.zip 
unzip jenkins-slave.zip 
cd jenkins-slave/


docker build -t  jenkins-slave-maven:latest .


 docker images | grep jenkins-slave-maven
docker login -u admin -p Harbor12345 192.168.23.202:85

docker tag jenkins-slave-maven:latest 192.168.23.202:85/library/jenkins-slave-maven:latest

docker push  192.168.23.202:85/library/jenkins-slave-maven




配置凭证和权限

配置gitlab的 username 方式凭证

配置Harbor的username 凭证

为所有k8s节点配置docker.sock文件权限

chmod 777 /var/run/docker.sock

下载插件 Kubernetes Continuous Deploy

配置kubernetes configuration 类型的凭据

描述: k8s-auth

Enter directly: 复制 master上 /root/.kube/config 里的所有内容

----->确定


获取3个凭证的 id



创建pipeline项目, 编写pipeline 脚本

创建pipeline项目

项目名: tensquare_back

参数化构建----->Extended Choice Parameter

Name: project_name ----> Description : 请输入需要构建的项目

Basic Parameter Types---->Parameter Typ: Check Boxes ---->Numbe..... : 4 ---->Delimiter: , (注意,英文逗号)

Value: tensquare_eureka_server@10086,tensquare_zuul@10020,tensquare_admin_service@9001,tensquare_gathering@9002

---->

Default Value: tensquare_eureka_server@10086

----->

Description: 注册中心,网关服务,权限管理,活动微服

--->

应用


流水线----->Pipeline script

注意修改 git凭证,harbor凭证,k8s凭证的 id

修改harbor仓库地址,数据卷挂载的 服务器地址为 k8s-master的地址

// Jenkins scripted pipeline: builds the selected tensquare microservices,
// pushes their images to Harbor, and deploys them to Kubernetes.
// NOTE(review): replace the three credential ids below with the ids
// generated in your own Jenkins instance.
def git_address = "http://192.168.23.201:82/root/tensquare_back.git" 
def git_auth = "ce4ec592-44ae-45df-9e56-781d5a7be444"
//Version name (docker tag) used for the built images
def tag = "latest"
//Harbor private registry address
def harbor_url = "192.168.23.202:85"
//Harbor project name
def harbor_project_name = "tensquare"
//Harbor credential id (username/password credential in Jenkins)
def harbor_auth = "25b940d0-8df4-4e09-b8fa-b110570afc37"
//k8s credential id (kubeconfig credential in Jenkins)
def k8s_auth="20072727-2a75-4042-bc63-1c72cceeee33"

//Name of the k8s docker-registry secret used to pull from Harbor; this
//resource must be created in the cluster beforehand (see the
//`kubectl create secret docker-registry ...` step later in this document)
def secret_name="registry-auth-secret"


// Dynamic agent pod: a jnlp container with maven baked in, plus a docker-cli
// container (kept alive via `cat`) that talks to the host docker daemon.
podTemplate(label: 'jenkins-slave', cloud: 'kubernetes', containers: [ 
        containerTemplate(
            name: 'jnlp',
            image: "192.168.23.202:85/library/jenkins-slave-maven:latest"
        ),
        containerTemplate( 
            name: 'docker',
            image: "docker:stable",
            ttyEnabled: true,
            command: 'cat'
        ),
    ],
    volumes: [
        // Host docker socket so the 'docker' container can build/tag/push images
        hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock'),
        // Shared maven repository served over NFS from the k8s master (192.168.23.203)
        nfsVolume(mountPath: '/usr/local/apache-maven/repo', serverAddress: '192.168.23.203' , serverPath: '/opt/nfs/maven'),
    ],
)
{
node("jenkins-slave"){
    // Step 1: check out the source from GitLab (master branch)
    stage('pull code'){
        checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[credentialsId: "${git_auth}", url: "${git_address}"]]])
    }
    // Step 2
    stage('make public sub project'){
        //Compile and install the shared common project into the local maven repo
        sh "mvn -f tensquare_common clean install"
    }
    // Step 3
    stage('make image'){
        //Split the selected projects (Extended Choice build parameter,
        //comma-separated "<name>@<port>" entries) into an array
        def selectedProjects = "${project_name}".split(',')


        for(int i=0;i<selectedProjects.size();i++){
            //Take each project's name and port out of the "<name>@<port>" entry
            def currentProject = selectedProjects[i];
            //Project name
            def currentProjectName = currentProject.split('@')[0]
            //Port the project listens on
            def currentProjectPort = currentProject.split('@')[1]

            //Image name in <project>:<tag> form
            def imageName = "${currentProjectName}:${tag}"

            //Compile and build the local docker image via the dockerfile maven plugin
            sh "mvn -f ${currentProjectName} clean package dockerfile:build"
            container('docker') {

                //Tag the image for the Harbor registry
                sh "docker tag ${imageName} ${harbor_url}/${harbor_project_name}/${imageName}"

                //Log in to Harbor and upload the image
                withCredentials([usernamePassword(credentialsId: "${harbor_auth}", passwordVariable: 'password', usernameVariable: 'username')])
                {
                    //Login
                    sh "docker login -u ${username} -p ${password} ${harbor_url}"
                    //Push the image
                    sh "docker push ${harbor_url}/${harbor_project_name}/${imageName}"
                }

            //Remove the local copies of the image to free disk space
            sh "docker rmi -f ${imageName}" 
            sh "docker rmi -f ${harbor_url}/${harbor_project_name}/${imageName}"

            //Full image reference the cluster will pull
            def deploy_image_name = "${harbor_url}/${harbor_project_name}/${imageName}"

            //Deploy to K8S. deploy.yml must exist inside each sub-project;
            //the sed calls substitute its $IMAGE_NAME / $SECRET_NAME placeholders.
            sh """
                sed -i 's#\$IMAGE_NAME#${deploy_image_name}#' ${currentProjectName}/deploy.yml
                sed -i 's#\$SECRET_NAME#${secret_name}#' ${currentProjectName}/deploy.yml
            """
            kubernetesDeploy configs: "${currentProjectName}/deploy.yml", kubeconfigId: "${k8s_auth}"
            }
        }
    }
}
}


构建前步骤

三台k8s登录harbor仓库

docker login -u tom -p Abcd1234 192.168.23.202:85

master上创建secret资源

kubectl create secret docker-registry registry-auth-secret --docker-server=192.168.23.202:85 --docker-username=tom --docker-password=Abcd1234 --docker-email=tom@qq.com


eureka部分

在eureka项目下创建文件 Dockerfile

#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 10086
ENTRYPOINT ["java","-jar","/app.jar"]

在eureka项目下,创建文件deploy.yml

# 创建service 资源
apiVersion: v1
kind: Service
metadata:
  name: eureka
  labels:
    app: eureka
spec:
  type: NodePort
  ports:
    - port: 10086
      name: eureka
      targetPort: 10086
  selector:
    app: eureka
---
# 创建服务资源
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: eureka
spec:
  serviceName: "eureka"
  replicas: 2
  selector:
    matchLabels:
      app: eureka
  template:
    metadata:
      labels:
        app: eureka
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: eureka
          image: $IMAGE_NAME
          ports:
          - containerPort: 10086
          env:
          - name: MY_POD_NAME
            valueFrom:
             fieldRef:
              fieldPath: metadata.name
          - name: EUREKA_SERVER
            value: "http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/"
          - name: EUREKA_INSTANCE_HOSTNAME
            value: ${MY_POD_NAME}.eureka
  podManagementPolicy: "Parallel"

修改eureka项目的application.yml文件

server:
  port: ${PORT:10086}
spring:
  application:
    name: eureka

eureka:
  server:
    # 续期时间,即扫描失效服务的间隔时间(缺省为60*1000ms)
    eviction-interval-timer-in-ms: 5000
    enable-self-preservation: false
    use-read-only-response-cache: false
  client:
    # eureka client间隔多久去拉取服务注册信息 默认30s
    registry-fetch-interval-seconds: 5
    serviceUrl:
      defaultZone: ${EUREKA_SERVER:http://127.0.0.1:${server.port}/eureka/}
  instance:
    # 心跳间隔时间,即发送一次心跳之后,多久在发起下一次(缺省为30s)
    lease-renewal-interval-in-seconds: 5
    #  在收到一次心跳之后,等待下一次心跳的空档时间,大于心跳间隔即可,即服务续约到期时间(缺省为90s)
    lease-expiration-duration-in-seconds: 10
    instance-id: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}:${server.port}@${random.long(1000000,9999999)}
    hostname: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}

zuul 部分

上传父工程依赖

cd /opt/nfs/maven/com/tensquare
ls tensquare_parent.zip
unzip tensquare_parent.zip

在zuul 项目下创建Dockerfile文件

#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 10020
ENTRYPOINT ["java","-jar","/app.jar"]

在zuul 项目创建deploy.yml文件

---
apiVersion: v1
kind: Service
metadata:
  name: zuul
  labels:
    app: zuul
spec:
  type: NodePort
  ports:
    - port: 10020
      name: zuul
      targetPort: 10020
  selector:
    app: zuul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zuul
spec:
  serviceName: "zuul"
  replicas: 2
  selector:
    matchLabels:
      app: zuul
  template:
    metadata:
      labels:
        app: zuul
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: zuul
          image: $IMAGE_NAME
          ports:
            - containerPort: 10020
  podManagementPolicy: "Parallel"

修改zuul 项目的application.yml文件

server:
  port: 10020 # 端口

# 基本服务信息
spring:
  application:
    name: tensquare-zuul # 服务ID

# Eureka配置
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/ # Eureka访问地址
  instance:
    prefer-ip-address: true

# 修改ribbon的超时时间
ribbon:
  ConnectTimeout: 1500 # 连接超时时间,默认500ms
  ReadTimeout: 3000  # 请求超时时间,默认1000ms


# 修改hystrix的熔断超时时间
hystrix:
  command:
    default:
      execution:
        isolation:
          thread:
            timeoutInMilliseconds: 2000 # 熔断超时时长,默认1000ms



# 网关路由配置
zuul:
  routes:
    admin:
      path: /admin/**
      serviceId: tensquare-admin-service
    gathering:
      path: /gathering/**
      serviceId: tensquare-gathering

  # jwt参数
jwt:
  config:
    key: itcast
    ttl: 1800000


admin_service部分

在admin_service项目下创建Dockerfile文件

#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 9001
ENTRYPOINT ["java","-jar","/app.jar"]

在admin_service项目下创建deploy.yml文件

---
apiVersion: v1
kind: Service
metadata:
  name: admin
  labels:
    app: admin
spec:
  type: NodePort
  ports:
    - port: 9001
      name: admin
      targetPort: 9001
  selector:
    app: admin
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: admin
spec:
  serviceName: "admin"
  replicas: 2
  selector:
    matchLabels:
      app: admin
  template:
    metadata:
      labels:
        app: admin
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: admin
          image: $IMAGE_NAME
          ports:
            - containerPort: 9001
  podManagementPolicy: "Parallel"

修改admin_service的application.yml文件

server:
  port: 9001
spring:
  application:
    name: tensquare-admin-service #指定服务名
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.23.202:3306/tensquare_user?characterEncoding=UTF8
    username: root
    password: abc123
  jpa:
    database: mysql
    show-sql: true

#Eureka配置
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/
  instance:
    lease-renewal-interval-in-seconds: 5 # 每隔5秒发送一次心跳
    lease-expiration-duration-in-seconds: 10 # 10秒不发送就过期
    prefer-ip-address: true



  # jwt参数
jwt:
  config:
    key: itcast
    ttl: 1800000


gathering部分

在gathering项目下创建Dockerfile文件

#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 9002
ENTRYPOINT ["java","-jar","/app.jar"]

在gathering项目下创建deploy.yml文件

---
apiVersion: v1
kind: Service
metadata:
  name: gathering
  labels:
    app: gathering
spec:
  type: NodePort
  ports:
    - port: 9002
      name: gathering
      targetPort: 9002
  selector:
    app: gathering
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: gathering
spec:
  serviceName: "gathering"
  replicas: 2
  selector:
    matchLabels:
      app: gathering
  template:
    metadata:
      labels:
        app: gathering
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: gathering
          image: $IMAGE_NAME
          ports:
            - containerPort: 9002
  podManagementPolicy: "Parallel"

修改gathering的application.yml文件

server:
  port: 9002
spring:
  application:
    name: tensquare-gathering #指定服务名
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.23.202:3306/tensquare_gathering?characterEncoding=UTF8
    username: root
    password: abc123
  jpa:
    database: mysql
    show-sql: true


#Eureka客户端配置
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/
  instance:
    lease-renewal-interval-in-seconds: 5 # 每隔5秒发送一次心跳
    lease-expiration-duration-in-seconds: 10 # 10秒不发送就过期
    prefer-ip-address: true



postman测试部分


先在k8s-master 上查看zuul 的映射端口

kubectl get service 

选择post 形式, http://192.168.23.204:映射端口/admin/admin/login

Body--->raw--->Json

{
    "loginname":"admin",
    "password":"123456"
}

----> Send


复制出现的token令牌


新开窗口,选择get 方式获取 http://192.168.23.204:zuul映射端口/gathering/gathering/

选择 Headers 页面

Key: token

Value: 复制的token令牌

---->send

posted @ 2022-03-12 02:42  知己一语  阅读(1851)  评论(0编辑  收藏  举报