Practice Assignment 4

1. Tomcat + Nginx load balancing with Docker Compose

Requirements:

  • Understand how Nginx reverse proxying works;
  • Use Nginx to proxy a Tomcat cluster of at least two Tomcat instances;
  • Understand Nginx's load-balancing strategies and implement at least two of them (a second-strategy sketch follows the config below);

References:

Reference: https://blog.csdn.net/caijunsen/article/details/83002219

docker-compose.yml file

version: "3"
services:
  tomcat001:
    image: tomcat:8.5.0
    ports:
      - "8083:8080"
    restart: "always"
    container_name: tomcat001
  tomcat002:
    image: tomcat:8.5.0
    ports:
      - "8082:8080"
    container_name: tomcat002
    restart: "always"
  nginx:
    image: nginx
    volumes:
      - ./tom.conf:/etc/nginx/conf.d/tom.conf
    ports:
      - "81:80"
      - "443:443"
    links:
      - tomcat001:t01
      - tomcat002:t02
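
To verify that requests really alternate between the two Tomcat containers, one simple check is to give each container a distinguishable ROOT page and poll Nginx from the host. A minimal sketch (the index.html markers are arbitrary; paths follow the official tomcat:8.5.0 image layout):

# Start the stack in the background
docker-compose up -d

# Put a distinguishable page into each Tomcat
docker exec tomcat001 bash -c 'echo tomcat001 > /usr/local/tomcat/webapps/ROOT/index.html'
docker exec tomcat002 bash -c 'echo tomcat002 > /usr/local/tomcat/webapps/ROOT/index.html'

# Poll Nginx on the mapped host port 81; with equal weights the two names should alternate
for i in $(seq 1 6); do curl -s http://localhost:81/; done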

tom.conf (the Nginx configuration mounted into /etc/nginx/conf.d/)

upstream tomcat_client {
    server t01:8080 weight=1;
    server t02:8080 weight=1;
}
server {
    server_name "";
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    location / {
        proxy_pass http://tomcat_client;
        proxy_redirect default;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
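
The upstream block above uses Nginx's default weighted round-robin with equal weights, which counts as one strategy. To satisfy the requirement of a second strategy, only the upstream block needs to change; two possible variants (use one at a time, since an upstream name must be unique):

# Variant A: uneven weights, t01 receives roughly twice the requests of t02
upstream tomcat_client {
    server t01:8080 weight=2;
    server t02:8080 weight=1;
}

# Variant B: ip_hash, which pins each client IP to the same backend
# upstream tomcat_client {
#     ip_hash;
#     server t01:8080;
#     server t02:8080;
# }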

2. Deploying a JavaWeb runtime environment with Docker Compose

Requirements:

  • Build separate image services for Tomcat, the database, and so on;
  • Successfully deploy a JavaWeb application that performs simple database operations;
  • Add an Nginx reverse-proxy service on top of this environment to provide load balancing.

References:

docker-compose.yml file

version: '2'
services:
  tomcat01:
    image: tomcat:7
    container_name:  tomcat01
    ports:
     - "5050:8080"
    volumes:
     - "$PWD/webapps:/usr/local/tomcat/webapps"
    networks:
      webnet:
        ipv4_address: 15.22.0.15

  tomcat02:
    image: tomcat:7
    container_name:  tomcat02
    ports:
     - "5051:8080"
    volumes:
     - "$PWD/webapps2:/usr/local/tomcat/webapps"
    networks:
      webnet:
        ipv4_address: 15.22.0.16

  mymysql:
    build: .
    image: mymysql:test
    container_name: mymysql
    ports:
      - "3306:3306"
    command: [
            '--character-set-server=utf8mb4',
            '--collation-server=utf8mb4_unicode_ci'
    ]
    environment:
      MYSQL_ROOT_PASSWORD: "123456"
    networks:
      webnet:
        ipv4_address: 15.22.0.6

  nginx:
    container_name: web_nginx
    image: nginx
    volumes:
      - ./javaweb.conf:/etc/nginx/conf.d/javaweb.conf
    ports:
      - "80:80"
      - "443:443"
    links:
      - tomcat01:t01
      - tomcat02:t02
    networks:
      webnet:
        ipv4_address: 15.22.0.7
networks:
 webnet:
   driver: bridge
   ipam:
     config:
       - subnet: 15.22.0.0/24
         gateway: 15.22.0.2
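
The mymysql service is built from a local Dockerfile (build: .) that is not shown in this post. A minimal sketch of such a Dockerfile, assuming the application's schema is loaded from an init script in the build context (the base image tag and the file name init.sql are assumptions):

# Hypothetical Dockerfile for the mymysql:test image
FROM mysql:5.7

# The official mysql image executes any *.sql file in this directory on first
# startup, which is enough to create the tables for simple database operations
COPY init.sql /docker-entrypoint-initdb.d/

EXPOSE 3306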

javaweb.conf (the Nginx configuration mounted into /etc/nginx/conf.d/)

upstream tomcat_client {
   server t01:8080 weight=1;
   server t02:8080 weight=1;
} 
server {
   server_name "";
   listen 80 default_server;
   listen [::]:80 default_server ipv6only=on;

   location / {
       proxy_pass http://tomcat_client;
       proxy_redirect default;
       proxy_set_header Host $host;
       proxy_set_header X-Real-IP $remote_addr;
   }
}

Testing
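
A quick way to exercise the setup from the host, once the JavaWeb application has been copied into the two webapps directories (the commands below only use ports and credentials taken from the compose file above):

# Bring up all four services
docker-compose up -d

# The application should answer through the Nginx proxy on port 80
curl -I http://localhost/

# Hit the two Tomcats directly on their mapped host ports for comparison
curl -I http://localhost:5050/
curl -I http://localhost:5051/

# Verify the MySQL container accepts connections (root password from the compose file)
docker exec mymysql mysql -uroot -p123456 -e "show databases;"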

3. Building a big-data cluster environment with Docker

Building a Hadoop cluster directly on physical machines runs into all kinds of problems caused by differences between the machines' configurations. A cluster can also be built from several virtual machines, but that places high demands on the host and usually cannot support enough nodes. Building the cluster with Docker, running Hadoop inside containers, lets a Hadoop developer quickly and conveniently stand up a multi-node cluster on a single machine.

Requirements:

  • Configure a distributed Hadoop cluster with at least three nodes (one master and two slaves); a container-launch sketch follows this list;
  • Successfully run one of Hadoop's bundled test examples.
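
Before the Hadoop configuration below, the three containers have to be created on a shared Docker network so that the hostname master used in the config files resolves. A minimal launch sketch, assuming an image named hadoop-base (built in the earlier, omitted steps of the referenced write-up, with JDK, SSH and Hadoop 3.1.3 already installed):

# A user-defined bridge network lets containers resolve each other by name
docker network create --driver=bridge hadoop-net

# One master and two slaves (the image name hadoop-base is an assumption)
docker run -itd -h master  --name master  --network hadoop-net hadoop-base
docker run -itd -h slave01 --name slave01 --network hadoop-net hadoop-base
docker run -itd -h slave02 --name slave02 --network hadoop-net hadoop-base

# On master, /usr/local/hadoop-3.1.3/etc/hadoop/workers lists the slave hostnames:
#   slave01
#   slave02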

References:

5. Configure Hadoop

core-site.xml:

<configuration>
    <property>  
        <name>fs.defaultFS</name>
        <value>hdfs://master:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop-3.1.3/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
</configuration>

hdfs-site.xml:

<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop-3.1.3/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop-3.1.3/tmp/dfs/data</value>
    </property>
</configuration>

mapred-site.xml:

<configuration>
    <property>
        <!-- Run MapReduce on YARN -->
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <!-- JobHistory server address (host:port) -->
        <name>mapreduce.jobhistory.address</name>
        <value>master:10020</value>
    </property>
    <property>
        <!-- JobHistory web UI address (host:port) -->
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>master:19888</value>
    </property>
    <property>
        <!-- Classpath for MapReduce applications -->
        <name>mapreduce.application.classpath</name>
        <value>/usr/local/hadoop-3.1.3/share/hadoop/mapreduce/lib/*,/usr/local/hadoop-3.1.3/share/hadoop/mapreduce/*</value>
    </property>
</configuration>
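
With the configuration in place, the cluster is formatted and started from the master container, and one of Hadoop's bundled examples serves as the required test instance. A sketch of the commands, run inside the master container as root (paths follow the /usr/local/hadoop-3.1.3 install location used in the config files above):

cd /usr/local/hadoop-3.1.3

# Format HDFS once, then start HDFS and YARN
bin/hdfs namenode -format
sbin/start-dfs.sh
sbin/start-yarn.sh

# Run the bundled grep example: use the config files as input and
# search them for strings matching dfs[a-z.]+
bin/hdfs dfs -mkdir -p /user/root/input
bin/hdfs dfs -put etc/hadoop/*.xml /user/root/input
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar grep /user/root/input /user/root/output 'dfs[a-z.]+'

# Inspect the result
bin/hdfs dfs -cat /user/root/output/*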