pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>org.example</groupId>
    <artifactId>project_traffic</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <!-- Spark SQL, built against Scala 2.11 -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>2.3.1</version>
        </dependency>
        <!-- Spark-Hive integration: lets SparkSession use the Hive metastore -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>2.3.1</version>
        </dependency>
        <!-- MySQL JDBC driver, used by the Hive metastore and by Spark JDBC writes -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.18</version>
        </dependency>
    </dependencies>
</project>
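With these dependencies in place, the driver program only needs a SparkSession with Hive support enabled. A minimal Scala sketch, assuming the cluster configuration files shown below are on the application classpath (e.g. under src/main/resources); the object name and application name are placeholders:

import org.apache.spark.sql.SparkSession

object TrafficApp {
  def main(args: Array[String]): Unit = {
    // enableHiveSupport() requires spark-hive_2.11 on the classpath and
    // picks up hive-site.xml / core-site.xml / hdfs-site.xml automatically
    val spark = SparkSession.builder()
      .appName("project_traffic")
      .enableHiveSupport()
      .getOrCreate()

    // Sanity check against the MySQL-backed metastore
    spark.sql("SHOW DATABASES").show()

    spark.stop()
  }
}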
core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- Default filesystem: the logical HA nameservice defined in hdfs-site.xml -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://HadoopCluster</value>
    </property>

    <!-- Base directory for Hadoop's working data -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/app/hadoop-2.8.5/metaData</value>
    </property>

    <!-- ZooKeeper quorum used by the ZKFC for automatic failover -->
    <property>
        <name>ha.zookeeper.quorum</name>
        <value>node1:2181,node2:2181,node3:2181</value>
    </property>
</configuration>
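Because fs.defaultFS points at the logical nameservice HadoopCluster rather than a single host, client code never names a specific NameNode. A small Scala sketch of touching the cluster through the Hadoop FileSystem API, assuming core-site.xml and hdfs-site.xml are on the classpath; the listed path is just an example:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object HdfsCheck {
  def main(args: Array[String]): Unit = {
    // new Configuration() loads core-site.xml / hdfs-site.xml from the classpath,
    // so the client resolves hdfs://HadoopCluster without knowing which NameNode is active
    val conf = new Configuration()
    val fs = FileSystem.get(conf)

    fs.listStatus(new Path("/")).foreach(status => println(status.getPath))
    fs.close()
  }
}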
hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>

    <!-- Note: a SecondaryNameNode is not needed once NameNode HA is enabled;
         the standby NameNode takes over checkpointing -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>node3:50090</value>
    </property>

    <!-- Logical name of the HA nameservice -->
    <property>
        <name>dfs.nameservices</name>
        <value>HadoopCluster</value>
    </property>

    <!-- The two NameNodes that make up the nameservice -->
    <property>
        <name>dfs.ha.namenodes.HadoopCluster</name>
        <value>nn1,nn2</value>
    </property>

    <!-- RPC and HTTP addresses of each NameNode -->
    <property>
        <name>dfs.namenode.rpc-address.HadoopCluster.nn1</name>
        <value>node1:9000</value>
    </property>

    <property>
        <name>dfs.namenode.rpc-address.HadoopCluster.nn2</name>
        <value>node2:9000</value>
    </property>

    <property>
        <name>dfs.namenode.http-address.HadoopCluster.nn1</name>
        <value>node1:50070</value>
    </property>

    <property>
        <name>dfs.namenode.http-address.HadoopCluster.nn2</name>
        <value>node2:50070</value>
    </property>

    <!-- JournalNodes that store the shared edit log -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://node1:8485;node2:8485;node3:8485/HadoopCluster</value>
    </property>

    <!-- Fence the old active NameNode over SSH during failover -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence</value>
    </property>

    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>

    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/opt/app/hadoop/journalnodeData</value>
    </property>

    <!-- The correct property name is dfs.permissions.enabled (not dfs.permissions.enable) -->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>

    <!-- Client-side proxy that resolves whichever NameNode is currently active -->
    <property>
        <name>dfs.client.failover.proxy.provider.HadoopCluster</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>

    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
</configuration>
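The client-side half of this file (nameservice, NameNode addresses, failover proxy provider) can also be supplied programmatically when the XML is not on the classpath, for example in a standalone test. A hedged Scala sketch that mirrors the values above; the checked path is only an example:

import java.net.URI
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

object HaClient {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    conf.set("fs.defaultFS", "hdfs://HadoopCluster")
    conf.set("dfs.nameservices", "HadoopCluster")
    conf.set("dfs.ha.namenodes.HadoopCluster", "nn1,nn2")
    conf.set("dfs.namenode.rpc-address.HadoopCluster.nn1", "node1:9000")
    conf.set("dfs.namenode.rpc-address.HadoopCluster.nn2", "node2:9000")
    conf.set("dfs.client.failover.proxy.provider.HadoopCluster",
      "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

    // The ConfiguredFailoverProxyProvider tries nn1/nn2 and sticks with the active one
    val fs = FileSystem.get(new URI("hdfs://HadoopCluster"), conf)
    println(fs.exists(new Path("/user")))  // "/user" is just an example path
    fs.close()
  }
}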
hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- The "&" in the JDBC URL must be escaped as &amp; in XML -->
    <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://192.168.200.111:3306/hive_metastore?serverTimezone=UTC&amp;createDatabaseIfNotExist=true</value>
        <description>JDBC connect string for a JDBC metastore</description>
    </property>
    <!-- Connector/J 8.x driver class; com.mysql.jdbc.Driver is its deprecated 5.x name -->
    <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.cj.jdbc.Driver</value>
        <description>Driver class name for a JDBC metastore</description>
    </property>
    <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
        <description>username to use against metastore database</description>
    </property>

    <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>Jsq123456...</value>
        <description>password to use against metastore database</description>
    </property>

    <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>hdfs://HadoopCluster/user/hive/warehouse</value>
        <description>location of default database for the warehouse</description>
    </property>
    <!-- Print column headers in Hive CLI query output -->
    <property>
        <name>hive.cli.print.header</name>
        <value>true</value>
    </property>

    <!-- Show the current database name in the Hive CLI prompt -->
    <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
    </property>
</configuration>
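With the metastore wired to MySQL and the warehouse on hdfs://HadoopCluster, Spark can read Hive tables and push results back out over JDBC using the same mysql-connector-java dependency. A Scala sketch in which the Hive table (traffic.monitor_flow), the target MySQL database/table (traffic, traffic_result), and the masked password are all placeholders:

import java.util.Properties
import org.apache.spark.sql.{SaveMode, SparkSession}

object HiveToMysql {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("hive-to-mysql")
      .enableHiveSupport()
      .getOrCreate()

    // Placeholder Hive table; replace with a real database.table
    val df = spark.sql("SELECT * FROM traffic.monitor_flow")

    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "******")  // replace with the real MySQL password
    props.setProperty("driver", "com.mysql.cj.jdbc.Driver")

    // Write the result set into a MySQL table named traffic_result
    df.write
      .mode(SaveMode.Overwrite)
      .jdbc("jdbc:mysql://192.168.200.111:3306/traffic?serverTimezone=UTC", "traffic_result", props)

    spark.stop()
  }
}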