Writing Data from Spark to GeoMesa (HBase)
package com.grady.geomesa

import org.apache.spark.sql.jts.PointUDT
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, sql}
import org.geotools.data.DataStoreFinder
import org.geotools.geometry.jts.JTSFactoryFinder
import org.locationtech.geomesa.utils.geotools.SchemaBuilder
import org.locationtech.jts.geom.Coordinate
import org.locationtech.geomesa.spark.jts._

import scala.collection.JavaConversions._

object SparkWriteGeomesa {

  val ToGeomesaCatalog = "gradytest"
  val ToGeomesaCatalogFeature = "student"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("SparkWriteGeomesa")
    // withJTS registers the JTS geometry UDTs and spatial SQL functions
    val ss = SparkSession.builder().config(conf).getOrCreate().withJTS

    // Assemble the data to write
    val inDataFrame = gainData(ss)
    inDataFrame.show()

    // Write the data
    saveData(inDataFrame, ToGeomesaCatalog, ToGeomesaCatalogFeature)
    ss.stop()
  }

  def gainData(ss: SparkSession): sql.DataFrame = {
    val inDataRDD = ss.sparkContext.makeRDD(
      Array("1,jack,15,11.10,12.10", "2,Lily,16,12.10,13.10", "3,mike,16,14.10,15.10"))
    val rowRDD = inDataRDD.map(_.split(','))
      .map(row => {
        val id = row(0)
        val name = row(1)
        val age = row(2)
        val lon = row(3).toDouble
        val lat = row(4).toDouble
        val factory = JTSFactoryFinder.getGeometryFactory()
        val coordinate = new Coordinate(lon, lat)
        val geom = factory.createPoint(coordinate)
        Row(geom, id, name, age)
      })

    val structType = StructType(
      Seq(
        StructField("geom", PointUDT, nullable = true),
        StructField("studentId", StringType, nullable = true),
        StructField("name", StringType, nullable = true),
        StructField("age", StringType, nullable = true)
      )
    )
    ss.createDataFrame(rowRDD, structType)
  }

  def saveData(inDataFrame: sql.DataFrame, toGeomesaCatalog: String, toGeomesaCatalogFeature: String): Unit = {
    val sft = SchemaBuilder.builder()
      .addPoint("geom", true)
      // "id" cannot be used as an attribute name: it is reserved by GeoMesa
      .addString("studentId")
      .addString("name")
      .addString("age")
      .build(toGeomesaCatalogFeature)

    // geomesa.mixed.geometries allows the geometry attribute to hold both
    // point and non-point geometries
    sft.getUserData.put("geomesa.mixed.geometries", Boolean.box(true))

    val params = Map(
      "hbase.zookeepers" -> "10.82.232.64:2181",
      "hbase.catalog" -> toGeomesaCatalog)

    // Create the feature type (schema) before writing
    val ds = DataStoreFinder.getDataStore(params)
    ds.createSchema(sft)
    ds.dispose()

    inDataFrame.write.format("geomesa")
      .options(params)
      .option("geomesa.feature", toGeomesaCatalogFeature)
      .save()
  }
}
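To sanity-check the round trip, the same "geomesa" Spark data source can read the feature back. A minimal sketch (not part of the original job), assuming it runs in the same application with the `ss` session and `params` map from above; it also uses the st_x/st_y functions registered by withJTS:

// Read-back sketch (assumed, not in the original job): load the "student"
// feature through the same "geomesa" data source and inspect it.
val readDf = ss.read.format("geomesa")
  .options(params)
  .option("geomesa.feature", "student")
  .load()
readDf.show()

// withJTS registers spatial SQL functions such as st_x/st_y, so the stored
// points can be unpacked back into plain lon/lat columns:
readDf.createOrReplaceTempView("student")
ss.sql("SELECT studentId, name, st_x(geom) AS lon, st_y(geom) AS lat FROM student").show()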
Check in the HBase shell that the tables were created:
hbase(main):001:0> list
TABLE
gradytest
gradytest_student_id_v4
gradytest_student_z2_geom_v5
## GeoMesa serializes feature data, so the scan below returns the serialized form
## (see the read-back sketch after the scan output for readable values)
hbase(main):003:0> scan 'gradytest_student_z2_geom_v5'
ROW COLUMN+CELL
\x000\x85\xD7\x8C\x9B\xE0\xE7\xF column=d:, timestamp=2022-02-09T19:21:38.287, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
F000017ed-e37a-4e60-b90f-93fc81e x00\x00\x00\x00\x01\x01@&333333@(333333\x821jac\xEB1\xB5
0ab0e
\x000\x922Q\xB1\xD4\x1E\xFF00001 column=d:, timestamp=2022-02-09T19:19:35.603, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
7ed-e35c-4d77-99e7-c6918a06c008 x00\x00\x00\x00\x01\x01@(333333@*333333\x822Lil\xF91\xB6
\x000\x99\x8A\xA5\xB6\xEBQ\x0200 column=d:, timestamp=2022-02-09T19:21:38.332, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
0017ed-e37a-4ebd-b3a5-a9c7399a63 x00\x00\x00\x00\x01\x01@,333333@.333333\x823mik\xE51\xB6
5b
\x000\x99\x8A\xA5\xB6\xEBQ\x0200 column=d:, timestamp=2022-02-09T20:02:28.707, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
0017ed-e5d1-4257-a75d-b0e2372954 x00\x00\x00\x00\x01\x01@,333333@.333333\x823mik\xE51\xB6
2e
\x020\x85\xD7\x8C\x9B\xE0\xE7\xF column=d:, timestamp=2022-02-09T19:19:35.334, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
F000017ed-e35c-4d77-a841-b3bcf6f x00\x00\x00\x00\x01\x01@&333333@(333333\x821jac\xEB1\xB5
aa8ac
\x020\x922Q\xB1\xD4\x1E\xFF00001 column=d:, timestamp=2022-02-09T19:21:38.285, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
7ed-e37a-4e60-9d7f-66988be48234 x00\x00\x00\x00\x01\x01@(333333@*333333\x822Lil\xF91\xB6
\x020\x99\x8A\xA5\xB6\xEBQ\x0200 column=d:, timestamp=2022-02-09T19:19:35.335, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
0017ed-e35c-4e9a-8600-97ed8d92c4 x00\x00\x00\x00\x01\x01@,333333@.333333\x823mik\xE51\xB6
8b
\x030\x85\xD7\x8C\x9B\xE0\xE7\xF column=d:, timestamp=2022-02-09T20:02:28.622, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
F000017ed-e5d1-41f8-ae71-84db58b x00\x00\x00\x00\x01\x01@&333333@(333333\x821jac\xEB1\xB5
9478f
\x030\x922Q\xB1\xD4\x1E\xFF00001 column=d:, timestamp=2022-02-09T20:02:28.662, value=\x03\x00\x04\x02\x00\x0E\x00 \x00"\x00&\x00(\
7ed-e5d1-41f8-a308-efcee8b70bf9 x00\x00\x00\x00\x01\x01@(333333@*333333\x822Lil\xF91\xB6
9 row(s)
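Since the raw scan only shows serialized bytes, the readable way to inspect rows is to go back through the GeoMesa data store. A small sketch using the plain GeoTools API, assuming the same connection parameters as the write job:

import org.geotools.data.{DataStoreFinder, Query, Transaction}
import scala.collection.JavaConversions._

// Sketch: read the features back via GeoTools to see the deserialized values
// (same zookeepers/catalog as the write job; adjust for your environment).
val ds = DataStoreFinder.getDataStore(Map(
  "hbase.zookeepers" -> "10.82.232.64:2181",
  "hbase.catalog" -> "gradytest"))
val reader = ds.getFeatureReader(new Query("student"), Transaction.AUTO_COMMIT)
while (reader.hasNext) {
  val feature = reader.next()
  println(s"${feature.getID}: ${feature.getAttributes}")
}
reader.close()
ds.dispose()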
pom.xml:
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>spark-practise</artifactId>
        <groupId>org.example</groupId>
        <version>1.0-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>geomesa</artifactId>

    <properties>
        <maven.compiler.source>8</maven.compiler.source>
        <maven.compiler.target>8</maven.compiler.target>
        <geomesa.version>3.3.0</geomesa.version>
    </properties>

    <!-- hadoop.version, spark.version, scala.version and scala.binary.version
         are assumed to be defined in the parent pom (spark-practise) -->
    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.locationtech.geomesa</groupId>
            <artifactId>geomesa-hbase-spark-runtime-hbase2_2.12</artifactId>
            <version>${geomesa.version}</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.hadoop</groupId>
                    <artifactId>hadoop-hdfs</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.locationtech.geomesa</groupId>
            <artifactId>geomesa-spark-core_2.12</artifactId>
            <version>${geomesa.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-yarn_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
            <scope>provided</scope>
        </dependency>
    </dependencies>

    <build>
        <resources>
            <resource>
                <directory>src/main/resources</directory>
                <filtering>true</filtering>
            </resource>
        </resources>
        <plugins>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.1</version>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                    <scalaVersion>${scala.version}</scalaVersion>
                </configuration>
                <executions>
                    <execution>
                        <id>scala-compile-first</id>
                        <phase>process-resources</phase>
                        <goals>
                            <goal>add-source</goal>
                            <goal>compile</goal>
                        </goals>
                    </execution>
                    <execution>
                        <id>scala-test-compile</id>
                        <phase>process-test-resources</phase>
                        <goals>
                            <goal>testCompile</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.1</version>
                <configuration>
                    <artifactSet>
                        <excludes>
                            <exclude>org.slf4j:*</exclude>
                        </excludes>
                    </artifactSet>
                </configuration>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <createDependencyReducedPom>false</createDependencyReducedPom>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <!-- merge META-INF/services entries so GeoMesa's SPI
                                 registrations survive shading -->
                            <transformers>
                                <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>