复习 Spark —— 创建 SparkSession 及跑通读取 JSON

导入依赖
<!-- Maven dependencies for the Spark SQL demo below (Scala 2.12 binary). -->
<dependencies>
        <!-- Spark SQL: provides SparkSession / DataFrame used in the demo. -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.12</artifactId>
            <version>2.4.6</version>
        </dependency>

        <!-- Hive integration for Spark (enableHiveSupport); not exercised by
             the JSON-reading demo itself. -->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.12</artifactId>
            <version>2.4.6</version>
        </dependency>

        <!-- NOTE(review): Spark 2.4.x is built against Hive 1.2/2.3; pairing it
             with hive-exec 3.1.2 may cause classpath conflicts — confirm this
             combination actually works in your environment. -->
        <dependency>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-exec</artifactId>
            <version>3.1.2</version>
        </dependency>

        <!-- JDBC driver for reading/writing MySQL via spark.read.jdbc.
             NOTE(review): 5.1.x is legacy; verify it matches your MySQL server
             version. -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.27</version>
        </dependency>
    </dependencies>

代码
<details>
<summary>点击查看代码</summary>

import org.apache.spark.sql.{DataFrame, DataFrameReader, SparkSession}

object sqlTest {

  /**
   * Entry point: builds a local-mode SparkSession, reads a JSON-lines file
   * into a DataFrame, prints it, and shuts the session down.
   *
   * Uses an explicit `main` instead of `extends App`: the `App` trait relies
   * on delayed initialization, which has initialization-order pitfalls and is
   * deprecated in newer Scala versions.
   */
  def main(args: Array[String]): Unit = {
    // "local" master runs everything in-process — demo/learning setup only.
    val session = SparkSession.builder()
      .appName("sqltest")
      .master("local")
      .getOrCreate()

    // Enables $"col" syntax and Dataset encoders; not strictly needed by this
    // snippet but conventional to import right after creating the session.
    import session.implicits._

    // Data acquisition: generic pattern is format + path; common formats
    // (json, parquet, csv, ...) have dedicated reader methods.
    val read: DataFrameReader = session.read
    // JSON-lines input is read directly into a DataFrame; Spark infers the
    // schema (here: name, plus a nullable age for records that omit it).
    val df: DataFrame = read.json("src/main/resources/json")
    df.show()

    session.stop()
  }
}

</details>

![](https://img2022.cnblogs.com/blog/2487693/202204/2487693-20220414142703673-1090333426.png)

<details>
<summary>点击查看代码</summary>

{"name":"zhangsan","age":70}
{"name":"lisi"}
{"name":"wangwu","age":18}
{"name":"laoliu","age":28}
{"name":"zhangsan","age":70}
{"name":"lisi"}
{"name":"wangwu","age":18}
{"name":"laoliu","age":28}
{"name":"zhangsan","age":70}
{"name":"lisi"}

</details>
posted @ 2022-04-14 14:29  jsqup  阅读(87)  评论(0编辑  收藏  举报