Winter Vacation Life Guide 26

# coding:utf8  # declare the source-file encoding as UTF-8

from pyspark.sql import SparkSession  # SparkSession: entry point for creating and managing a Spark application
from pyspark.sql.functions import concat, expr, col  # Spark SQL functions; unused below, but handy for later transformations
from pyspark.sql.types import StructType, StringType, IntegerType  # type classes used to define the DataFrame schema
from pyspark.sql import functions as F  # shorter alias for the pyspark.sql.functions module

if __name__ == '__main__':
    # Create a local SparkSession named "test" with the shuffle partition count set to 2
    spark = SparkSession.builder.appName("test").master("local[*]").config("spark.sql.shuffle.partitions", 2).getOrCreate()

    # Define the schema explicitly: with header=False the CSV reader would otherwise name
    # the columns _c0, _c1, _c2, and the select on "user_id"/"rank"/"ts" below would fail
    schema = StructType().\
        add("user_id", StringType(), nullable=True).\
        add("rank", IntegerType(), nullable=True).\
        add("ts", StringType(), nullable=True)

    # Read the comma-separated file (no header row) into a DataFrame using the schema above
    df = spark.read.csv('../data/sql/stu.txt', sep=',', header=False, schema=schema)
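    # For reference, stu.txt is assumed to hold comma-separated rows matching the
    # schema above, e.g. (hypothetical sample line): 1,90,2023-01-01 10:00:00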

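    # Write as plain text: the text source accepts only a single string column,
    # so concat_ws first joins the three columns into one "---"-separated string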
    df.select(F.concat_ws("---","user_id","rank","ts")).\
        write.\
        mode("overwrite").\
        format("text").\
        save("../data/output/sql/text")

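    # Write as CSV with an explicit comma separator and a header row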
    df.write.mode("overwrite").\
        format("csv").\
        option("sep",",").\
        option("header",True).\
        save("../data/output/sql/csv")

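    # Write as JSON (one JSON object per line)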
    df.write.mode("overwrite").\
        format("json"). \
        save("../data/output/sql/json")
    # If format() is omitted, the writer defaults to parquet
    df.write.mode("overwrite").\
        format("parquet"). \
        save("../data/output/sql/parquet")
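    # Since parquet is the default format, the same write also works without format();
    # a minimal sketch (the "parquet_default" output path is made up for illustration):
    df.write.mode("overwrite").\
        save("../data/output/sql/parquet_default")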
    # Write the DataFrame to a MySQL table over JDBC
    # (the host in the URL below is abbreviated; substitute your MySQL server address)
    df.write.mode("overwrite").\
        format("jdbc").\
        option("url","jdbc:mysql://.200:3306/book?useSSL=false&useUnicode=true&allowPublicKeyRetrieval=true&serverTimezone=UTC").\
        option("dbtable","word_count").\
        option("user","root").\
        option("password","222222").\
        save()
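    # Note: the JDBC write/read requires the MySQL connector jar on Spark's classpath.
    # One way to supply it (a sketch with a hypothetical jar path) is when building the session:
    #   spark = SparkSession.builder.appName("test").master("local[*]").\
    #       config("spark.jars", "/path/to/mysql-connector-j-8.0.33.jar").\
    #       getOrCreate()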
    # Read the table back from MySQL over JDBC
    df = spark.read.format("jdbc").\
        option("url","jdbc:mysql://.200:3306/book?useSSL=false&useUnicode=true&allowPublicKeyRetrieval=true&serverTimezone=UTC").\
        option("dbtable","word_count").\
        option("user","root").\
        option("password","222222").\
        load()
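    # To verify the round trip, inspect the reloaded DataFrame:
    df.printSchema()  # schema inferred from the MySQL table definition
    df.show()         # print the first 20 rows read back over JDBC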

SparkSQL data write and read.

posted @ 2024-02-03 18:07  一个小虎牙