Method 1: Create a DataFrame from a Scala collection
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

object CreateSparkSession {
  def main(args: Array[String]): Unit = {
    val sparksqlConf = new SparkConf().setMaster("local[*]").setAppName("sparksql")
    val sparkSession = SparkSession.builder().config(sparksqlConf).getOrCreate()

    // The implicits import adds the toDF extension method to local Seqs.
    import sparkSession.implicits._
    // Use a Seq directly instead of relying on the implicit Array-to-Seq conversion.
    val data: Seq[(String, Int, String)] = Seq(("zs", 20, "男"), ("ww", 30, "男"), ("zl", 25, "男"))
    // toDF assigns the given column names to the tuple fields in order.
    val dataFrame: DataFrame = data.toDF("name", "age", "sex")
    dataFrame.show()

    sparkSession.stop()
  }
}
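A common variant of the same technique uses a case class instead of tuples, so the column names are derived from the field names and toDF needs no arguments. A minimal sketch, assuming the standard sparkSession.implicits._ encoders; the Person case class and the CreateFromCaseClass object are hypothetical names introduced for illustration:

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

// Hypothetical case class for illustration; its field names become the column names.
case class Person(name: String, age: Int, sex: String)

object CreateFromCaseClass {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("sparksql")
    val spark = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._

    // No column names needed: they are taken from Person's fields.
    val dataFrame: DataFrame = Seq(Person("zs", 20, "男"), Person("ww", 30, "男"), Person("zl", 25, "男")).toDF()
    dataFrame.show()
    spark.stop()
  }
}

Either way, show() prints the same three-column table (name, age, sex).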
Method 2: Create a DataFrame via the RDD toDF method
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

object CreateSparkSession {
  def main(args: Array[String]): Unit = {
    val sparksqlConf = new SparkConf().setMaster("local[*]").setAppName("sparksql")
    val sparkSession = SparkSession.builder().config(sparksqlConf).getOrCreate()

    // The implicits import also adds toDF to RDDs of products (tuples, case classes).
    import sparkSession.implicits._
    val sparkContext = sparkSession.sparkContext
    val rdd: RDD[(String, Int, String)] = sparkContext.makeRDD(Array(("zs", 20, "男"), ("ww", 30, "男"), ("zl", 25, "男")))
    val dataFrame1: DataFrame = rdd.toDF("name", "age", "sex")
    dataFrame1.show()

    sparkSession.stop()
  }
}
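When the column types should be declared explicitly rather than inferred from the tuple types, the RDD route can also go through createDataFrame with an RDD[Row] plus a StructType schema. A minimal sketch of that alternative, reusing the same local data; the CreateWithSchema object name is hypothetical:

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object CreateWithSchema {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("sparksql")
    val spark = SparkSession.builder().config(conf).getOrCreate()

    // Build an RDD of untyped Rows and pair it with an explicit schema.
    val rowRDD: RDD[Row] = spark.sparkContext.makeRDD(
      Seq(Row("zs", 20, "男"), Row("ww", 30, "男"), Row("zl", 25, "男")))
    val schema = StructType(Seq(
      StructField("name", StringType, nullable = false),
      StructField("age", IntegerType, nullable = false),
      StructField("sex", StringType, nullable = false)
    ))
    val dataFrame: DataFrame = spark.createDataFrame(rowRDD, schema)
    dataFrame.show()
    spark.stop()
  }
}

The explicit schema is worth the extra code when types or nullability must be controlled, for example when the rows come from an untyped source.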