import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark import SparkContext
# Demo: round-trip between pandas and Spark DataFrames.
df = pd.DataFrame(
    [[1, 2, 3], [4, 5, 6]],
    index=['row1', 'row2'],
    columns=['c1', 'c2', 'c3'],
)
print(df)

if __name__ == "__main__":
    # Create the session inside the main guard so importing this module
    # triggers no Spark side effects.
    spark = SparkSession \
        .builder \
        .appName("testDataFrame") \
        .getOrCreate()

    # Spark DataFrame built from in-memory rows.
    sentenceData = spark.createDataFrame([
        (0.0, "I like Spark"),
        (1.0, "Pandas is useful"),
        (2.0, "They are coded by Python "),
    ], ["label", "sentence"])
    sentenceData.select("label").show()

    # pandas -> Spark: SparkSession.createDataFrame accepts a pandas
    # DataFrame directly. The deprecated SQLContext — and the separate
    # module-level SparkContext, which conflicts with the context owned
    # by the session — are not needed.
    spark_df = spark.createDataFrame(df)
    spark_df.select("c1").show()

    # Spark -> pandas.
    pandas_df = sentenceData.toPandas()
    print(pandas_df)

    # Release the JVM-side resources held by the session.
    spark.stop()
# NOTE(review): stray line "原文" ("original text" — a copy/paste watermark
# artifact) commented out; as a bare identifier it raised NameError at runtime.