2.安装Spark与Python练习
一、安装Spark
检查基础环境hadoop,jdk
配置文件
启动spark
二、Python编程练习:英文文本的词频统计
准备文本文件(txt)
# Word-frequency count for an English text file.
# Reads text.txt, lowercases it, replaces punctuation with spaces,
# drops stop words, prints each word with its count (descending),
# and writes the (word, count) list to bumi001.txt.

PUNCTUATION = '!"@#$%^&*()+,-./:;<=>?@[\\]_`~{|}'
STOP_WORDS = {'so', 'out', 'all', 'for', 'of', 'to', 'on', 'in', 'if',
              'by', 'under', 'it', 'at', 'into', 'with', 'about'}

# fix: use a context manager so the input file is always closed
# (the original leaked the handle).
with open("text.txt", "r", encoding='UTF-8') as infile:
    txt = infile.read().lower()

# One C-level pass replacing every punctuation character with a space,
# instead of the original chain of per-character .replace() calls.
txt = txt.translate(str.maketrans(PUNCTUATION, ' ' * len(PUNCTUATION)))

# Keep only non-stop words. Set membership replaces the original
# counter-based double loop over the stop-word list (same result).
afterwords = [w for w in txt.split() if w not in STOP_WORDS]

# Tally occurrences, then sort by frequency, highest first
# (sorted() is stable, matching the original list.sort()).
counts = {}
for word in afterwords:
    counts[word] = counts.get(word, 0) + 1
items = sorted(counts.items(), key=lambda x: x[1], reverse=True)

# Direct iteration instead of the original manual while-index loop.
for word, count in items:
    print("{0:<20}{1}".format(word, count))

# fix: the original never closed the output file, so the buffered
# write could be lost; the with-block guarantees a flush and close.
with open("bumi001.txt", "w", encoding='UTF-8') as outfile:
    outfile.write(str(items))
print("文件写入成功")