# Python word-cloud (词云图) demo — two standalone scripts pasted from a blog post.

#导入需要模块
import jieba
import numpy as np 
import matplotlib.pyplot as plt 
from PIL import Image 
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
 
# ---- Script 1: word cloud shaped and coloured by a user-supplied mask image ----

text_road = str(input('请输入文章的路径:'))
picture_road = str(input('请输入图片的路径:'))

# Load the article to analyse; the context manager closes the file promptly
# (the original left the handle open).
with open(text_road, 'r', encoding='utf-8') as f:
    text = f.read()

# Segment the Chinese text with jieba (precise mode) and join the tokens
# with spaces so WordCloud can tokenise them.
wordlist_after_jieba = jieba.cut(text, cut_all=False)
wl_space_split = " ".join(wordlist_after_jieba)

# Read the mask photo; np.array converts the PIL image into an ndarray,
# which WordCloud uses both as shape mask and as colour source.
mask = np.array(Image.open(picture_road))

# Stopwords: words excluded from the cloud. More can be added with .add().
stopwords = set(STOPWORDS)
stopwords.add("<br/>")  # strip HTML line-break artefacts from scraped text

# Build the word-cloud object.
wc = WordCloud(
    background_color="white",
    font_path='/Library/Fonts/Arial Unicode.ttf',  # CJK-capable font (macOS path)
    max_words=1000,        # maximum number of words shown
    mask=mask,
    stopwords=stopwords,
    max_font_size=100,     # largest font size
)

# BUG FIX: generate from the jieba-segmented text, not the raw text.
# The original called wc.generate(text), leaving wl_space_split unused,
# so Chinese was never properly tokenised.
wc.generate(wl_space_split)

# Build a colour scheme from the mask image and recolour the cloud with it.
image_colors = ImageColorGenerator(mask)
wc.recolor(color_func=image_colors)

# Save first, so the PNG exists even if the interactive window is killed
# (plt.show() blocks until the window is closed).
wc.to_file('词云图.png')

# Display the cloud without axes.
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
plt.show()
from wordcloud import WordCloud, STOPWORDS
from imageio import imread
from sklearn.feature_extraction.text import CountVectorizer
import jieba
import csv
# ---- Script 2: word cloud + word-frequency CSV for caifu.txt ----

# Read the article.
# NOTE(review): no encoding was specified originally, so the platform default
# is used — confirm the actual encoding of caifu.txt before hard-coding one.
with open("caifu.txt") as f:
    contents = f.read()
print("contents变量的类型:", type(contents))

# jieba segmentation returns a generator; join the tokens into one
# space-separated string for WordCloud / CountVectorizer.
contents_cut = jieba.cut(contents)
print("contents_cut变量的类型:", type(contents_cut))
contents_list = " ".join(contents_cut)
print("contents_list变量的类型:", type(contents_list))

# BUG FIX: set.add() returns None, so the original
# `stopwords=STOPWORDS.add("一个")` passed None to WordCloud — meaning no
# stopwords were applied at all. Build the set first, then pass it.
stopwords = set(STOPWORDS)
stopwords.add("一个")

# collocations=False avoids repeated bigrams in the cloud; mask defines the
# cloud's shape (the mask image must have a background colour).
wc = WordCloud(stopwords=stopwords, collocations=False,
               background_color="white",
               font_path=r"C:\Windows\Fonts\simhei.ttf",
               width=400, height=300, random_state=42,
               mask=imread('axis.png', pilmode="RGB"))
wc.generate(contents_list)
wc.to_file("ciyun.png")

# Count word frequencies with CountVectorizer.
cv = CountVectorizer()
contents_count = cv.fit_transform([contents_list])
# vocabulary terms
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# switch to get_feature_names_out() on newer versions.
list1 = cv.get_feature_names()
# frequency of each term (single document, hence row [0])
list2 = contents_count.toarray().tolist()[0]
# map term -> frequency
contents_dict = dict(zip(list1, list2))
# newline="" prevents blank rows appearing between CSV records on Windows
with open("caifu_output.csv", 'w', newline="") as f:
    writer = csv.writer(f)
    for key, value in contents_dict.items():
        writer.writerow([key, value])
# Source: blog post by 脱离低级趣味, posted 2019-12-21 12:55 (footer retained as a comment).