Python 结巴（jieba）分词及词频统计

 1 def get_words(txt):
 2     seg_list = jieba.cut(txt)
 3     c = Counter()
 4     for x in seg_list:
 5         if len(x) > 1 and x != '\r\n':
 6             c[x] += 1
 7     print('常用词频度统计结果')
 8     for (k, v) in c.most_common(30):
 9         print('%s%s %s %d' % (' ' * (5 - len(k)), k, '*' * int(v / 3), v))
10         
if __name__ == '__main__':
    # comments.txt is GBK-encoded; the built-in open() decodes it directly,
    # making the legacy codecs.open() wrapper unnecessary.
    with open('comments.txt', 'r', encoding='gbk') as f:
        txt = f.read()
    get_words(txt)

 

 

 

 

def get_words(txt):
    """Tokenize *txt* with jieba and print the top-30 word frequencies.

    Args:
        txt: raw text to segment (str).

    Side effects:
        Prints a header line, then each word with a '*' bar scaled 1-per-3 hits.
    """
    # Count tokens longer than one character, ignoring CRLF remnants;
    # feeding a generator to Counter replaces the manual counting loop.
    c = Counter(w for w in jieba.cut(txt) if len(w) > 1 and w != '\r\n')
    print('常用词频度统计结果')
    for (k, v) in c.most_common(30):
        # Right-align the word within 5 columns; one '*' per 3 occurrences.
        print('%s%s %s %d' % (' ' * (5 - len(k)), k, '*' * int(v / 3), v))

if __name__ == '__main__':
    # comments.txt is GBK-encoded; built-in open() handles the decoding,
    # so the legacy codecs.open() call is not needed.
    with open('comments.txt', 'r', encoding='gbk') as f:
        txt = f.read()
    get_words(txt)
posted @ 2022-03-15 22:48  靠谱杨  阅读(155)  评论(0编辑  收藏  举报