Daily Learning

Today I reviewed word segmentation with jieba:

import jieba
from collections import Counter

if __name__ == '__main__':
    # Input text plus two output files: one with the words only, one with word:count pairs
    filehandle = open("Hotword.txt", "r", encoding='utf-8')
    file = open("final_hotword2.txt", "w", encoding='utf-8')
    filepaixu = open("final_hotword.txt", "w", encoding='utf-8')

    mystr = filehandle.read()
    seg_list = jieba.cut(mystr)  # accurate mode is the default
    print(seg_list)  # jieba.cut returns a generator, not a list

    # Stopword list, one word per line
    stopwords = {}.fromkeys([line.rstrip() for line in open(r'final.txt', encoding='UTF-8')])

    # Count every token that is not a stopword, is longer than one character,
    # and is not a line break or a leftover HTML entity ("quot")
    c = Counter()
    for x in seg_list:
        if x not in stopwords:
            if len(x) > 1 and x != '\r\n' and x != 'quot':
                c[x] += 1

    print('\nWord frequency results:')
    for (k, v) in c.most_common(100):  # print the 100 most frequent words
        print("%s:%d" % (k, v))
        file.write(k + '\n')
        filepaixu.write(k + ":" + str(v) + '\n')

    filehandle.close()
    file.close()
    filepaixu.close()

    # seg2 = jieba.cut("好好学学python,有用。", cut_all=False)
    # print("Accurate mode (also the default):", ' '.join(seg2))
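Besides the default accurate mode used above, jieba also offers a full mode and a search-engine mode. A minimal sketch comparing the three, using the sample sentence from the jieba README (the output labels are just for illustration):

import jieba

sentence = "我来到北京清华大学"  # sample sentence from the jieba README

# Accurate mode: the default, and what the counting script above relies on
print("accurate:", "/".join(jieba.cut(sentence, cut_all=False)))

# Full mode: emits every word the dictionary can form, with overlaps
print("full:", "/".join(jieba.cut(sentence, cut_all=True)))

# Search-engine mode: further splits long words, useful for building indexes
print("search:", "/".join(jieba.cut_for_search(sentence)))

For frequency counting, accurate mode is the sensible choice: full mode produces overlapping tokens, which would inflate the counts.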

 
