Final Project
import collections

import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.naive_bayes import MultinomialNB

# Root directory of the news corpus and the stopword list.
path = r'E:\中文数据清理\147'
with open(r'E:\中文数据清理\stopsCN.txt', encoding='utf-8') as f:
    stopword = f.read().split('\n')

List01 = []  # class labels, one per document
List02 = []  # preprocessed document texts, one per document

def read_text(name, start, end):
    """Read the numbered .txt files of one news category, segment them with
    jieba, drop stopwords, and append the label and text to the two lists."""
    for num in range(start, end):
        file = path + '\\' + name + '\\' + str(num) + '.txt'
        with open(file, 'r', encoding='utf-8') as f:
            texts = f.read()
        target = name
        # Keep only alphabetic characters (isalpha() is True for CJK).
        texts = ''.join([text for text in texts if text.isalpha()])
        # Full-mode segmentation; keep tokens of at least two characters.
        texts = [text for text in jieba.cut(texts, cut_all=True) if len(text) >= 2]
        # Remove stopwords and join the tokens into one space-separated string.
        texts = ' '.join([text for text in texts if text not in stopword])
        List01.append(target)
        List02.append(texts)

# Read 20 documents from each of the four news categories.
read_text('财经', 798977, 798997)
read_text('彩票', 256822, 256842)
read_text('房产', 264410, 264430)
read_text('股票', 644579, 644599)

# Hold out 20% of the documents as a test set.
x_train, x_test, y_train, y_test = train_test_split(List02, List01, test_size=0.2)

# Turn the texts into TF-IDF feature vectors.
vec = TfidfVectorizer()
X_train = vec.fit_transform(x_train)
X_test = vec.transform(x_test)

# Multinomial naive Bayes classifier.
mnb = MultinomialNB()
module = mnb.fit(X_train, y_train)
y_predict = module.predict(X_test)

# 5-fold cross-validation on the test split.
scores = cross_val_score(mnb, X_test, y_test, cv=5)
print('Cross-validation score:', scores.mean())
print('Classification report:\n', classification_report(y_test, y_predict))

# Count the news items of each category in the test labels and predictions.
testCount = collections.Counter(y_test)
predCount = collections.Counter(y_predict)
print('Actual:', testCount, '\n', 'Predicted:', predCount)

# Build the label list and the actual/predicted count lists.
nameList = list(testCount.keys())
testList = list(testCount.values())
# Look predictions up by category name so both lists share the same order.
predictList = [predCount.get(name, 0) for name in nameList]
x = list(range(len(nameList)))
print('Categories:', nameList, '\n', 'Actual:', testList, '\n', 'Predicted:', predictList)