Spam Email Classification
1. Reading the data
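The original section gives no reading code, so below is a minimal sketch, assuming the dataset is a tab-separated file (the filename SMSSpamCollection is a hypothetical placeholder) with the class label in the first column and the message text in the second; adjust the filename and delimiter to the actual data.

import csv

sms = open("SMSSpamCollection", "r", encoding="utf-8")
sms_data = []    # message texts
sms_label = []   # class labels, e.g. "ham" / "spam"
csv_reader = csv.reader(sms, delimiter='\t')
for line in csv_reader:
    sms_label.append(line[0])  # first column: label
    sms_data.append(line[1])   # second column: message text
sms.close()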
2. Data preprocessing
import csv
import re
import nltk
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# Map a Penn Treebank POS tag to the corresponding WordNet category
def getLb(data):
    if data.startswith("J"):
        return nltk.corpus.wordnet.ADJ
    elif data.startswith("V"):
        return nltk.corpus.wordnet.VERB
    elif data.startswith("N"):
        return nltk.corpus.wordnet.NOUN
    elif data.startswith("R"):
        return nltk.corpus.wordnet.ADV
    else:
        return ""

def preprocessing(data):
    newdata = []
    punctuation = '!,;:?"\''
    # Remove punctuation and convert to lowercase
    data = re.sub(r'[{}]+'.format(punctuation), '', data).strip().lower()
    for i in nltk.sent_tokenize(data, "english"):  # split the text into sentences
        for j in nltk.word_tokenize(i):            # tokenize each sentence into words
            newdata.append(j)
    stops = stopwords.words('english')
    newdata = [i for i in newdata if i not in stops]  # remove stop words
    newdata = nltk.pos_tag(newdata)                   # part-of-speech tagging
    lem = WordNetLemmatizer()
    for i, j in enumerate(newdata):                   # lemmatize each word
        y = getLb(j[1])
        if y:
            newdata[i] = lem.lemmatize(j[0], y)
        else:
            newdata[i] = j[0]
    return newdata
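To connect preprocessing with the split in the next step, here is a hedged usage sketch; sms_data and sms_label come from the reading sketch in step 1 and are assumptions. The tokens are rejoined into one string per message so the vectorizers in step 4 can consume them directly.

# Clean every message and rejoin the tokens into a single string
data = [" ".join(preprocessing(text)) for text in sms_data]
target = sms_label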
3. Data splitting: dividing the data into training and test sets
from sklearn.model_selection import train_test_split
# Stratified split keeps the class ratio consistent between the training and test sets
x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=0, stratify=target)
4. Model selection
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
Why was this model chosen?
The choice is based on the characteristics of the features.
GaussianNB: Gaussian Naive Bayes (suitable when features follow a normal distribution)
MultinomialNB: multinomial Naive Bayes for discrete features (suitable when the feature distribution is discrete, e.g. word counts) √ chosen here; see the sketch below
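A minimal end-to-end sketch of the chosen setup, assuming x_train/x_test from step 3 hold the cleaned message strings; pairing MultinomialNB with TfidfVectorizer and reporting via classification_report are illustrative choices, not mandated by the original.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report

# Fit the vocabulary on the training texts only, then reuse it
# to transform the test texts
tfidf = TfidfVectorizer()
X_train = tfidf.fit_transform(x_train)
X_test = tfidf.transform(x_test)

# MultinomialNB expects non-negative term-frequency / tf-idf features
mnb = MultinomialNB()
mnb.fit(X_train, y_train)
y_pred = mnb.predict(X_test)
print(classification_report(y_test, y_pred))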
Comparison and summary
If CountVectorizer is used to generate the text features, how does it perform compared with TfidfVectorizer?
Besides counting how often a word appears in the current document, TfidfVectorizer also takes into account how many documents in the corpus contain that word, so it extracts more informative features;
CountVectorizer only considers a word's frequency within the current document. Compared with CountVectorizer, the features selected by TfidfVectorizer are therefore more meaningful.
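To check this claim empirically, a short comparison sketch (reusing the step 3 split; the variable names are the assumptions made above): train the same MultinomialNB on both feature representations and compare held-out accuracy.

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

# Same classifier, two feature representations; compare test accuracy
for Vectorizer in (CountVectorizer, TfidfVectorizer):
    vec = Vectorizer()
    X_train = vec.fit_transform(x_train)
    X_test = vec.transform(x_test)
    score = MultinomialNB().fit(X_train, y_train).score(X_test, y_test)
    print(Vectorizer.__name__, score)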