[Bayes] Maximum Likelihood estimates for text classification
Naïve Bayes Classifier.
Specifically, we will use the Bernoulli-Dirichlet model for text classification.
We will train the model using both Maximum Likelihood estimates and Bayesian updating, and compare the two in terms of predictive success and in terms of what can go wrong.
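As a quick sketch of the model (standard Bernoulli naive Bayes notation; the symbols correspond to the `pi` and `theta` arrays used in the code below, and $\alpha, \beta$ are prior hyperparameters, not values fixed by this notebook): each document is a binary term-presence vector $x \in \{0,1\}^D$, with

$$p(x \mid c) = \prod_{j=1}^{D} \theta_{jc}^{\,x_j}\,(1-\theta_{jc})^{\,1-x_j}, \qquad p(c) = \pi_c.$$

The Maximum Likelihood estimates are simple frequency ratios,

$$\hat\pi_c = \frac{N_c}{N}, \qquad \hat\theta_{jc} = \frac{N_{jc}}{N_c},$$

where $N_c$ counts the training documents in class $c$ and $N_{jc}$ counts those that contain term $j$. Bayesian updating with a $\mathrm{Beta}(\alpha,\beta)$ prior on each $\theta_{jc}$ instead gives the posterior-mean estimate $(N_{jc}+\alpha)/(N_c+\alpha+\beta)$.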
We will be using the webkb dataset (original data: the webkb dataset website).
[Scikit-learn] 1.9 Naive Bayes
[ML] Naive Bayes for Text Classification
[ML] Naive Bayes for email classification
In [31]:
# Make division default to floating-point, saving confusion
# i.e. 3/4 = 0.75 3//4 = 0
from __future__ import division
# Necessary libraries
import scipy as sp
import numpy as np
import matplotlib.pyplot as pl
# Put the graphs where we can see them
# i.e. embed matplotlib figures directly in the notebook
%matplotlib inline
# Display a warning on important floating-point errors
np.seterr(divide='warn', invalid='warn');
Loading the data
In [32]:
data = np.load(
'webkb.npz',
)
print(type(data))
# training data
xtrain = data['xtrain']
ytrain = data['ytrain']
# test data
xtest = data['xtest']
ytest = data['ytest']
# which class is which?
class_label_strings = data['class_label_strings']
# we don't need the original any more
del(data)
In [33]:
print("X training data dimensions = {!r}".format(xtrain.shape)) # -->
print("Y training data dimensions = {!r}".format(ytrain.shape))
print("X test data dimensions = {!r}".format(xtest.shape))
print("Y test data dimensions = {!r}".format(ytest.shape))
print("Number of y labels = {!r}".format(len(class_label_strings)))
In [37]:
xtrain
Out[37]:
In [39]:
xtrain.shape[1]
Out[39]:
In [38]:
np.arange(xtrain.shape[0])
Out[38]:
In [75]:
# Sum over documents (axis 0): total count for each term
np.sum(xtrain, axis=0)
Out[75]:
In [74]:
# Sum over terms (axis 1): total count for each document
np.sum(xtrain, axis=1)
Out[74]:
In [53]:
pl.bar( np.arange(xtrain.shape[1]), np.sum(xtrain, axis=0), width=1);
Displaying the data for a particular class
In [78]:
# Select the documents labelled with class index 2 (ytrain column 2 == 1); keep all features
x2 = xtrain[ytrain[:, 2]==1, :]
In [77]:
pl.bar( np.arange(x2.shape[1]), np.mean(x2, axis=0), width=1, alpha=0.5);
In [56]:
x3 = xtrain[ytrain[:, 3]==1, :]
pl.bar(np.arange(x3.shape[1]), np.mean(x3, axis=0), width=1, alpha=0.5);
# In effect we only look at part of the data (which zooms in on it);
# the next cell restricts attention to the first 100 terms
In [57]:
pl.bar(np.arange(100), np.mean(x2[:, :100], axis=0), width=1, alpha=0.5);
pl.bar(np.arange(100), np.mean(x3[:, :100], axis=0), width=1, alpha=0.5);
Examining the Y data
In [ ]:
def categorical_bar(val, **kwargs):
    """
    Convenient categorical bar plot, labelled with the class strings.
    This is handy if you want to plot something versus class.
    """
    n_cat = len(class_label_strings)
    cat_index = np.arange(n_cat)
    bar = pl.bar(cat_index, val, width=1, **kwargs)
    pl.xticks(cat_index, class_label_strings)
    return bar
In [8]:
categorical_bar(np.sum(ytrain, axis=0));
Or, print the counts directly:
In [9]:
for label_string, n_in_class in zip(class_label_strings, np.sum(ytrain, axis=0)):
print("{}: {}".format(label_string, n_in_class))
Maximum Likelihood Naïve Bayes Classifier
This just takes simple averages of the training data, which is rather a naive approach.
def fit_naive_bayes_ml(x, y):
    """
    Given an array of features `x` and an array of labels `y`,
    return ML estimates of class probabilities `pi`
    and class-conditional feature probabilities `theta`.
    """
    n_class = y.shape[1]
    n_feat = x.shape[1]
    print(n_feat)
    print(len(x[0]))
    pi_counts = np.sum(y, axis=0)
    print(pi_counts)
    # pi = pi_counts/np.sum(pi_counts)
    # print(pi)
    # np.sum(y) gives the sum of all elements of the matrix directly, which works just as well
    pi = pi_counts/np.sum(y)
    print("pi: ", pi)
    print((n_feat, n_class))
    theta = np.zeros((n_feat, n_class))
    print("theta: ", theta)
    for cls in range(n_class):
        docs_in_class = (y[:, cls] == 1)
        # For the training data of this class, count how many of its documents contain each term
        class_feat_count = x[docs_in_class, :].sum(axis=0)
        # Two equivalent ways to sum: np.sum(...) or the .sum() method
        theta[:, cls] = class_feat_count/(docs_in_class.sum())
        # theta[:, cls] = class_feat_count/np.sum(docs_in_class)
    return pi, theta
pi_hat, theta_hat = fit_naive_bayes_ml(xtrain, ytrain)
print("pi_hat: ", pi_hat)
print("theta_hat: ", theta_hat)
Output:
1703
1703
[ 165.   99.  345.   62.   31.]
('pi: ', array([ 0.23504274,  0.14102564,  0.49145299,  0.08831909,  0.04415954]))
(1703, 5)
('theta: ', array([[ 0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.],
       ...,
       [ 0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.],
       [ 0.,  0.,  0.,  0.,  0.]]))
('pi_hat: ', array([ 0.23504274,  0.14102564,  0.49145299,  0.08831909,  0.04415954]))
('theta_hat: ', array([[ 0.01818182,  0.04040404,  0.00289855,  0.        ,  0.        ],
       [ 0.        ,  0.01010101,  0.0115942 ,  0.        ,  0.03225806],
       [ 0.        ,  0.03030303,  0.00869565,  0.01612903,  0.        ],
       ...,
       [ 0.01212121,  0.        ,  0.0115942 ,  0.01612903,  0.03225806],
       [ 0.39393939,  0.45454545,  0.53913043,  0.43548387,  0.41935484],
       [ 0.        ,  0.        ,  0.        ,  0.        ,  0.        ]]))
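Note that `theta_hat` already contains exact zeros (the whole last row above, for instance), and entries could equally end up exactly one. A quick sanity check (a sketch, using the arrays just computed):
In [ ]:
# Terms that never occur in a class give theta_hat == 0, so log(theta) is -inf;
# terms that occur in every document of a class give theta_hat == 1, so log(1 - theta) is -inf.
print("zero entries in theta_hat:", np.sum(theta_hat == 0))
print("unit entries in theta_hat:", np.sum(theta_hat == 1))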
Take one test example x and try to predict its class:
categorical_bar( predict_class_prob(xtest[0,:], pi_hat, theta_hat), color='orange' );
from scipy.misc import logsumexp

def predict_class_prob(x, pi, theta):
    class_feat_l = np.zeros_like(theta)
    # calculations in log space to avoid underflow:
    # terms present in the document contribute log(theta),
    # absent terms contribute log(1 - theta)
    class_feat_l[x==1, :] = np.log(theta[x==1, :])
    class_feat_l[x==0, :] = np.log(1 - theta[x==0, :])
    class_l = class_feat_l.sum(axis=0) + np.log(pi)
    # logsumexp(a) is equivalent to np.log(np.sum(np.exp(a)));
    # subtracting it normalises the class probabilities (see the note below)
    return np.exp(class_l - logsumexp(class_l))
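Why subtracting `logsumexp` works: if $a_c$ is the unnormalised log posterior for class $c$, then $\exp(a_c - \operatorname{logsumexp}(a)) = \exp(a_c)\,/\sum_{c'}\exp(a_{c'})$, which is exactly the normalised posterior probability, computed without ever exponentiating the large negative $a_c$ on their own. A tiny illustration (the numbers are made up):
In [ ]:
a = np.array([-800.0, -801.0, -805.0])  # unnormalised log posteriors
print(np.exp(a) / np.sum(np.exp(a)))    # naive normalisation: exp underflows to 0, giving 0/0 = nan
print(np.exp(a - logsumexp(a)))         # stable: proper probabilities that sum to 1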
Measure the model's accuracy on the test set:
test_correct_ml = predictive_accuracy(xtest, ytest, predict_class, pi_hat, theta_hat)
def predictive_accuracy(xdata, ydata, predictor, *args):
    """
    Given an N-by-D array of features `xdata`,
    an N-by-C array of one-hot-encoded true classes `ydata`
    and a predictor function `predictor`,
    return the proportion of correct predictions.
    We accept an additional argument list `args`
    that will be passed to the predictor function.
    """
    correct = np.zeros(xdata.shape[0])
    for i, x in enumerate(xdata):
        prediction = predictor(x, *args)
        correct[i] = np.all(ydata[i, :] == prediction)
    return correct.mean()
def predict_class(x, pi, theta):
    probs = predict_class_prob(x, pi, theta)
    prediction = np.zeros_like(probs)
    # put a 1 at the index of the most probable class
    prediction[np.argmax(probs)] = 1
    return prediction
The experiments above essentially expose the drawbacks of Maximum Likelihood estimation: the exact zeros in the estimates, and the infinities they produce in log space, cause all sorts of problems.
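A common remedy, and the Bayesian-updating half of the comparison promised at the start, is to put a Beta prior on each theta and use the posterior mean, so every estimate stays strictly between 0 and 1. A minimal sketch (the function name `fit_naive_bayes_bayes` and the default Beta(1, 1) prior are illustrative choices, not taken from the original notebook):
In [ ]:
def fit_naive_bayes_bayes(x, y, alpha=1.0, beta=1.0):
    """
    Posterior-mean estimates: a Beta(alpha, beta) prior on each entry of
    `theta` and a symmetric Dirichlet(1) prior on `pi`. The pseudo-counts
    keep every probability away from exactly 0 or 1.
    """
    n_class = y.shape[1]
    n_feat = x.shape[1]
    pi = (np.sum(y, axis=0) + 1.0) / (np.sum(y) + n_class)
    theta = np.zeros((n_feat, n_class))
    for cls in range(n_class):
        docs_in_class = (y[:, cls] == 1)
        class_feat_count = x[docs_in_class, :].sum(axis=0)
        theta[:, cls] = (class_feat_count + alpha) / (docs_in_class.sum() + alpha + beta)
    return pi, theta

# For example:
# pi_b, theta_b = fit_naive_bayes_bayes(xtrain, ytrain)
# predictive_accuracy(xtest, ytest, predict_class, pi_b, theta_b)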