An Example Application of a Logistic Regression Model in Python

This example uses the Australian Credit Approval dataset from the UCI Machine Learning Repository.
It contains 690 applicant records, each with 14 features and one class label y (1 = loan approved, 0 = loan denied).

1. Data Acquisition

# Load the credit dataset from an Excel file into a DataFrame
import pandas as pd
data = pd.read_excel('credit.xlsx')
data
x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 d
0 1 22.08 11.460 2 4 4 1.585 0 0 0 1 2 100 1213 0
1 0 22.67 7.000 2 8 4 0.165 0 0 0 0 2 160 1 0
2 0 29.58 1.750 1 4 4 1.250 0 0 0 1 2 280 1 0
3 0 21.67 11.500 1 5 3 0.000 1 1 11 1 2 0 1 1
4 1 20.17 8.170 2 6 4 1.960 1 1 14 0 2 60 159 1
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
685 1 31.57 10.500 2 14 4 6.500 1 0 0 0 2 0 1 1
686 1 20.67 0.415 2 8 4 0.125 0 0 0 0 2 0 45 0
687 0 18.83 9.540 2 6 4 0.085 1 0 0 0 2 100 1 1
688 0 27.42 14.500 2 14 8 3.085 1 1 1 0 2 120 12 1
689 1 41.00 0.040 2 10 4 0.040 0 1 1 0 1 560 1 1

690 rows × 15 columns
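
Before modelling, it can be worth a quick sanity check on what was loaded. A minimal sketch (not in the original post), assuming the data DataFrame read above:

# Quick inspection of the loaded credit data (uses the `data` DataFrame from above)
print(data.shape)                        # expected: (690, 15) -- 14 features plus the label
print(data.iloc[:, 14].value_counts())   # class balance of the approval label
print(data.isnull().sum().sum())         # total count of missing values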

2. Splitting into Training and Test Samples

# The training features are stored in x and the target variable in y; the test features and labels are x1 and y1
# The first 600 records are used for training and the remaining 90 for testing
x = data.iloc[:600,:14].values
x
array([[1.000e+00, 2.208e+01, 1.146e+01, ..., 2.000e+00, 1.000e+02,
        1.213e+03],
       [0.000e+00, 2.267e+01, 7.000e+00, ..., 2.000e+00, 1.600e+02,
        1.000e+00],
       [0.000e+00, 2.958e+01, 1.750e+00, ..., 2.000e+00, 2.800e+02,
        1.000e+00],
       ...,
       [1.000e+00, 3.492e+01, 2.500e+00, ..., 2.000e+00, 2.390e+02,
        2.010e+02],
       [1.000e+00, 2.408e+01, 8.750e-01, ..., 2.000e+00, 2.540e+02,
        1.951e+03],
       [1.000e+00, 3.733e+01, 6.500e+00, ..., 2.000e+00, 9.300e+01,
        1.000e+00]])
y = data.iloc[:600,14].values
y
array([0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0,
       0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0,
       0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1,
       0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1,
       1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0,
       0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0,
       0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0,
       1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0,
       0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0,
       0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1,
       1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1,
       1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1,
       1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1,
       1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1,
       0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
       1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0,
       1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0,
       1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0,
       0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1,
       0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0,
       1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1,
       0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
       1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0,
       0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
       1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
       1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1,
       0, 0, 1, 1, 0, 1], dtype=int64)
x1 = data.iloc[600:,:14].values
x1
array([[0.000e+00, 2.075e+01, 9.540e+00, ..., 2.000e+00, 2.000e+02,
        1.001e+03],
       [1.000e+00, 3.667e+01, 3.250e+00, ..., 2.000e+00, 1.020e+02,
        6.400e+02],
       [1.000e+00, 2.258e+01, 1.004e+01, ..., 2.000e+00, 6.000e+01,
        3.970e+02],
       ...,
       [0.000e+00, 1.883e+01, 9.540e+00, ..., 2.000e+00, 1.000e+02,
        1.000e+00],
       [0.000e+00, 2.742e+01, 1.450e+01, ..., 2.000e+00, 1.200e+02,
        1.200e+01],
       [1.000e+00, 4.100e+01, 4.000e-02, ..., 1.000e+00, 5.600e+02,
        1.000e+00]])
y1 = data.iloc[600:,14].values
y1
array([0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
       1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0,
       0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0,
       0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,
       1, 1], dtype=int64)
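
The fixed split (first 600 records for training, last 90 for testing) keeps the example reproducible. As an alternative sketch, not used in this post, scikit-learn's train_test_split can draw a random, stratified split; reusing the names x, y, x1, y1 lets the rest of the code run unchanged:

from sklearn.model_selection import train_test_split

X_all = data.iloc[:, :14].values
y_all = data.iloc[:, 14].values
# Hold out 90 of the 690 records at random, keeping the class ratio similar
x, x1, y, y1 = train_test_split(X_all, y_all, test_size=90,
                                stratify=y_all, random_state=0)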

3. Logistic Regression Analysis

# Import the LogisticRegression class (aliased as LR)
from sklearn.linear_model import LogisticRegression as LR
# Create a logistic regression object lr (max_iter is raised; see the note below)
lr = LR(max_iter=3000)
# Call lr.fit() to train the model on the training data
lr.fit(x, y)
LogisticRegression(max_iter=3000)
A problem encountered here: with the default settings (lr = LR()), the solver does not converge and scikit-learn issues a warning ending in: STOP: TOTAL NO. of ITERATIONS REACHED LIMIT ... extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG

Solution
The warning means the solver reached its iteration limit; the fix is simply to increase the maximum number of iterations or to scale the data.
Here the code is changed to increase the iteration limit:

The default maximum number of iterations (max_iter) is 100; setting it to 3000 is enough here:
lr = LR(max_iter=3000)
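
The warning message itself offers a second remedy: scale the data. A sketch of that route (an alternative, not what this post uses): standardizing the features usually lets the lbfgs solver converge within the default iteration limit.

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression

# Standardize each feature to zero mean / unit variance, then fit the model
scaled_lr = make_pipeline(StandardScaler(), LogisticRegression())
scaled_lr.fit(x, y)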

# Call lr.score() to get the model's accuracy
r = lr.score(x, y)   # accuracy on the training data
r
0.875
# Call lr.predict() to predict labels for the test samples x1
R = lr.predict(x1)
R
array([0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,
       1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0,
       1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
       0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1,
       1, 0], dtype=int64)
# Accuracy on the test samples: entries of Z equal to 0 are correct predictions
Z = R - y1
Rs = len(Z[Z == 0]) / len(Z)
Rs
0.8666666666666667
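
The element-wise comparison above is equivalent to scikit-learn's built-in accuracy metric. A short sketch (assuming R and y1 from above) that also prints a confusion matrix for a slightly richer view:

from sklearn.metrics import accuracy_score, confusion_matrix

print(accuracy_score(y1, R))     # should equal Rs computed above
print(confusion_matrix(y1, R))   # rows = true class, columns = predicted class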


The complete script, collected in one place:

import pandas as pd
data = pd.read_excel('credit.xlsx')
x = data.iloc[:600, :14].values
y = data.iloc[:600, 14].values
x1 = data.iloc[600:, :14].values
y1 = data.iloc[600:, 14].values
from sklearn.linear_model import LogisticRegression as LR
lr = LR(max_iter=3000)   # create the logistic regression model
lr.fit(x, y)             # train on the training data
r = lr.score(x, y)       # accuracy on the training data
print('Training accuracy:', r)
R = lr.predict(x1)       # predict labels for the test samples
Z = R - y1
Rs = len(Z[Z == 0]) / len(Z)
print('Predicted labels:', R)
print('Test accuracy:', Rs)
Training accuracy: 0.875
Predicted labels: [0 1 1 1 1 0 0 1 0 1 1 0 1 0 1 1 0 0 0 1 0 1 1 0 1 1 1 0 0 0 0 0 1 0 0 1 0
 0 0 0 0 1 1 0 1 0 1 0 1 1 1 0 0 1 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 1 1 0 1
 0 0 0 0 0 1 0 1 1 0 1 1 0 1 1 0]
Test accuracy: 0.8666666666666667
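
Beyond hard 0/1 predictions, the fitted model can also report an approval probability for each applicant, which is often more useful for credit decisions. A sketch assuming the lr and x1 defined in the script above:

# Probability of class 1 (approve) for each test applicant
probs = lr.predict_proba(x1)[:, 1]
print(probs[:5])   # approval probabilities for the first five test applicants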
