Logistic Regression && Code

Binary classification, without a regularization term.
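For reference (these are the standard formulas the code below implements; the original post left them implicit), the hypothesis, the cross-entropy cost, its gradient, and the gradient descent update are:

$$h_\theta(x) = \frac{1}{1 + e^{-\theta^{T} x}}$$

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log h_\theta(x^{(i)}) + \big(1 - y^{(i)}\big)\log\big(1 - h_\theta(x^{(i)})\big)\Big]$$

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)}, \qquad \theta_j := \theta_j - \alpha\,\frac{\partial J(\theta)}{\partial \theta_j}$$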

# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from numpy import loadtxt, where
from scipy.optimize import minimize

def sigmoid(x):
    # Logistic function: maps any real input into (0, 1).
    return 1.0 / (1.0 + np.exp(-x))

def cost(theta, x, y):
    # Cross-entropy cost J(theta) for logistic regression (no regularization).
    m = len(x)
    h = sigmoid(x.dot(theta))
    J = -(1.0 / m) * (y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h)))
    if np.isnan(J):
        # log(0) can occur when a prediction saturates; treat as infinite cost.
        return np.inf
    return J
    
def gradient(theta, x, y):
    # Vectorized gradient of the cost: (1/m) * X^T (h - y).
    m = len(x)
    h = sigmoid(x.dot(theta))
    grad = (1.0 / m) * (h - y).dot(x)
    return grad

def gradient_two(theta, x, y, alpha=0.0001, iterations=40000000):
    # Plain batch gradient descent; all components of theta are updated
    # simultaneously from the same hypothesis h. Note the tiny learning rate
    # paired with a very large iteration count, as in the original post.
    m = len(x)
    for i in range(iterations):
        h = sigmoid(x.dot(theta))
        theta = theta - alpha * (1.0 / m) * (h - y).dot(x)
        if i % 100000 == 0:
            # Printing every step for 40M iterations would flood the console.
            print('iteration', i, 'cost', cost(theta, x, y), 'theta', theta)
    return theta

if __name__ == "__main__":

    # Load the data: two feature columns plus a 0/1 label column.
    data = loadtxt(r'D:/机器学习/【批量下载】data1等/数据挖掘/ml_data/data1.txt', delimiter=',')
    x = np.c_[np.ones((len(data), 1)), data[:, 0:2]]  # prepend a column of ones for the intercept
    y = data[:, 2]
    theta = np.zeros(x.shape[1])  # start from all-zero parameters
    theta = gradient_two(theta, x, y)
    # Alternative: let scipy do the optimization (commented out in the original post).
    # res = minimize(cost, theta, args=(x, y), jac=gradient, options={'maxiter': 400})
    # print(res)
    '''Final result:
    theta[0] = -22.3021297062
    theta[1] = 0.183373208731
    theta[2] = 0.178329470851
    '''
    # Decision boundary: the line theta0 + theta1*x1 + theta2*x2 = 0,
    # solved for x2 and evaluated at the two ends of the feature range.
    x1 = [20, 100]
    y1 = [-(theta[0] + theta[1] * x1[0]) / theta[2],
          -(theta[0] + theta[1] * x1[1]) / theta[2]]
    plt.plot(x1, y1)
    pos = where(y == 1)
    neg = where(y == 0)
    plt.scatter(x[pos, 1], x[pos, 2], marker='o', c='b')
    plt.scatter(x[neg, 1], x[neg, 2], marker='x', c='r')
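    # Editor's addition (not in the original post): training accuracy as a quick
    # sanity check on the fitted theta, thresholding the sigmoid output at 0.5.
    pred = sigmoid(x.dot(theta)) >= 0.5
    print('training accuracy:', np.mean(pred == y))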
    plt.show()
    
The loss function and formula after adding regularization (not going to write the code for this part — getting the idea is enough, *runs away*....)
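The formula itself appears to have been an image that did not survive; what follows is the standard L2-regularized logistic regression cost and gradient the post presumably referred to (by convention the intercept $\theta_0$ is not penalized):

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log h_\theta(x^{(i)}) + \big(1 - y^{(i)}\big)\log\big(1 - h_\theta(x^{(i)})\big)\Big] + \frac{\lambda}{2m}\sum_{j=1}^{n}\theta_j^{2}$$

$$\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)} + \frac{\lambda}{m}\theta_j \qquad (j \ge 1)$$

And although the post skips the code, a minimal sketch of how the functions above would change is short enough to include (an assumed implementation mirroring cost/gradient from the script; lam stands for λ):

def cost_reg(theta, x, y, lam=1.0):
    # Cross-entropy cost plus an L2 penalty on theta[1:];
    # theta[0] (the intercept) is conventionally left unpenalized.
    m = len(x)
    h = sigmoid(x.dot(theta))
    J = -(1.0 / m) * (y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h)))
    J += (lam / (2.0 * m)) * np.sum(theta[1:] ** 2)
    return np.inf if np.isnan(J) else J

def gradient_reg(theta, x, y, lam=1.0):
    # Gradient of cost_reg: the usual data term plus (lam/m) * theta for j >= 1.
    m = len(x)
    grad = (1.0 / m) * (sigmoid(x.dot(theta)) - y).dot(x)
    grad[1:] += (lam / m) * theta[1:]
    return grad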

posted @ 2016-09-18 20:01  simple_wxl