Implementing Multivariate Linear Regression in Python from Scratch Using Gradient Descent Optimization

 

The hypothesis of multivariate linear regression is h_theta(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + … + theta_n*x_n, where theta_0, theta_1, theta_2, theta_3, …, theta_n are the parameters.
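The parameters are fitted with batch gradient descent on the mean squared error cost. For reference, the objective and update rule that the code below implements can be written as follows (standard notation; m is the number of training examples, alpha the learning rate, and x_0^{(i)} = 1 is the intercept term):

J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right)^2

\theta_j := \theta_j - \frac{\alpha}{m} \sum_{i=1}^{m} \left( h_\theta(x^{(i)}) - y^{(i)} \right) x_j^{(i)}, \qquad j = 0, 1, \dots, n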

import numpy as np
import matplotlib.pyplot as plt

def hypothesis(theta, X, n):
    # computes theta^T x for every training example, i.e. the vector of predictions
    h = np.ones((X.shape[0], 1))
    theta = theta.reshape(1, n+1)
    for i in range(0, X.shape[0]):
        h[i] = float(np.matmul(theta, X[i]))
    h = h.reshape(X.shape[0])
    return h
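Since X already carries a leading column of ones for the intercept, the per-row loop above is equivalent to a single matrix-vector product; a minimal vectorized sketch (not part of the original code):

def hypothesis_vectorized(theta, X, n):
    # one matrix-vector product instead of a Python loop over the rows of X
    return np.matmul(X, theta.reshape(n+1))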
def BGD(theta, alpha, num_iters, h, X, y, n):
    cost = np.ones(num_iters)
    for i in range(0, num_iters):
        # update the bias term and every weight theta_j using the current predictions h
        theta[0] = theta[0] - (alpha/X.shape[0]) * sum(h - y)
        for j in range(1, n+1):
            theta[j] = theta[j] - (alpha/X.shape[0]) * sum((h - y) * X.transpose()[j])
        # recompute the predictions and record the cost for this iteration
        h = hypothesis(theta, X, n)
        cost[i] = (1/X.shape[0]) * 0.5 * sum(np.square(h - y))
    theta = theta.reshape(1, n+1)
    return theta, cost
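The loop over j can also be collapsed into one vectorized gradient step; a sketch of an equivalent batch update, assuming the same argument shapes as BGD above (not the author's code, and here the cost is recorded before rather than after each step):

def BGD_vectorized(theta, alpha, num_iters, X, y, n):
    m = X.shape[0]
    cost = np.ones(num_iters)
    for i in range(0, num_iters):
        h = np.matmul(X, theta)                             # predictions for all m examples
        cost[i] = (0.5/m) * np.sum(np.square(h - y))        # mean squared error / 2
        theta = theta - (alpha/m) * np.matmul(X.T, h - y)   # update all n+1 parameters at once
    return theta.reshape(1, n+1), cost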
def linear_regression(X, y, alpha, num_iters):
    n = X.shape[1]
    # adding a column of ones to X for the intercept term...
    one_column = np.ones((X.shape[0], 1))
    X = np.concatenate((one_column, X), axis=1)
    # initializing the parameter vector...
    theta = np.zeros(n+1)
    # hypothesis calculation....
    h = hypothesis(theta, X, n)
    # returning the optimized parameters by Gradient Descent...
    theta, cost = BGD(theta, alpha, num_iters, h, X, y, n)
    return theta, cost
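A quick way to check the routine is to run it on synthetic data with known parameters; a hypothetical smoke test (the names and values below are made up, and the single feature is standardized so a larger learning rate converges quickly):

# hypothetical smoke test: fit y = 2 + 3*x on a standardized feature
X_demo = np.arange(100, dtype=float).reshape(100, 1)
X_demo = (X_demo - X_demo.mean()) / X_demo.std()
y_demo = 2 + 3 * X_demo[:, 0]
theta_demo, cost_demo = linear_regression(X_demo, y_demo, 0.1, 1000)
print(theta_demo)   # should be close to [[2. 3.]]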
data = np.loadtxt('data1.txt', delimiter=',')
X_train = data[:, [0, 1]]   # the two features F1 and F2
y_train = data[:, 2]        # the target value
 
 
 
mean = np.ones(X_train.shape[1])
std = np.ones(X_train.shape[1])
for i in range(0, X_train.shape[1]):
    # mean normalization: standardize each feature column to zero mean and unit variance
    mean[i] = np.mean(X_train.transpose()[i])
    std[i] = np.std(X_train.transpose()[i])
    for j in range(0, X_train.shape[0]):
        X_train[j][i] = (X_train[j][i] - mean[i])/std[i]
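The same standardization can be written as one vectorized step (a sketch equivalent to the loops above):

# column-wise standardization without explicit loops
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std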

  1. Mean of the "number of bedrooms" feature, i.e. F2: 3.1702
  2. Standard deviation of F1: 7.86202619e+02
  3. Standard deviation of F2: 7.52842809e-01
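These values can be read directly off the mean and std arrays computed above, for example (hypothetical print statements, not in the original post):

print(mean[1])   # mean of F2 (number of bedrooms), about 3.1702
print(std[0])    # standard deviation of F1
print(std[1])    # standard deviation of F2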
# calling the principal function with learning_rate = 0.0001 and num_iters = 30000
theta, cost = linear_regression(X_train, y_train, 0.0001, 30000)
 
The theta obtained after multivariate linear regression:
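One way to display the learned parameters (a hypothetical snippet; the original post showed the values as a screenshot):

print(theta)   # shape (1, 3): [bias, weight for F1, weight for F2]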
# plotting the cost against the number of iterations
cost = list(cost)
n_iterations = [x for x in range(1, 30001)]
plt.plot(n_iterations, cost)
plt.xlabel('No. of iterations')
plt.ylabel('Cost')
plt.show()
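With the learned theta, a new example can be scored by applying the same standardization and prepending the intercept term; a hypothetical usage example with made-up feature values (not from the original post):

# hypothetical prediction for a house with F1 = 1650 and F2 = 3
x_new = np.array([1650.0, 3.0])
x_new = (x_new - mean) / std                  # standardize with the training statistics
x_new = np.concatenate(([1.0], x_new))        # prepend the intercept term
prediction = float(np.matmul(theta.reshape(-1), x_new))
print(prediction)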

