PyTorch Deep Learning Practice, Lecture 2
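The first listing fits the linear model y_hat = x * w to the data by full-batch gradient descent. As a quick reference (standard mean-squared-error algebra, not spelled out in the original post), the cost over N samples and its derivative with respect to w are:

    cost(w) = \frac{1}{N} \sum_{n=1}^{N} (x_n w - y_n)^2

    \frac{\partial\, cost}{\partial w} = \frac{1}{N} \sum_{n=1}^{N} 2 x_n (x_n w - y_n)

Dividing by N keeps the gradient on the same scale as the mean cost, which is why gradient(xs, ys) below returns grad / len(xs).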
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0  # initial guess for the weight

def forward(x):
    # linear model: y_hat = x * w
    return x * w

def cost(xs, ys):
    # mean squared error over the whole training set
    total = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        total += (y_pred - y) ** 2
    return total / len(xs)

def gradient(xs, ys):
    # mean gradient of the cost with respect to w
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)  # divide by N so the gradient matches the mean cost

epoch_list = []
cost_list = []
lr_val = 0.01  # learning rate

print('predict (before training)', 4, forward(4))
for epoch in range(100):
    cost_val = cost(x_data, y_data)
    grad_val = gradient(x_data, y_data)
    w -= lr_val * grad_val  # one weight update per pass over the full dataset
    print('epoch:', epoch, 'w=', w, 'cost=', cost_val)
    epoch_list.append(epoch)
    cost_list.append(cost_val)
print('predict (after training)', 4, forward(4))

plt.plot(epoch_list, cost_list)
plt.ylabel('cost')
plt.xlabel('epoch')
plt.show()
Stochastic gradient descent (SGD) updates the weight once per sample instead of once per pass over the whole dataset; mini-batch gradient descent is a compromise between the two.
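For a single sample (x, y) the mean drops out, so the loss and gradient reduce to:

    loss(w) = (x w - y)^2

    \frac{\partial\, loss}{\partial w} = 2 x (x w - y)

which is exactly what loss(x, y) and gradient(x, y) compute in the listing below.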
import matplotlib.pyplot as plt

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0

def forward(x):
    return x * w

def loss(x, y):
    # squared error for a single sample
    y_pred = forward(x)
    return (y_pred - y) ** 2

def gradient(x, y):
    # gradient of the single-sample loss with respect to w
    return 2 * x * (x * w - y)

epoch_list = []
loss_list = []
lr_val = 0.01

print('predict (before training)', 4, forward(4))
for epoch in range(100):
    for x, y in zip(x_data, y_data):
        # stochastic update: the weight moves after every single sample
        loss_val = loss(x, y)
        grad_val = gradient(x, y)
        w -= lr_val * grad_val
    print('epoch:', epoch, 'w=', w, 'loss=', loss_val)
    epoch_list.append(epoch)
    loss_list.append(loss_val)
print('predict (after training)', 4, forward(4))

plt.plot(epoch_list, loss_list)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
# Compromise: batch/mini-batch gradient descent treats a slice of the data as one batch and performs a single weight update per batch.
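A minimal sketch of that compromise, written in the same style as the listings above; batch_size = 2 and the per-epoch shuffle are illustrative choices, not part of the original post:

import random

x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

w = 1.0
lr_val = 0.01
batch_size = 2  # illustrative choice, not from the original post

def forward(x):
    return x * w

def batch_gradient(xs, ys):
    # mean gradient over one mini-batch
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)

for epoch in range(100):
    samples = list(zip(x_data, y_data))
    random.shuffle(samples)  # visit the samples in a new order each epoch
    for i in range(0, len(samples), batch_size):
        batch = samples[i:i + batch_size]
        xs = [x for x, _ in batch]
        ys = [y for _, y in batch]
        w -= lr_val * batch_gradient(xs, ys)  # one update per mini-batch

print('predict (after training)', 4, forward(4))

Shuffling before slicing keeps the batches varied across epochs, which is the usual reason mini-batch updates converge more smoothly than pure per-sample SGD while still updating more often than full-batch gradient descent.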