Optimization Algorithms

Gradient descent and stochastic gradient descent:
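For the linear model y_pred = x * w used below, both methods apply the same kind of update and differ only in how the gradient is computed: batch gradient descent averages it over the whole dataset, while SGD uses one sample per update (here $\eta$ is the learning rate and $N$ the number of samples):

$$w \leftarrow w - \eta \cdot \frac{1}{N}\sum_{i=1}^{N} 2x_i(x_i w - y_i) \qquad \text{(batch gradient descent)}$$

$$w \leftarrow w - \eta \cdot 2x_i(x_i w - y_i) \qquad \text{(SGD, one sample per update)}$$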

```python
import matplotlib.pyplot as plt

# Toy 1-D regression data
x_data = [5, 6, 7, 8.5, 9, 10, 11.5, 12]
y_data = [1, 2, 8, 4, 5, 6.5, 7.5, 8]

w = 1.0  # initial weight

# Linear model without bias: y_pred = x * w
def forward(x):
    return x * w

# Mean squared error over the whole dataset (used by batch gradient descent)
def cost(xs, ys):
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y - y_pred) ** 2
    return cost / len(xs)

# Squared error of a single sample (used by SGD)
def SGD_loss(xs, ys):
    y_pred = forward(xs)
    return (y_pred - ys) ** 2

# Gradient of the single-sample loss w.r.t. w: d/dw (x*w - y)^2 = 2x(x*w - y)
def SGD_gradient(xs, ys):
    return 2 * xs * (xs * w - ys)

# Gradient of the MSE cost w.r.t. w, averaged over all samples
def gradient(xs, ys):
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)

# Simple line-plot helper
def draw(x, y):
    fig = plt.figure(num=1, figsize=(4, 4))
    ax = fig.add_subplot(111)
    ax.plot(x, y)
    plt.show()

# Batch gradient descent (commented out; uncomment to run instead of the SGD loop below)
# epoch_lis = []
# loss_lis = []
# learning_rate = 0.012
#
# for epoch in range(100):
#     cost_val = cost(x_data, y_data)
#     grad_val = gradient(x_data, y_data)
#     w -= learning_rate * grad_val
#     print("Epoch = {} w = {} loss = {}".format(epoch, w, cost_val))
#     epoch_lis.append(epoch)
#     loss_lis.append(cost_val)
# print(forward(4))
# draw(epoch_lis, loss_lis)
# draw(x_data, y_data)


# Stochastic gradient descent: update w after every single sample
l_lis = []
learning_rate = 0.009
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        grad = SGD_gradient(x, y)
        w -= learning_rate * grad
        print(" x:{}  y:{}   grad:{}".format(x, y, grad))
        l = SGD_loss(x, y)
        print("loss: ", l)
        l_lis.append(l)

# Plot the per-sample loss over all updates
X = list(range(len(l_lis)))
draw(X, l_lis)
```
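The loop above uses a hand-derived gradient. Since the original script imports torch (though it never uses it), the same per-sample update can also be written with PyTorch autograd. The sketch below mirrors the hyperparameters of the SGD loop above (learning rate 0.009, 10 epochs); it is an illustration, not part of the original code:

```python
import torch

x_data = [5, 6, 7, 8.5, 9, 10, 11.5, 12]
y_data = [1, 2, 8, 4, 5, 6.5, 7.5, 8]

w = torch.tensor(1.0, requires_grad=True)  # initial weight, tracked by autograd

learning_rate = 0.009
for epoch in range(10):
    for x, y in zip(x_data, y_data):
        loss = (x * w - y) ** 2        # single-sample squared error
        loss.backward()                # autograd fills w.grad with d(loss)/dw
        with torch.no_grad():
            w -= learning_rate * w.grad
        w.grad.zero_()                 # reset the gradient before the next sample
    print("epoch {}: w = {:.4f}, last loss = {:.4f}".format(epoch, w.item(), loss.item()))
```

Because `w.grad` accumulates across calls to `backward()`, zeroing it after each update is what keeps this equivalent to the hand-written `SGD_gradient` version.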

 
