Natural Gradient Descent Simulation Exercise

Machine Learning

Hand-written gradient descent

# Hypothesis: h(x) = a*x + b
# Cost: J(a, b) = sum((h(x) - y)^2) / (2 * m), where m = len(X)
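# For reference (added note), the gradients of J used by update_parm below:
#   dJ/da = sum((h(x) - y) * x) / m
#   dJ/db = sum(h(x) - y) / m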
import numpy as np

# Toy data: every point lies exactly on the line y = 2*x + 1
X_ = np.array([1, 2, 3, 5, 7, 4])
Y_ = np.array([3, 5, 7, 11, 15, 9])


def update_parm(a, b, X, Y, alpha):
    """
    Perform one gradient-descent update of the parameters.
    :param a: slope, float
    :param b: intercept, float
    :param X: np.ndarray, single-feature input
    :param Y: np.ndarray, targets
    :param alpha: learning rate
    :return: updated (a, b)
    """
    a_ = a - alpha * np.sum((X * a + b - Y) * X) / len(X)
    b_ = b - alpha * np.sum(X * a + b - Y) / len(X)
    return a_, b_


def cost(a, b, X, Y):
    # Mean squared error with the conventional 1/2 factor
    return np.sum(np.square(a * X + b - Y)) / (2 * len(X))
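
# Quick worked check (added note): with a = b = 0 the hypothesis is
# identically zero, so cost(0, 0, X_, Y_) = sum(Y_**2) / (2 * 6)
# = 510 / 12 = 42.5.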


def start_grad_descent():
    a, b = 0, 0
    old = cost(a, b, X_, Y_)
    a, b = update_parm(a, b, X_, Y_, 1e-2)
    t = 1
    while t < 1e8:
        c = cost(a, b, X_, Y_)
        a, b = update_parm(a, b, X_, Y_, 1e-2)
        print('{}th difference: {}'.format(t, old - c))
        if c > old:
            # If the new cost exceeds the old one, the learning rate alpha
            # is too large and the iteration will not converge.
            print('alpha is too large')
            break
        elif old - c < 1e-7:
            # Stop iterating once the cost decreases by less than 1e-7.
            break
        else:
            old = c
            t += 1

    print(a, b)


start_grad_descent()
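
As a sanity check (an added sketch, not part of the original script), the result can be compared against the closed-form least-squares fit. Since every point lies exactly on y = 2*x + 1, both methods should give a ≈ 2 and b ≈ 1:

# Closed-form comparison via numpy's polynomial fitting; a degree-1
# polyfit returns [slope, intercept].
slope, intercept = np.polyfit(X_, Y_, 1)
print('closed-form fit: a = {:.4f}, b = {:.4f}'.format(slope, intercept))
# Expected: a = 2.0000, b = 1.0000, which the gradient-descent result
# should roughly match once the stopping threshold is reached.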
