# [Deep Learning] A simple regression problem

def compute_error_for_line_given_points(b, w, points):
    """Return the mean squared error of the line y = w*x + b over *points*.

    *points* is a sequence of (x, y) pairs; the result is the average of
    the squared residuals (y - (w*x + b))**2.
    """
    squared_residuals = ((y - (w * x + b)) ** 2 for x, y in points)
    return sum(squared_residuals) / float(len(points))


def step_gradient(b_current, w_current, points, learning_rate):
    """Perform one gradient-descent update of (b, w) for the MSE loss.

    Args:
        b_current: current intercept.
        w_current: current slope.
        points: sequence of (x, y) pairs.
        learning_rate: step size applied to the averaged gradients.

    Returns:
        [new_b, new_w] — the parameters after one descent step.
    """
    b_gradient = 0.0
    w_gradient = 0.0
    N = float(len(points))

    for x, y in points:
        # Gradients of mean((y - (w*x + b))**2) w.r.t. b and w,
        # accumulated one point at a time.
        residual = y - (w_current * x + b_current)
        b_gradient += -(2 / N) * residual
        w_gradient += -(2 / N) * residual * x
    # NOTE: the original debug print of (w_gradient, b_gradient) is removed —
    # it fired on every iteration (100k times from main), flooding stdout.
    new_b = b_current - (learning_rate * b_gradient)
    new_w = w_current - (learning_rate * w_gradient)
    return [new_b, new_w]


def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    """Run *num_iterations* gradient-descent steps from the given start.

    Prints the current MSE every 100 steps and returns [b, w] at the end.
    """
    b, w = starting_b, starting_w
    for step in range(num_iterations):
        b, w = step_gradient(b, w, points, learning_rate)
        if step % 100 == 0:
            print(compute_error_for_line_given_points(b, w, points))
    return [b, w]


def main():
    """Fit y = w*x + b to three sample points and print the predictions."""
    points = [
        [1, 2.1],
        [2, 4.2],
        [3, 5.98],
    ]
    print(points)
    b, w = gradient_descent_runner(points, 0, 0, 0.01, 100000)
    print(w, b)
    # Predictions at the three training inputs.
    for x in (1, 2, 3):
        print(x * w + b)


# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()

  

# posted @ 2022-08-26 21:38  一支小白  阅读(31)  评论(0)  编辑  收藏  举报