Neural Networks and Deep Learning (邱锡鹏), Programming Exercise 3: Logistic Regression and Softmax Regression, Source Code (TensorFlow Version)

Logistic Regression
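
For reference: the model below computes p = sigmoid(x·W + b) for each 2-D input point, and blank 1 fills in the binary cross-entropy written out by hand, loss(y, p) = -y*log(p + eps) - (1 - y)*log(1 - p + eps), averaged over the batch; eps = 1e-12 keeps the logarithms finite when p saturates at 0 or 1.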


import tensorflow as tf
import matplotlib.pyplot as plt

from matplotlib import animation, rc
from IPython.display import HTML
import matplotlib.cm as cm
import numpy as np

dot_num = 100
x_p = np.random.normal(3., 1, dot_num)
print(x_p[0].dtype)  # check the dtype (float64 by default)
x_p = np.float32(x_p)  # convert to float32 (Edit by David, 2022-06-01)
print(x_p[0].dtype)  # check the dtype again

y_p = np.random.normal(6., 1, dot_num)
y_p = np.float32(y_p)  # convert to float32
y = np.ones(dot_num)
# print(y[0].dtype)  # check the dtype
y = np.float32(y)  # convert to float32
C1 = np.array([x_p, y_p, y]).T

x_n = np.random.normal(6., 1, dot_num)
x_n = np.float32(x_n)  # convert to float32
y_n = np.random.normal(3., 1, dot_num)
y_n = np.float32(y_n)  # convert to float32
y = np.zeros(dot_num)
y = np.float32(y)  # convert to float32
C2 = np.array([x_n, y_n, y]).T

# plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
# plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')

data_set = np.concatenate((C1, C2), axis=0)
np.random.shuffle(data_set)

epsilon = 1e-12


class LogisticRegression():
    def __init__(self):
        self.W = tf.Variable(shape=[2, 1], dtype=tf.float32,
                             initial_value=tf.random.uniform(shape=[2, 1], minval=-0.1, maxval=0.1))
        self.b = tf.Variable(shape=[1], dtype=tf.float32, initial_value=tf.zeros(shape=[1]))

        self.trainable_variables = [self.W, self.b]

    @tf.function
    def __call__(self, inp):
        logits = tf.matmul(inp, self.W) + self.b  # shape(N, 1)
        pred = tf.nn.sigmoid(logits)
        return pred


# @tf.function  # disabled (Edit by David, 2022-06-01)
def compute_loss(pred, label):
    #     print(label)
    if not isinstance(label, tf.Tensor):  # isinstance() is a Python built-in that checks an object's type
        label = tf.constant(label, dtype=tf.float32)  # wrap the label in a constant tensor
    pred = tf.squeeze(pred, axis=1)
    '''============================='''
    # input: label shape (N,), pred shape (N,)
    # output: losses shape (N,), one loss per sample
    # todo blank 1: implement the sigmoid cross-entropy loss (without using tf's built-in loss functions)
    losses = -label * tf.math.log(pred + epsilon) - (1. - label) * tf.math.log(1. - pred + epsilon)

    '''============================='''
    loss = tf.reduce_mean(losses)

    pred = tf.where(pred > 0.5, tf.ones_like(pred), tf.zeros_like(pred))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(label, pred), dtype=tf.float32))
    return loss, accuracy


# @tf.function  # disabled (Edit by David, 2022-06-01)
def train_one_step(model, optimizer, x, y):
    with tf.GradientTape() as tape:
        pred = model(x)
        loss, accuracy = compute_loss(pred, y)

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, accuracy, model.W, model.b


if __name__ == '__main__':
    model = LogisticRegression()
    opt = tf.keras.optimizers.SGD(learning_rate=0.01)
    x1, x2, y = list(zip(*data_set))
    x = list(zip(x1, x2))
    # animation_fram = []

    for i in range(200):
        loss, accuracy, W_opt, b_opt = train_one_step(model, opt, x, y)
        # animation_fram.append((W_opt.numpy()[0, 0], W_opt.numpy()[1, 0], b_opt.numpy(), loss.numpy()))
        if i % 20 == 0:
            print(f'loss: {loss.numpy():.4}\t accuracy: {accuracy.numpy():.4}')
            print(f'W_opt: {W_opt.numpy()}\t b_opt: {b_opt.numpy()}')

plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')
# plt.scatter(C3[:, 0], C3[:, 1], c='r', marker='*')
print(W_opt.numpy()[0, 0], W_opt.numpy()[1, 0], b_opt.numpy(), loss.numpy())

x = np.arange(0., 10., 0.1)
# Decision boundary: sigmoid(w1*x + w2*y + b) = 0.5  <=>  w1*x + w2*y + b = 0,
# i.e. y = -(w1/w2) * x - b/w2  (this is the "yy = a/-b * xx + c/-b" line from the NNDL exercise)
y = W_opt.numpy()[0, 0] / -W_opt.numpy()[1, 0] * x + b_opt.numpy() / -W_opt.numpy()[1, 0]

plt.xlim(0, 10)
plt.ylim(0, 10)
plt.plot(x, y, label='decision boundary')
plt.legend()
plt.show()
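
As an optional sanity check on the hand-written loss, it can be compared with TensorFlow's built-in binary cross-entropy on a few toy values (a minimal sketch assuming TensorFlow 2.x; label_demo and pred_demo are made-up names and numbers used only for illustration):

label_demo = tf.constant([1., 0., 1., 0.])     # made-up labels
pred_demo = tf.constant([0.9, 0.2, 0.6, 0.4])  # made-up predicted probabilities
manual = tf.reduce_mean(-label_demo * tf.math.log(pred_demo + epsilon)
                        - (1. - label_demo) * tf.math.log(1. - pred_demo + epsilon))
builtin = tf.keras.losses.binary_crossentropy(label_demo, pred_demo)
print(manual.numpy(), builtin.numpy())  # both come out around 0.34 here

Small differences between the two numbers are expected because Keras clips the probabilities with its own (larger) epsilon before taking the logarithm.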

Softmax Regression
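
For reference: the model is p = softmax(x·W + b) with W of shape (2, 3) and b of shape (1, 3), and blank 2 fills in the categorical cross-entropy written out by hand, loss(y, p) = -sum_c y_c * log(p_c + eps) for a one-hot label y, averaged over the batch.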


import tensorflow as tf
import matplotlib.pyplot as plt

from matplotlib import animation, rc
from IPython.display import HTML
import matplotlib.cm as cm
import numpy as np


dot_num = 100
x_p = np.random.normal(3., 1, dot_num)
x_p = np.float32(x_p)  # convert to float32 (Edit by David, 2022-06-01)
y_p = np.random.normal(6., 1, dot_num)
y_p = np.float32(y_p)  # convert to float32
y = np.ones(dot_num)
y = np.float32(y)  # convert to float32
C1 = np.array([x_p, y_p, y]).T

x_n = np.random.normal(6., 1, dot_num)
x_n = np.float32(x_n)  # convert to float32
y_n = np.random.normal(3., 1, dot_num)
y_n = np.float32(y_n)  # convert to float32
y = np.zeros(dot_num)
y = np.float32(y)  # convert to float32
C2 = np.array([x_n, y_n, y]).T

x_b = np.random.normal(7., 1, dot_num)
x_b = np.float32(x_b)  # convert to float32
y_b = np.random.normal(7., 1, dot_num)
y_b = np.float32(y_b)  # convert to float32
y = np.ones(dot_num) * 2  # the third class is labelled 2
y = np.float32(y)  # convert to float32
C3 = np.array([x_b, y_b, y]).T

plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')
plt.scatter(C3[:, 0], C3[:, 1], c='r', marker='*')

data_set = np.concatenate((C1, C2, C3), axis=0)
np.random.shuffle(data_set)

epsilon = 1e-12


class SoftmaxRegression():
    def __init__(self):
        '''============================='''
        # todo blank 1: build the model parameters self.W and self.b (see logistic-regression-exercise)
        '''============================='''
        self.W = tf.Variable(shape=[2, 3], dtype=tf.float32,
                             initial_value=tf.random.uniform(shape=[2, 3], minval=-0.1, maxval=0.1))
        self.b = tf.Variable(shape=[1, 3], dtype=tf.float32, initial_value=tf.zeros(shape=[1, 3]))
        self.trainable_variables = [self.W, self.b]

    @tf.function
    def __call__(self, inp):
        logits = tf.matmul(inp, self.W) + self.b  # shape(N, 3)
        pred = tf.nn.softmax(logits)
        return pred


@tf.function
def compute_loss(pred, label):
    label = tf.one_hot(tf.cast(label, dtype=tf.int32), dtype=tf.float32, depth=3)
    '''============================='''
    # input: label shape (N, 3), pred shape (N, 3)
    # output: losses shape (N,), one loss per sample
    # todo blank 2: implement the softmax cross-entropy loss (without using tf's built-in loss functions)
    '''============================='''
    losses = -tf.reduce_sum(label * tf.math.log(pred + epsilon), axis=1)

    loss = tf.reduce_mean(losses)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(label, axis=1), tf.argmax(pred, axis=1)), dtype=tf.float32))
    return loss, accuracy


@tf.function
def train_one_step(model, optimizer, x, y):
    with tf.GradientTape() as tape:
        pred = model(x)
        loss, accuracy = compute_loss(pred, y)

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, accuracy

model = SoftmaxRegression()
opt = tf.keras.optimizers.SGD(learning_rate=0.01)
x1, x2, y = list(zip(*data_set))
x = list(zip(x1, x2))
for i in range(1000):
    loss, accuracy = train_one_step(model, opt, x, y)
    if i % 50 == 49:
        print(f'loss: {loss.numpy():.4}\t accuracy: {accuracy.numpy():.4}')

plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')
plt.scatter(C3[:, 0], C3[:, 1], c='r', marker='*')

x = np.arange(0., 10., 0.1)
y = np.arange(0., 10., 0.1)

X, Y = np.meshgrid(x, y)
inp = np.array(list(zip(X.reshape(-1), Y.reshape(-1))), dtype=np.float32)
print(inp.shape)
Z = model(inp)
Z = np.argmax(Z, axis=1)
Z = Z.reshape(X.shape)
plt.contour(X, Y, Z)
plt.show()
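
Similarly, the per-sample softmax loss can be checked against tf.keras.losses.categorical_crossentropy (a minimal sketch assuming TensorFlow 2.x; label_demo and pred_demo are made-up values used only for illustration):

label_demo = tf.constant([[1., 0., 0.], [0., 0., 1.]])       # made-up one-hot labels
pred_demo = tf.constant([[0.7, 0.2, 0.1], [0.1, 0.3, 0.6]])  # made-up predicted probabilities
manual = -tf.reduce_sum(label_demo * tf.math.log(pred_demo + epsilon), axis=1)
builtin = tf.keras.losses.categorical_crossentropy(label_demo, pred_demo)
print(manual.numpy(), builtin.numpy())  # both should be approximately [0.357, 0.511]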
