# ruijiege
# (blog navigation header removed — scrape artifact, not part of the program)
import random
import struct

import numpy as np
import pandas as pd


def load_labels(file):
    """Parse an IDX1 (MNIST-style) label file.

    Args:
        file: path to the label file.

    Returns:
        A 1-D integer array of labels, or None when the magic number does
        not identify an IDX1 label file.
    """
    with open(file, "rb") as f:
        data = f.read()

    magic_number, num_samples = struct.unpack(">ii", data[:8])
    if magic_number != 2049:  # 0x00000801 marks an IDX1 label file
        print(f"magic number mismatch {magic_number} != 2049")
        return None

    # frombuffer avoids the per-byte Python loop of np.array(list(data[8:]));
    # astype produces the same platform-int dtype (and a writable copy).
    labels = np.frombuffer(data, dtype=np.uint8, offset=8).astype(np.int_)
    return labels


def load_images(file):
    """Parse an IDX3 (MNIST-style) image file.

    Args:
        file: path to the image file.

    Returns:
        A (num_samples, width*height) uint8 array with one flattened image
        per row, or None when the magic number does not identify an IDX3
        image file.
    """
    with open(file, "rb") as f:
        data = f.read()

    magic_number, num_samples, image_width, image_height = struct.unpack(">iiii", data[:16])
    if magic_number != 2051:  # 0x00000803 marks an IDX3 image file
        print(f"magic number mismatch {magic_number} != 2051")
        return None

    # frombuffer avoids the per-byte Python loop of list(data[16:]); .copy()
    # keeps the result writable (frombuffer views are read-only).
    image_data = np.frombuffer(data, dtype=np.uint8, offset=16).reshape(num_samples, -1).copy()
    return image_data


def one_hot(labels, classes):
    """Encode integer labels as an (n, classes) one-hot int32 matrix.

    Args:
        labels: sequence or 1-D array of class indices in [0, classes).
        classes: total number of classes (width of the output).

    Returns:
        int32 array of shape (len(labels), classes) with a single 1 per row.
    """
    # intp handles the empty-input case (np.asarray([]) would be float and
    # unusable as an index array).
    idx = np.asarray(labels, dtype=np.intp)
    output = np.zeros((len(idx), classes), dtype=np.int32)
    # Vectorized scatter replaces the original per-row Python loop.
    output[np.arange(len(idx)), idx] = 1
    return output


class Dataset:
    """Pairs an image collection with its labels, indexable like a sequence."""

    def __init__(self, images, labels):
        self.images = images
        self.labels = labels

    def __getitem__(self, index):
        # dataset[i] -> (image_i, label_i); images and labels are parallel.
        return self.images[index], self.labels[index]

    def __len__(self):
        # Number of samples in the dataset.
        return len(self.images)


class DataLoaderIterator:
    """One-epoch iterator over a DataLoader: yields batches of stacked fields."""

    def __init__(self, dataloader):
        self.dataloader = dataloader
        self.cursor = 0  # position within the (possibly shuffled) index list
        self.indexs = list(range(self.dataloader.count_data))  # 0 .. count_data-1
        if self.dataloader.shuffle:
            # Shuffle once per epoch so batches differ between epochs.
            random.shuffle(self.indexs)

    # Merge one sample's fields into the running batch container.
    # Kept for backward compatibility; __next__ now stacks each batch in a
    # single pass instead of calling this per sample (which re-ran np.vstack
    # for every sample, a quadratic cost per batch).
    def merge_to(self, container, b):
        if len(container) == 0:
            for index, data in enumerate(b):
                if isinstance(data, np.ndarray):
                    container.append(data)
                else:
                    container.append(np.array([data], dtype=type(data)))
        else:
            for index, data in enumerate(b):
                container[index] = np.vstack((container[index], data))
        return container

    def __iter__(self):
        # An iterator should itself be iterable (iterator protocol).
        return self

    def __next__(self):
        if self.cursor >= self.dataloader.count_data:
            raise StopIteration()

        # Number of samples in this batch (the last batch may be short).
        remain = min(self.dataloader.batch_size, self.dataloader.count_data - self.cursor)
        samples = []
        for _ in range(remain):
            index = self.indexs[self.cursor]
            samples.append(self.dataloader.dataset[index])
            self.cursor += 1

        # Stack each field once per batch. Scalars become (1,)-arrays first,
        # matching the original merge_to behavior (scalar fields stack to a
        # (batch, 1) column; a single-sample batch keeps the raw field).
        batch_data = []
        for field in zip(*samples):
            parts = [d if isinstance(d, np.ndarray) else np.array([d], dtype=type(d))
                     for d in field]
            batch_data.append(parts[0] if len(parts) == 1 else np.vstack(parts))
        return batch_data


class DataLoader:
    """Minimal data loader: batches a Dataset, optionally shuffling each epoch."""

    def __init__(self, dataset, batch_size, shuffle):
        self.dataset = dataset
        self.batch_size = batch_size
        self.count_data = len(dataset)
        self.shuffle = shuffle  # whether each epoch visits samples in random order

    def __iter__(self):
        # A fresh iterator per epoch, so every pass restarts (and reshuffles).
        return DataLoaderIterator(self)


# val_labels = load_labels("E:/杜老师课程/dataset/t10k-labels-idx1-ubyte")  # 10000,
# val_images = load_images("E:/杜老师课程/dataset/t10k-images-idx3-ubyte")  # 10000, 784
# numdata = val_images.shape[0]  # 60000
# val_images = np.hstack((val_images / 255 - 0.5, np.ones((numdata, 1))))  # 10000, 785
# val_pd = pd.DataFrame(val_labels, columns=["label"])
class Module:
    """Base class for network layers: calling an instance runs forward()."""

    def __init__(self, name):
        self.name = name

    def __call__(self, *args):
        # Syntactic sugar so layer(x) is equivalent to layer.forward(x);
        # subclasses are expected to define forward().
        return self.forward(*args)


class Initializer:
    """Base class for weight initializers: calling an instance runs apply()."""

    def __init__(self, name):
        self.name = name

    def __call__(self, *args):
        # Syntactic sugar so initer(w) is equivalent to initer.apply(w);
        # subclasses are expected to define apply().
        return self.apply(*args)


class Parameter:
    """A trainable tensor paired with its gradient buffer."""

    def __init__(self, value):
        self.value = value
        # Gradient accumulator, same shape as value (float64 buffer).
        self.delta = np.zeros(value.shape)

    def zero_grad(self):
        # Reset in place so external references to delta remain valid.
        self.delta[...] = 0


class LinearLayer(Module):
    """Fully-connected layer: y = x @ W + b, with Gaussian-initialized W."""

    def __init__(self, input_feature, output_feature):
        super().__init__("Linear")
        self.input_feature = input_feature
        self.output_feature = output_feature
        self.weights = Parameter(np.zeros((input_feature, output_feature)))
        # NOTE: attribute name 'bais' (typo for 'bias') is kept unchanged for
        # compatibility with anything that enumerates these attributes.
        self.bais = Parameter(np.zeros((1, output_feature)))

        # Draw the initial weights from N(0, 1); bias stays zero.
        GaussInitializer(0, 1.0).apply(self.weights.value)

    def forward(self, x):
        # Save a copy of the input; backward needs it for the weight gradient.
        self.x_save = x.copy()
        return x @ self.weights.value + self.bais.value

    def backward(self, G):
        # For C = A @ B with upstream gradient G: dB = A.T @ G, dA = G @ B.T.
        self.weights.delta = self.x_save.T @ G
        self.bais.delta[...] = np.sum(G, 0)  # sum gradients over the batch axis
        return G @ self.weights.value.T


class GaussInitializer(Initializer):
    r"""Initializer drawing values from :math:`N(\mu, \sigma^2)`.

    where :math:`\mu` is the mean and :math:`\sigma` the standard
    deviation; the square of the standard deviation, :math:`\sigma^2`,
    is called the variance.
    """

    def __init__(self, mu, sigma):
        # Consistency fix: the original skipped the base-class constructor,
        # leaving self.name unset unlike other Initializer instances.
        super().__init__("Gauss")
        self.mu = mu
        self.sigma = sigma

    def apply(self, value):
        # Fill in place so the caller's array object sees the new values.
        value[...] = np.random.normal(self.mu, self.sigma, value.shape)


class ReLULayer(Module):
    """ReLU activation; optionally modifies its input in place."""

    def __init__(self, inplace=True):
        super().__init__("ReLU")
        self.inplace = inplace

    def forward(self, x):
        # Remember which entries were negative; backward zeroes them too.
        self.negative_position = x < 0
        out = x if self.inplace else x.copy()
        out[self.negative_position] = 0
        return out

    def backward(self, G):
        # Gradient is passed through where the input was >= 0, zero elsewhere.
        grad = G if self.inplace else G.copy()
        grad[self.negative_position] = 0
        return grad


class SigmoidCrossEntropyLayer(Module):
    """Sigmoid activation + binary cross-entropy loss over one-hot labels."""

    def __init__(self):
        super().__init__("CrossEntropyLoss")

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def forward(self, x, label_onehot):
        """Return the per-sample mean binary cross-entropy loss.

        Args:
            x: (batch, classes) raw scores.
            label_onehot: (batch, classes) one-hot targets.
        """
        eps = 1e-4
        self.label_onehot = label_onehot
        self.predict = self.sigmoid(x)
        # Clip predictions away from 0 and 1 to keep the logs finite.
        self.predict = np.clip(self.predict, a_max=1 - eps, a_min=eps)
        self.batch_size = self.predict.shape[0]
        # BUG FIX: the original returned the constant 1, so the training
        # loop's printed loss never reflected progress. Compute the BCE
        # loss (same formula the Softmax variant of this layer uses).
        return -np.sum(self.label_onehot * np.log(self.predict) + (1 - self.label_onehot) *
                       np.log(1 - self.predict)) / self.batch_size

    def backward(self):
        # d(loss)/d(x) for sigmoid + BCE simplifies to (p - y) / batch.
        return (self.predict - self.label_onehot) / self.batch_size


class SoftmaxCrossEntropyLayer(Module):
    """Softmax activation + cross-entropy loss over one-hot labels."""

    def __init__(self):
        super().__init__("CrossEntropyLoss")

    def softmax(self, x):
        # Stability fix: subtracting the row-wise max is mathematically a
        # no-op for softmax but prevents np.exp overflow on large logits.
        e = np.exp(x - np.max(x, axis=1, keepdims=True))
        return e / np.sum(e, axis=1, keepdims=True)

    def forward(self, x, label_onehot):
        """Return the per-sample mean cross-entropy-style loss.

        Args:
            x: (batch, classes) raw scores.
            label_onehot: (batch, classes) one-hot targets.
        """
        eps = 1e-4
        self.label_onehot = label_onehot
        self.predict = self.softmax(x)
        self.predict = np.clip(self.predict, a_max=1 - eps, a_min=eps)  # keep logs finite
        self.batch_size = self.predict.shape[0]
        # NOTE(review): the (1-y)*log(1-p) term is the binary-CE form while
        # backward() is the plain softmax-CE gradient, so the reported value
        # and the gradient are not the same functional — kept as-is.
        return -np.sum(label_onehot * np.log(self.predict) + (1 - label_onehot) *
                       np.log(1 - self.predict)) / self.batch_size

    def backward(self):
        # Softmax + cross-entropy gradient: (p - y) / batch.
        return (self.predict - self.label_onehot) / self.batch_size


class Model(Module):
    """Two-layer MLP: Linear -> ReLU -> Linear."""

    def __init__(self, num_feature, num_hidden, num_classes):
        super().__init__("Model")
        self.input_to_hidden = LinearLayer(num_feature, num_hidden)
        self.relu = ReLULayer()
        self.hidden_to_output = LinearLayer(num_hidden, num_classes)

    def forward(self, x):
        # Run the layers in order; each layer call dispatches to forward().
        for layer in (self.input_to_hidden, self.relu, self.hidden_to_output):
            x = layer(x)
        return x

    def backward(self, G):
        # Propagate the gradient through the layers in reverse order.
        for layer in (self.hidden_to_output, self.relu, self.input_to_hidden):
            G = layer.backward(G)
        return G


class Optimizer:
    """Base optimizer: collects every Parameter owned by the model's layers."""

    def __init__(self, name, model, lr):
        self.name = name
        self.model = model
        self.lr = lr

        # Discover the model's layers (Module-typed attributes), then harvest
        # each layer's Parameter-typed attributes into a flat list.
        layers = [attr for attr in vars(model).values() if isinstance(attr, Module)]
        self.params = [p for layer in layers
                       for p in vars(layer).values() if isinstance(p, Parameter)]

    def zero_grad(self):
        # Clear every parameter's gradient buffer in place.
        for param in self.params:
            param.zero_grad()

    def set_lr(self, lr):
        self.lr = lr


class Adam(Optimizer):
    """Adam optimizer with bias-corrected first/second moment estimates."""

    def __init__(self, model, lr=1e-3, beta1=0.9, beta2=0.999, momentum=0.1):
        super().__init__("Adam", model, lr)
        self.momentum = momentum  # NOTE(review): not read by step(); kept for interface compatibility
        self.beta1 = beta1
        self.beta2 = beta2
        self.t = 0  # global step counter, drives bias correction

        # Per-parameter moment buffers; scalar 0 broadcasts on first update.
        for param in self.params:
            param.m = 0
            param.v = 0

    def step(self):
        eps = 1e-8
        self.t += 1
        for param in self.params:
            grad = param.delta
            # Exponential moving averages of the gradient and its square.
            param.m = self.beta1 * param.m + (1 - self.beta1) * grad
            param.v = self.beta2 * param.v + (1 - self.beta2) * grad ** 2
            # Bias correction counteracts the zero initialization of m and v.
            m_hat = param.m / (1 - self.beta1 ** self.t)
            v_hat = param.v / (1 - self.beta2 ** self.t)
            param.value -= self.lr * m_hat / (np.sqrt(v_hat) + eps)

def estimate_val(predict, gt_labels, classes, loss_func):
    """Evaluate predictions against ground-truth labels.

    Args:
        predict: (N, classes) score matrix produced by the model.
        gt_labels: (N,) integer ground-truth labels.
        classes: number of classes, used to one-hot encode gt_labels.
        loss_func: callable(predict, label_onehot) -> scalar loss.

    Returns:
        (accuracy, loss) tuple over the whole set.
    """
    plabel = predict.argmax(1)
    # BUG FIX: compare against the gt_labels argument — the original
    # compared against the global val_labels and silently ignored its
    # parameter, so any other label set gave wrong accuracy.
    positive = plabel == gt_labels
    total_images = predict.shape[0]
    accuracy = np.sum(positive) / total_images
    return accuracy, loss_func(predict, one_hot(gt_labels, classes))

# --- Load and preprocess the validation split ---
val_labels = load_labels("E:/杜老师课程/dataset/t10k-labels-idx1-ubyte")  # (10000,)
val_images = load_images("E:/杜老师课程/dataset/t10k-images-idx3-ubyte")  # (10000, 784)
numdata = val_images.shape[0]
# Scale pixels into [-0.5, 0.5] and append a constant-1 column (bias feature).
val_images = np.hstack((val_images / 255 - 0.5, np.ones((numdata, 1))))  # (10000, 785)
val_pd = pd.DataFrame(val_labels, columns=["label"])

# --- Load and preprocess the training split ---
train_labels = load_labels("E:/杜老师课程/dataset/train-labels-idx1-ubyte")  # (60000,)
train_images = load_images("E:/杜老师课程/dataset/train-images-idx3-ubyte")  # (60000, 784)
numdata = train_images.shape[0]  # 60000
train_images = np.hstack((train_images / 255 - 0.5, np.ones((numdata, 1))))  # (60000, 785)
train_pd = pd.DataFrame(train_labels, columns=["label"])

classes = 10  # number of target classes (digits 0-9)
batch_size = 32  # samples per mini-batch
epochs = 20  # stopping criterion: maximum number of full passes over the data
lr = 1e-3
train_data = DataLoader(Dataset(train_images,one_hot(train_labels,classes)),batch_size,shuffle=True)

model = Model(train_images.shape[1],256,classes)
loss_func = SigmoidCrossEntropyLayer()
optim = Adam(model,lr)
iters = 0  # total optimization steps taken across all epochs

for epoch in range(epochs):
    for index,(image,label) in enumerate(train_data):
        predict = model(image)
        loss = loss_func(predict,label)
        # optim.zero_grad()  # not needed here: LinearLayer.backward assigns deltas rather than accumulating
        G = loss_func.backward()
        model.backward(G)
        optim.step()
        iters += 1

        if iters % 1000 == 0:
            # NOTE(review): prints the initial lr variable, not optim.lr — they
            # only differ if set_lr is ever called.
            print(f"Iter {iters}, {epoch} / {epochs}, Loss {loss:.3f}, LR {lr:g}")
    val_accuracy, val_loss = estimate_val(model(val_images), val_labels, classes, loss_func)
    print(f"Val set, Accuracy: {val_accuracy:.3f}, Loss: {val_loss:.3f}")
# (blog footer removed — scrape artifact: "View Code", post date, and page
# navigation text from the original blog page, not part of the program)