Deep Learning -- GANs in Practice
DCGAN
(Despite the heading, this first example trains a plain fully connected GAN on a 2-D eight-Gaussian toy dataset rather than a convolutional DCGAN; it is the baseline that the WGAN section below extends.)
| import torch |
| from torch import nn, optim, autograd |
| import numpy as np |
| import visdom |
| import random |
| |
| |
| h_dim = 400 |
| batchsz = 512 |
| viz = visdom.Visdom(use_incoming_socket=False)  # created here but not used in this first script |
| |
| class Generator(nn.Module): |
| |
| def __init__(self): |
| super(Generator, self).__init__() |
| |
| self.net = nn.Sequential( |
| nn.Linear(2, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, 2) |
| ) |
| |
| def forward(self,z): |
| output = self.net(z) |
| return output |
| |
| class Discriminator(nn.Module): |
| |
| def __init__(self): |
| super(Discriminator,self).__init__() |
| |
| self.net = nn.Sequential( |
| nn.Linear(2, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, 1), |
| nn.Sigmoid() |
| ) |
| |
| def forward(self,x): |
| output = self.net(x) |
| return output.view(-1) |
| |
| |
| |
| |
| def data_generator(): |
| ''' |
| Infinite generator over an 8-Gaussian mixture in 2-D. |
| :return: yields batches of shape (batchsz, 2) |
| ''' |
| scale = 2. |
| centers = [ |
| (1,0), |
| (-1,0), |
| (0,1), |
| (0,-1), |
| (1./np.sqrt(2),1./np.sqrt(2)), |
| (1./ np.sqrt(2),-1. / np.sqrt(2)), |
| (-1./np.sqrt(2),1./np.sqrt(2)), |
| (-1./ np.sqrt(2),-1. / np.sqrt(2)) |
| ] |
| |
| centers = [(scale*x,scale*y) for x,y in centers] |
| |
| while True: |
| dataset = [] |
| for i in range(batchsz): |
| point = np.random.randn(2)*0.02 |
| center = random.choice(centers) |
| |
| point[0] += center[0] |
| point[1] += center[1] |
| dataset.append(point) |
| |
| dataset = np.array(dataset).astype(np.float32) |
| dataset /= 1.414  # normalize by sqrt(2) |
| |
| yield dataset |
| |
| def main(): |
| |
| |
| torch.manual_seed(23) |
| np.random.seed(23) |
| |
| data_iter = data_generator() |
| x = next(data_iter) |
| |
| |
| G = Generator().cuda() |
| D = Discriminator().cuda() |
| |
| |
| |
| optim_G = optim.Adam(G.parameters(), lr=5e-4, betas=(0.5, 0.9)) |
| optim_D = optim.Adam(D.parameters(), lr=5e-4, betas=(0.5, 0.9)) |
| |
| for epoch in range(50000): |
| |
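| # 1. train the Discriminator: 5 steps per Generator step |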
| for _ in range(5): |
| |
| x = next(data_iter) |
| x = torch.from_numpy(x).cuda() |
| |
| predr = D(x) |
| |
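| # real samples should score high: minimize -E[D(x)] |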
| lossr = -predr.mean() |
| |
| z = torch.randn(batchsz,2).cuda() |
| xf = G(z).detach() |
| predf = D(xf) |
| lossf = predf.mean() |
| |
| |
| loss_D = lossr + lossf |
| |
| |
| optim_D.zero_grad() |
| loss_D.backward() |
| optim_D.step() |
| |
| |
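| # 2. train the Generator: make D score fakes highly |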
| z = torch.randn(batchsz,2).cuda() |
| xf = G(z) |
| predf = D(xf) |
| |
| loss_G = -predf.mean() |
| |
| optim_G.zero_grad() |
| loss_G.backward() |
| optim_G.step() |
| |
| if epoch % 100 == 0: |
| print(loss_D.item(), loss_G.item()) |
| |
| |
| if __name__ == '__main__': |
| main() |
| |
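The script above only prints the losses; as a quick sanity check, here is a minimal sketch for eyeballing the learned distribution after training (assuming `G`, `torch`, and the CUDA setup from the script above; the sample count 1000 is arbitrary):

| # sanity check: scatter-plot samples from the trained generator |
| from matplotlib import pyplot as plt |
| |
| with torch.no_grad(): |
|     z = torch.randn(1000, 2).cuda() |
|     samples = G(z).cpu().numpy() |
| plt.scatter(samples[:, 0], samples[:, 1], s=2, c='green') |
| plt.title('samples from G') |
| plt.show() |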
WGAN
Compared with the GAN above, the only change is an extra gradient-penalty term in the discriminator loss (the WGAN-GP formulation); the loss it implements is sketched below.
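Concretely, this is the WGAN-GP penalty of Gulrajani et al.: with real samples $x_r$, fakes $x_f = G(z)$, and interpolates $\hat{x} = \alpha x_r + (1-\alpha) x_f$ with $\alpha \sim U(0,1)$, the critic loss implemented below is

$$\mathcal{L}_D = \mathbb{E}[D(x_f)] - \mathbb{E}[D(x_r)] + \lambda\,\mathbb{E}\big[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2\big]$$

with $\lambda = 0.3$ in this code (the original paper recommends $\lambda = 10$).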
| import torch |
| from torch import nn, optim, autograd |
| import numpy as np |
| import visdom |
| from torch.nn import functional as F |
| from matplotlib import pyplot as plt |
| import random |
| |
| h_dim = 400 |
| batchsz = 512 |
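| # visdom needs a running server, started with: python -m visdom.server |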
| viz = visdom.Visdom() |
| |
| class Generator(nn.Module): |
| |
| def __init__(self): |
| super(Generator, self).__init__() |
| |
| self.net = nn.Sequential( |
| nn.Linear(2, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, 2), |
| ) |
| |
| def forward(self, z): |
| output = self.net(z) |
| return output |
| |
| |
| class Discriminator(nn.Module): |
| |
| def __init__(self): |
| super(Discriminator, self).__init__() |
| |
| self.net = nn.Sequential( |
| nn.Linear(2, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, h_dim), |
| nn.ReLU(True), |
| nn.Linear(h_dim, 1), |
| nn.Sigmoid()  # note: a textbook WGAN critic omits the sigmoid; this toy example keeps it |
| ) |
| |
| def forward(self, x): |
| output = self.net(x) |
| return output.view(-1) |
| |
| def data_generator(): |
| |
| scale = 2. |
| centers = [ |
| (1, 0), |
| (-1, 0), |
| (0, 1), |
| (0, -1), |
| (1. / np.sqrt(2), 1. / np.sqrt(2)), |
| (1. / np.sqrt(2), -1. / np.sqrt(2)), |
| (-1. / np.sqrt(2), 1. / np.sqrt(2)), |
| (-1. / np.sqrt(2), -1. / np.sqrt(2)) |
| ] |
| centers = [(scale * x, scale * y) for x, y in centers] |
| while True: |
| dataset = [] |
| for i in range(batchsz): |
| point = np.random.randn(2) * .02 |
| center = random.choice(centers) |
| point[0] += center[0] |
| point[1] += center[1] |
| dataset.append(point) |
| dataset = np.array(dataset, dtype='float32') |
| dataset /= 1.414 |
| yield dataset |
| |
| def generate_image(D, G, xr, epoch): |
| """ |
| Plots the critic's contour, real samples, and generator samples, |
| and sends the figure to visdom. |
| """ |
| N_POINTS = 128 |
| RANGE = 3 |
| plt.clf() |
| |
| points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32') |
| points[:, :, 0] = np.linspace(-RANGE, RANGE, N_POINTS)[:, None] |
| points[:, :, 1] = np.linspace(-RANGE, RANGE, N_POINTS)[None, :] |
| points = points.reshape((-1, 2)) |
| |
| |
| |
| |
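| # draw the critic's output surface as a contour plot |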
| with torch.no_grad(): |
| points = torch.Tensor(points).cuda() |
| disc_map = D(points).cpu().numpy() |
| x = y = np.linspace(-RANGE, RANGE, N_POINTS) |
| cs = plt.contour(x, y, disc_map.reshape((len(x), len(y))).transpose()) |
| plt.clabel(cs, inline=1, fontsize=10) |
| |
| |
| |
| |
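| # overlay real samples (orange) and generator samples (green) |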
| with torch.no_grad(): |
| z = torch.randn(batchsz, 2).cuda() |
| samples = G(z).cpu().numpy() |
| plt.scatter(xr[:, 0], xr[:, 1], c='orange', marker='.') |
| plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+') |
| |
| viz.matplot(plt, win='contour', opts=dict(title='p(x):%d'%epoch)) |
| |
| |
| def weights_init(m): |
| if isinstance(m, nn.Linear): |
| |
| nn.init.kaiming_normal_(m.weight) |
| m.bias.data.fill_(0) |
| |
| def gradient_penalty(D, xr, xf): |
| """ |
| WGAN-GP gradient penalty on random interpolates between real and fake samples. |
| :param D: critic network |
| :param xr: real samples, shape [batchsz, 2] |
| :param xf: fake (generated) samples, shape [batchsz, 2] |
| :return: scalar penalty term |
| """ |
| LAMBDA = 0.3 |
| |
| |
| xf = xf.detach() |
| xr = xr.detach() |
| |
| |
| alpha = torch.rand(batchsz, 1).cuda() |
| alpha = alpha.expand_as(xr) |
| |
| interpolates = alpha * xr + ((1 - alpha) * xf) |
| interpolates.requires_grad_() |
| |
| disc_interpolates = D(interpolates) |
| |
| gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, |
| grad_outputs=torch.ones_like(disc_interpolates), |
| create_graph=True, retain_graph=True, only_inputs=True)[0] |
| |
| gp = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA |
| |
| return gp |
| |
| def main(): |
| |
| torch.manual_seed(23) |
| np.random.seed(23) |
| |
| G = Generator().cuda() |
| D = Discriminator().cuda() |
| G.apply(weights_init) |
| D.apply(weights_init) |
| |
| optim_G = optim.Adam(G.parameters(), lr=1e-3, betas=(0.5, 0.9)) |
| optim_D = optim.Adam(D.parameters(), lr=1e-3, betas=(0.5, 0.9)) |
| |
| |
| data_iter = data_generator() |
| print('batch:', next(data_iter).shape) |
| |
| viz.line([[0,0]], [0], win='loss', opts=dict(title='loss', |
| legend=['D', 'G'])) |
| |
| for epoch in range(50000): |
| |
| |
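| # 1. train the Discriminator (critic): 5 steps per Generator step |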
| for _ in range(5): |
| x = next(data_iter) |
| xr = torch.from_numpy(x).cuda() |
| |
| |
| predr = (D(xr)) |
| |
| lossr = - (predr.mean()) |
| |
| |
| z = torch.randn(batchsz, 2).cuda() |
| |
| |
| xf = G(z).detach() |
| |
| predf = (D(xf)) |
| |
| lossf = (predf.mean()) |
| |
| |
| gp = gradient_penalty(D, xr, xf) |
| |
| loss_D = lossr + lossf + gp |
| optim_D.zero_grad() |
| loss_D.backward() |
| |
| |
| optim_D.step() |
| |
| |
| |
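| # 2. train the Generator |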
| z = torch.randn(batchsz, 2).cuda() |
| xf = G(z) |
| predf = (D(xf)) |
| |
| loss_G = - (predf.mean()) |
| optim_G.zero_grad() |
| loss_G.backward() |
| optim_G.step() |
| |
| |
| if epoch % 100 == 0: |
| viz.line([[loss_D.item(), loss_G.item()]], [epoch], win='loss', update='append') |
| |
| generate_image(D, G, xr.cpu().numpy(), epoch)  # matplotlib cannot plot CUDA tensors |
| |
| print(loss_D.item(), loss_G.item()) |
| |
| |
| |
| |
| |
| |
| if __name__ == '__main__': |
| main() |