[Source Code Walkthrough] pix2pix (1): Training
Source code: https://github.com/mrzhu-cool/pix2pix-pytorch
Compared with Jun-Yan Zhu's official version, this implementation is simpler and easier to read.
The training code lives in train.py. It opens with the usual three-step setup found in most training scripts: parse the command-line arguments, load the data, and build the models.
Command-line arguments
# Training settings
parser = argparse.ArgumentParser(description='pix2pix-pytorch-implementation')
parser.add_argument('--dataset', required=True, help='facades')
parser.add_argument('--batch_size', type=int, default=1, help='training batch size')
parser.add_argument('--test_batch_size', type=int, default=1, help='testing batch size')
parser.add_argument('--direction', type=str, default='b2a', help='a2b or b2a')
parser.add_argument('--input_nc', type=int, default=3, help='input image channels')
parser.add_argument('--output_nc', type=int, default=3, help='output image channels')
parser.add_argument('--ngf', type=int, default=64, help='generator filters in first conv layer')
parser.add_argument('--ndf', type=int, default=64, help='discriminator filters in first conv layer')
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count')
parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--lamb', type=int, default=10, help='weight on L1 term in objective')
opt = parser.parse_args()
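The snippets below also reference a device variable. In the repo, train.py derives it from --cuda and seeds the RNG right after parsing; the following is a lightly paraphrased sketch of that setup, so the exact wording may differ from the repo:

# Sketch of the setup that follows argument parsing in train.py
# (paraphrased; the repo's exact message text may differ)
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

torch.manual_seed(opt.seed)
device = torch.device("cuda:0" if opt.cuda else "cpu")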
Data
print('===> Loading datasets')
root_path = "dataset/"
train_set = get_training_set(root_path + opt.dataset, opt.direction)
test_set = get_test_set(root_path + opt.dataset, opt.direction)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.test_batch_size, shuffle=False)
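get_training_set and get_test_set come from the repo's data utilities and yield aligned (input, target) pairs, already ordered according to --direction. A quick sanity check, my addition rather than part of the original script, confirms what one batch looks like:

# Hypothetical sanity check: pull a single batch and inspect its shapes
a, b = next(iter(training_data_loader))
print(a.shape, b.shape)  # e.g. torch.Size([1, 3, 256, 256]) each for facades at batch_size=1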
Models
print('===> Building models')
net_g = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, 'normal', 0.02, gpu_id=device)
net_d = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'basic', gpu_id=device)
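define_G builds the generator and define_D a 'basic' PatchGAN discriminator. Because D sees the input and output images concatenated along channels, its channel argument is input_nc + output_nc. A small smoke test, again my addition and assuming 256x256 images, shows what the two networks produce:

# Hypothetical smoke test: a PatchGAN discriminator classifies overlapping
# patches, so it returns a score map rather than one scalar per image.
x = torch.randn(1, opt.input_nc, 256, 256, device=device)
y = net_g(x)                          # generator keeps the spatial size of its input
score = net_d(torch.cat((x, y), 1))   # e.g. a 1x1x30x30 map for the standard 3-layer PatchGAN
print(y.shape, score.shape)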
Optimizers and loss functions
criterionGAN = GANLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)

# setup optimizer
optimizer_g = optim.Adam(net_g.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizer_d = optim.Adam(net_d.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
net_g_scheduler = get_scheduler(optimizer_g, opt)
net_d_scheduler = get_scheduler(optimizer_d, opt)
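The two update steps below sit inside a nested epoch/batch loop. Its header, lightly paraphrased from train.py, moves each batch to the device and runs the generator forward; this is where the real_a, real_b, and fake_b in the following snippets come from:

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    for iteration, batch in enumerate(training_data_loader, 1):
        # forward: unpack the aligned pair and generate the fake target
        real_a, real_b = batch[0].to(device), batch[1].to(device)
        fake_b = net_g(real_a)
        # ... then (1) update D and (2) update G, as shown below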
Each iteration then reads one batch and updates the discriminator first. The discriminator's inputs are image pairs concatenated along the channel dimension: (real_a, real_b) as the real pair and (real_a, fake_b) as the fake pair.
######################
# (1) Update D network
######################

optimizer_d.zero_grad()

# train with fake; detach() keeps this update from backpropagating into G
fake_ab = torch.cat((real_a, fake_b), 1)
pred_fake = net_d.forward(fake_ab.detach())
loss_d_fake = criterionGAN(pred_fake, False)

# train with real
real_ab = torch.cat((real_a, real_b), 1)
pred_real = net_d.forward(real_ab)
loss_d_real = criterionGAN(pred_real, True)

# Combined D loss
loss_d = (loss_d_fake + loss_d_real) * 0.5
loss_d.backward()
optimizer_d.step()
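criterionGAN hides the bookkeeping of building a target tensor of ones or zeros to compare the discriminator's score map against. Below is a minimal re-implementation sketch of the common pix2pix formulation; the repo's GANLoss may differ in details such as whether it defaults to BCE or to LSGAN-style MSE:

import torch
import torch.nn as nn

class GANLossSketch(nn.Module):
    """Sketch of a pix2pix-style GAN loss (assumption, not the repo's exact code)."""
    def __init__(self, use_lsgan=True, real_label=1.0, fake_label=0.0):
        super().__init__()
        self.register_buffer('real_label', torch.tensor(real_label))
        self.register_buffer('fake_label', torch.tensor(fake_label))
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def forward(self, prediction, target_is_real):
        # expand the scalar label to match the discriminator's score map
        label = self.real_label if target_is_real else self.fake_label
        return self.loss(prediction, label.expand_as(prediction))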
Next, the generator is updated. Its loss is the sum of the GAN loss on the discriminator's prediction (this time without detaching, so gradients flow back into the generator) and an L1 penalty between the generated and real images, weighted by --lamb.
######################
# (2) Update G network
######################

optimizer_g.zero_grad()

# First, G(A) should fake the discriminator
fake_ab = torch.cat((real_a, fake_b), 1)
pred_fake = net_d.forward(fake_ab)
loss_g_gan = criterionGAN(pred_fake, True)

# Second, G(A) = B
loss_g_l1 = criterionL1(fake_b, real_b) * opt.lamb

loss_g = loss_g_gan + loss_g_l1
loss_g.backward()
optimizer_g.step()
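Notice that criterionMSE appears in neither update: in the repo it feeds a per-epoch evaluation on the test set, reporting average PSNR. Roughly, as a paraphrased sketch that assumes a peak signal value of 1 for the normalized images:

from math import log10

avg_psnr = 0
for batch in testing_data_loader:
    input, target = batch[0].to(device), batch[1].to(device)
    prediction = net_g(input)
    mse = criterionMSE(prediction, target)
    avg_psnr += 10 * log10(1 / mse.item())
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr / len(testing_data_loader)))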
Finally, at the end of each epoch, the learning rates are updated:
update_learning_rate(net_g_scheduler, optimizer_g)
update_learning_rate(net_d_scheduler, optimizer_d)
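update_learning_rate steps the scheduler and prints the current rate. With the default --lr_policy lambda, get_scheduler wraps a LambdaLR that holds the learning rate constant for the first niter epochs and then decays it linearly to zero over the following niter_decay epochs. A sketch of that rule, whose exact arithmetic may differ slightly from the repo:

from torch.optim import lr_scheduler

def lambda_rule(epoch):
    # flat for the first opt.niter epochs, then linear decay to 0
    # over the next opt.niter_decay epochs
    return 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)

scheduler = lr_scheduler.LambdaLR(optimizer_g, lr_lambda=lambda_rule)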
The core of the codebase, the network construction plus a few utility functions, is left for a later post.