import argparse
import os

import numpy as np
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs("images", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.00005, help="learning rate")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=1, help="number of image channels")
parser.add_argument("--n_critic", type=int, default=5, help="number of training steps for discriminator per iter")
parser.add_argument("--clip_value", type=float, default=0.01, help="lower and upper clip value for disc. weights")
parser.add_argument("--sample_interval", type=int, default=400, help="interval between image samples")
opt = parser.parse_args()
print(opt)
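# Note: lr=0.00005, RMSprop, n_critic=5, and clip_value=0.01 match the
# hyperparameters recommended in the original WGAN paper (Arjovsky et al., 2017).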
img_shape = (opt.channels, opt.img_size, opt.img_size)
cuda = True if torch.cuda.is_available() else False
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(opt.latent_dim, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()
        )

    def forward(self, z):
        img = self.model(z)
        img = img.view(img.shape[0], *img_shape)
        return img
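# With the defaults, the generator is an MLP mapping a 100-d latent vector to a
# flattened 28x28 image, reshaped in forward(); Tanh bounds outputs to [-1, 1],
# matching the Normalize([0.5], [0.5]) scaling applied to the real data below.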
class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(int(np.prod(img_shape)), 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
        )

    def forward(self, img):
        img_flat = img.view(img.shape[0], -1)
        validity = self.model(img_flat)
        return validity
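# The critic (discriminator) ends in a bare nn.Linear with no sigmoid: in WGAN
# it outputs an unbounded score used to estimate the Wasserstein distance,
# not a real/fake probability as in the standard GAN.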
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
discriminator.cuda()
# Configure data loader
os.makedirs("../../data/mnist", exist_ok=True)
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "../../data/mnist",
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
# Optimizers
optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=opt.lr)
optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr)
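# RMSprop is used rather than a momentum-based optimizer such as Adam; the
# WGAN authors report that momentum can destabilize training with a clipped critic.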
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Training
# ----------
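# WGAN schedule: the critic is trained n_critic times for each generator
# update (see --n_critic above); each pass below is one critic update.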
batches_done = 0
for epoch in range(opt.n_epochs):
    for i, (imgs, _) in enumerate(dataloader):
# Configure input
        real_imgs = Variable(imgs.type(Tensor))
# ---------------------
# Train Discriminator
# ---------------------
        optimizer_D.zero_grad()

        # Sample noise as generator input
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
# Generate a batch of images
fake_imgs = generator(z).detach()
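        # .detach() stops gradients from flowing into the generator: only the
        # critic is updated in this step. The loss below is the negated
        # Wasserstein estimate E[D(real)] - E[D(fake)], so minimizing it
        # maximizes the critic's score gap between real and fake samples.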
# Adversarial loss
        loss_D = -torch.mean(discriminator(real_imgs)) + torch.mean(discriminator(fake_imgs))
loss_D.backward()
optimizer_D.step()
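        # In the reference WGAN algorithm, each critic update is followed by
        # clamping the critic's weights to approximately enforce a Lipschitz
        # constraint; a minimal sketch using the parsed --clip_value, assuming
        # clipping follows the critic step as in the paper:
        for p in discriminator.parameters():
            p.data.clamp_(-opt.clip_value, opt.clip_value)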