DCGAN.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch.optim as optim

from .base_GAN import BaseGAN
from .utils.config import BaseConfig
from .networks.DCGAN_nets import GNet, DNet


class DCGAN(BaseGAN):
    r"""
    Implementation of DCGAN
    """

    def __init__(self,
                 dimLatentVector=64,
                 dimG=64,
                 dimD=64,
                 depth=3,
                 **kwargs):
        r"""
        Args:

        Specific Arguments:
            - dimLatentVector (int): dimension of the input latent vector
            - dimG (int): reference depth of a layer in the generator
            - dimD (int): reference depth of a layer in the discriminator
            - depth (int): number of convolution layers in the model
            - **kwargs: arguments of the BaseGAN class
        """
        if 'config' not in vars(self):
            self.config = BaseConfig()

        self.config.dimG = dimG
        self.config.dimD = dimD
        self.config.depth = depth

        BaseGAN.__init__(self, dimLatentVector, **kwargs)

    def getNetG(self):
        # Build the generator: maps a latent vector to an image with
        # self.config.dimOutput channels.
        gnet = GNet(self.config.latentVectorDim,
                    self.config.dimOutput,
                    self.config.dimG,
                    depthModel=self.config.depth,
                    generationActivation=self.lossCriterion.generationActivation)
        return gnet

    def getNetD(self):
        # Build the discriminator: the decision layer is sized by the loss
        # criterion plus the (optional) conditional category vector.
        dnet = DNet(self.config.dimOutput,
                    self.config.dimD,
                    self.lossCriterion.sizeDecisionLayer
                    + self.config.categoryVectorDim,
                    depthModel=self.config.depth)
        return dnet

    def getOptimizerD(self):
        # Adam with beta1 = 0.5, as recommended in the DCGAN paper.
        return optim.Adam(filter(lambda p: p.requires_grad, self.netD.parameters()),
                          betas=[0.5, 0.999], lr=self.config.learningRate)

    def getOptimizerG(self):
        return optim.Adam(filter(lambda p: p.requires_grad, self.netG.parameters()),
                          betas=[0.5, 0.999], lr=self.config.learningRate)

    def getSize(self):
        # Output resolution grows as 2**(depth + 3); e.g. depth=3 gives 64x64.
        size = 2**(self.config.depth + 3)
        return (size, size)
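
# ---------------------------------------------------------------------------
# Usage sketch: a minimal example, not taken from this file. It assumes the
# module lives in a `models` package (as in pytorch_GAN_zoo) and that BaseGAN
# accepts `dimOutput` and `useGPU` keyword arguments; treat those names as
# assumptions rather than a documented API.
#
#     from models.DCGAN import DCGAN
#
#     gan = DCGAN(dimLatentVector=64, dimG=64, dimD=64, depth=3,
#                 dimOutput=3, useGPU=True)
#     print(gan.getSize())  # (64, 64), since 2 ** (3 + 3) = 64
# ---------------------------------------------------------------------------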