A GANS Problem that i cannot solve

class Generator(nn.Module):
    '''
    This class is for generator.
    Inputs ->
        noise_dim: dimension of noise vector.
        image_channel: number of channels in images,(Since MNIST is black and white images have 1 channel.)
        hidden_dim: inner dimension of networks.
    '''
    def __init__(self, noise_dim=62, image_channel=1, hidden_dim=64):
        super(Generator, self).__init__()
        self.noise_dim = noise_dim
        self.block1 = nn.Sequential(
            nn.ConvTranspose2d(noise_dim, hidden_dim*8, kernel_size=3, stride=2),
            nn.BatchNorm2d(hidden_dim * 8),
            nn.LeakyReLU(inplace=True)
        )
        self.block2 = nn.Sequential(
            nn.ConvTranspose2d(hidden_dim * 8, hidden_dim * 4, kernel_size=3, stride=2),
            nn.BatchNorm2d(hidden_dim * 4),
            nn.LeakyReLU(inplace=True)
        )
        self.block3 = nn.Sequential(
            nn.ConvTranspose2d(hidden_dim * 4, hidden_dim * 2, kernel_size=3, stride=2),
            nn.BatchNorm2d(hidden_dim *2),
            nn.LeakyReLU(inplace=True)
        )
        self.block4 = nn.Sequential(
            nn.ConvTranspose2d(hidden_dim * 2, hidden_dim, kernel_size=3, stride=2),
            nn.BatchNorm2d(hidden_dim),
            nn.LeakyReLU(inplace=True)
        )
        self.block5 = nn.Sequential(
            nn.ConvTranspose2d(hidden_dim, image_channel, kernel_size=3, stride=2),
            nn.Tanh()
        )
    
    def forward(self, noise):
        '''
        forward pass of generator.
        Input ->
            noise: noise tensor with shape of (number of samples, noise_dim)
        Output ->
            generated image
        '''
        out = noise.view(len(noise), self.noise_dim, 1, 1)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.block5(out)
        

        return out

class Discriminator(nn.Module):
    '''
    This class is for Dsicriminator
    Input ->
        image_channel: number of channels in images,(Since MNIST is black and white images have 1 channel.)
        hidden_dim: inner dimension of networks.
        c_dim: number of latent code dimensions
    '''
    def __init__(self, image_channel=1, hidden_dim=64, c_dim=62):
        super(Discriminator, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(image_channel, hidden_dim, kernel_size=4, stride=2),
            nn.BatchNorm2d(hidden_dim),
            nn.LeakyReLU(.2, inplace=True)
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim * 2, kernel_size=4, stride=2),
            nn.BatchNorm2d(hidden_dim * 2),
            nn.LeakyReLU(.2, inplace=True)
        )
        self.block3= nn.Sequential(
            nn.Conv2d(hidden_dim, hidden_dim * 4, kernel_size=4, stride=2),
            nn.BatchNorm2d(hidden_dim * 2),
            nn.LeakyReLU(.2, inplace=True)
        )
        self.d_layer = nn.Conv2d(hidden_dim * 4, 1, kernel_size=4, stride=2)
        self.q_layer1 = nn.Sequential(
            nn.Conv2d(hidden_dim * 4, hidden_dim * 4, kernel_size=4, stride=2),
            nn.BatchNorm2d(hidden_dim * 4),
            nn.LeakyReLU(.2, inplace=True)
        )
        self.q_layer2 = nn.Conv2d(hidden_dim * 4, c_dim * 2, kernel_size=1, stride=2)
    
    def forward(self, image):
        '''
        Forward pass of discriminator
        Input ->
            image: flattened image tensor with dimension of (image_dim)
        Output ->
            returns a 1 dimension tensor representing whtether input image is generated or original.
        '''
        out = self.block1(image)
        intermed_pred = self.block2(out)
        disc_pred = self.d_layer(intermed_pred)
        q_pred = self.q_layer1(intermed_pred)
        q_pred = self.q_layer2(q_pred)
        return disc_pred.view(len(disc_pred), -1), q_pred.view(len(q_pred), -1)

After running this note book i got this following error

RuntimeError                              Traceback (most recent call last)
<ipython-input-11-a7a3435ecb4d> in <module>
     22 
     23         # Get the discriminator's predictions
---> 24         disc_generated_pred, disc_q_pred = disc(generated.detach())
     25         disc_q_mean = disc_q_pred[:, :c_dim]
     26         disc_q_logvar = disc_q_pred[:, c_dim:]

4 frames
/usr/local/lib/python3.8/dist-packages/torch/nn/modules/conv.py in _conv_forward(self, input, weight, bias)
    457                             weight, bias, self.stride,
    458                             _pair(0), self.dilation, self.groups)
--> 459         return F.conv2d(input, weight, bias, self.stride,
    460                         self.padding, self.dilation, self.groups)
    461 

RuntimeError: Given groups=1, weight of size [1, 256, 4, 4], expected input[64, 128, 14, 14] to have 256 channels, but got 128 channels instead

This topic was automatically closed 182 days after the last reply. New replies are no longer allowed.