0
我正在用 PyTorch 试验 Autoencoder。当我使用比较大的神经网络层,例如 nn.Linear(250*250, 40*40) 作为第一层时,Jupyter 内核不断崩溃;当我使用较小的层,例如 nn.Linear(250*250, 20*20) 时,Jupyter 内核是正常的。有什么办法可以解决这个问题,让我可以运行更大的网络?谢谢。整个网络如下。(标题:使用大型神经网络层时 Jupyter 内核崩溃/死亡,有什么建议?)
# model:
class AutoEncoder(nn.Module):
    """Fully connected autoencoder for flattened 250x250 images.

    Encoder: 62500 -> 400 -> 400 -> 400 -> 225 -> 3 (the bottleneck code).
    Decoder: 3 -> 225 -> 400 -> 400 -> 62500, squashed to (0, 1) by Sigmoid.

    Every hidden stage is Linear -> BatchNorm1d(momentum=0.5) -> Dropout(0.5)
    -> LeakyReLU; the bottleneck and the reconstruction stage deviate from
    that template (no Dropout/activation on the code, Sigmoid on the output).
    NOTE(review): BatchNorm requires batch size > 1 in training mode.
    """

    @staticmethod
    def _stage(in_features, out_features):
        # The Linear/BatchNorm/Dropout/LeakyReLU group shared by most stages.
        # Returned as a list so stages splice into nn.Sequential, keeping the
        # same flat module indices (and state_dict keys) as an explicit layout.
        return [
            nn.Linear(in_features, out_features),
            nn.BatchNorm1d(out_features, momentum=0.5),
            nn.Dropout(0.5),
            nn.LeakyReLU(),
        ]

    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(
            *self._stage(250 * 250, 20 * 20),
            *self._stage(20 * 20, 20 * 20),
            *self._stage(20 * 20, 20 * 20),
            *self._stage(20 * 20, 15 * 15),
            # Bottleneck: no Dropout or activation on the 3-dim code.
            nn.Linear(15 * 15, 3),
            nn.BatchNorm1d(3, momentum=0.5),
        )
        self.decoder = nn.Sequential(
            *self._stage(3, 15 * 15),
            *self._stage(15 * 15, 20 * 20),
            *self._stage(20 * 20, 20 * 20),
            # Output stage: Sigmoid instead of LeakyReLU so reconstructions
            # land in (0, 1), matching normalized pixel intensities.
            nn.Linear(20 * 20, 250 * 250),
            nn.BatchNorm1d(250 * 250, momentum=0.5),
            nn.Dropout(0.5),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return ``(code, reconstruction)`` for a batch of flattened images.

        ``x`` is expected to be of shape ``(batch, 250*250)``.
        """
        code = self.encoder(x)
        return code, self.decoder(code)