    if layer_specs == []:
        return w
    else:
-        k, s = layer_specs[0]
-        w = math.ceil((w - k) / s) + 1
+        kernel_size, stride = layer_specs[0]
+        w = math.ceil((w - kernel_size) / stride) + 1
        w = minimal_input_size(w, layer_specs[1:])
-        return int((w - 1) * s + k)
+        return int((w - 1) * stride + kernel_size)
######################################################################
-layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]
-layers = []
-for l in layer_specs:
-    layers.append(nn.Conv2d(1, 1, l[0], l[1]))
-for l in reversed(layer_specs):
-    layers.append(nn.ConvTranspose2d(1, 1, l[0], l[1]))
-m = nn.Sequential(*layers)
-h = minimal_input_size(240, layer_specs)
-w = minimal_input_size(320, layer_specs)
-x = Tensor(1, 1, h, w).normal_()
-print(x.size(), m(x).size())
+# Dummy test
+if __name__ == "__main__":
+    layer_specs = [ (11, 5), (5, 4), (3, 2), (3, 2) ]
+    layers = []
+    for kernel_size, stride in layer_specs:
+        layers.append(nn.Conv2d(1, 1, kernel_size, stride))
+    for kernel_size, stride in reversed(layer_specs):
+        layers.append(nn.ConvTranspose2d(1, 1, kernel_size, stride))
+    m = nn.Sequential(*layers)
+    h = minimal_input_size(240, layer_specs)
+    w = minimal_input_size(320, layer_specs)
+
+    x = Tensor(1, 1, h, w).normal_()
+
+    print(x.size(), m(x).size())
+
+######################################################################
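# Worked size trace (illustrative, added here; the intermediate values
# are hand-computed from the two formulas above, so verify them by
# running the dummy test). With layer_specs = [ (11, 5), (5, 4),
# (3, 2), (3, 2) ] and w = 240:
#
#   downsampling, ceil((w - kernel_size) / stride) + 1, layer by layer:
#       240 -> 47 -> 12 -> 6 -> 3
#   unwinding, (w - 1) * stride + kernel_size, in reverse layer order:
#         3 ->  7 -> 15 -> 61 -> 311
#
# so minimal_input_size(240, layer_specs) should return 311, a height
# on which the Conv2d stack followed by the mirrored ConvTranspose2d
# stack reproduces the input spatial size exactly.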