Compute the minimal input size that will be kept unchanged through a conv/convtranspo...
author     Francois Fleuret <francois@fleuret.org>
           Sat, 9 Jun 2018 12:09:08 +0000 (14:09 +0200)
committer  Francois Fleuret <francois@fleuret.org>
           Sat, 9 Jun 2018 12:09:08 +0000 (14:09 +0200)
ae_size.py [new file with mode: 0755]

diff --git a/ae_size.py b/ae_size.py
new file mode 100755 (executable)
index 0000000..7bef9f5
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+import math
+from torch import nn
+from torch import Tensor
+
+######################################################################
+
+def minimal_input_size(w, layer_specs):
+    # Returns the smallest size >= w that traverses the whole stack of
+    # (kernel_size, stride) convolutions without rounding loss, so the
+    # mirrored transposed convolutions restore it exactly.
+    assert w > 0, 'The input is too small'
+    if layer_specs == []:
+        return w
+    else:
+        k, s = layer_specs[0]
+        # Smallest output size whose exact conv pre-image is >= w
+        w = math.ceil((w - k) / s) + 1
+        w = minimal_input_size(w, layer_specs[1:])
+        # Exact conv input size that produces an output of size w
+        return int((w - 1) * s + k)
+
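+# E.g., minimal_input_size(240, [ (11, 5), (5, 2), (3, 2), (3, 2) ]) recurses
+# down through the sizes 47, 22, 11, 5 and returns up through 11, 23, 49, 251.
+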
+######################################################################
+
+# (kernel_size, stride) of each convolution of the encoder
+layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]
+
+# Encoder: one strided convolution per spec
+layers = []
+for l in layer_specs:
+    layers.append(nn.Conv2d(1, 1, l[0], l[1]))
+
+# Decoder: the mirrored stack of transposed convolutions
+for l in reversed(layer_specs):
+    layers.append(nn.ConvTranspose2d(1, 1, l[0], l[1]))
+
+m = nn.Sequential(*layers)
+
+# Smallest compatible sizes that are at least 240 and 320
+h = minimal_input_size(240, layer_specs)
+w = minimal_input_size(320, layer_specs)
+
+x = Tensor(1, 1, h, w).normal_()
+
+# Input and output sizes should be identical
+print(x.size(), m(x).size())
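
For reference, a minimal sketch of the same size arithmetic done purely with integers, assuming the default no-padding, no-dilation formulas of nn.Conv2d (floor((w - k) / s) + 1) and nn.ConvTranspose2d ((w - 1) * s + k); check_round_trip is an illustrative helper, not part of the commit. A size is preserved by the full stack iff every convolution divides exactly and the upward pass maps back to the starting size; tracing minimal_input_size on the specs above gives 251 for a target of 240 and 331 for a target of 320.

    def check_round_trip(w, layer_specs):
        # Downward pass: w -> floor((w - k) / s) + 1, tracking whether
        # any layer drops a remainder
        exact, sizes = True, [w]
        for k, s in layer_specs:
            exact = exact and (sizes[-1] - k) % s == 0
            sizes.append((sizes[-1] - k) // s + 1)
        # Upward pass through the transposed convolutions: w -> (w - 1) * s + k
        w_back = sizes[-1]
        for k, s in reversed(layer_specs):
            w_back = (w_back - 1) * s + k
        return exact and w_back == w

    layer_specs = [ (11, 5), (5, 2), (3, 2), (3, 2) ]

    assert not check_round_trip(240, layer_specs)
    assert check_round_trip(251, layer_specs)  # minimal_input_size(240, layer_specs)
    assert check_round_trip(331, layer_specs)  # minimal_input_size(320, layer_specs)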