Added small-weight embedding and identity LayerNorm initializations.
index 9da2e68..ebc9a83 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -24,14 +24,12 @@ class WithResidual(nn.Module):
 
 ##############################
 
-class PositionalEncoding(nn.Module):
+class AddPositionalEncoding(nn.Module):
     def __init__(self, len_max):
         super().__init__()
         self.len_max = len_max
 
-    # From Vaswani et al 2018
-    # PE_{t,2i}   = sin(t/(L^{2i/D}))
-    # PE_{t,2i+1} = cos(t/(L^{2i/D}))
+    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
     def forward(self, x):
         t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
         j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
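
The hunk cuts off before the rest of forward(); a minimal self-contained sketch of the encoding the comment describes, where combining t and j through a single sin with a pi/2 phase shift on odd dimensions is an assumption about the elided lines:

import math
import torch
from torch import nn

class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max

    def forward(self, x):
        # x is (N, T, D); builds PE_{t,2i} = sin(t/L^{2i/D}), PE_{t,2i+1} = cos(t/L^{2i/D})
        t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
        k = j % 2  # 0 on even dimensions (sin), 1 on odd dimensions (cos)
        # assumed combination: sin(z + pi/2) == cos(z), so one sin call covers both cases
        pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi / 2 * k)
        return x + pe
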
@@ -96,7 +94,7 @@ class MyGPT(nn.Module):
         self.embedding = nn.Sequential(
             nn.Embedding(vocabulary_size, dim_model),
             nn.Dropout(dropout),
-            PositionalEncoding(len_max),
+            AddPositionalEncoding(len_max),
         )
 
         trunk_blocks = [ ]
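
For context, the renamed module slots into an embedding stack that maps integer token ids straight to positioned vectors; a quick shape check under toy values (vocabulary_size, dim_model, len_max, dropout are arbitrary here, and AddPositionalEncoding is the sketch above):

import torch
from torch import nn

vocabulary_size, dim_model, len_max, dropout = 10, 16, 1024, 0.1  # toy values

embedding = nn.Sequential(
    nn.Embedding(vocabulary_size, dim_model),
    nn.Dropout(dropout),
    AddPositionalEncoding(len_max),
)

tokens = torch.randint(vocabulary_size, (2, 5))  # (N, T) integer ids
print(embedding(tokens).shape)  # torch.Size([2, 5, 16]), i.e. (N, T, dim_model)
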
@@ -126,12 +124,19 @@ class MyGPT(nn.Module):
 
         self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)
 
+        with torch.no_grad():
+            for m in self.modules():
+                if isinstance(m, nn.Embedding):
+                    m.weight.normal_(mean = 0, std = 2e-2)
+                elif isinstance(m, nn.LayerNorm):
+                    m.bias.zero_()
+                    m.weight.fill_(1.0)
+
     def forward(self, x):
-        x = F.pad(x, (1, 0))
+        x = F.pad(x, (1, -1))
         x = self.embedding(x)
         x = self.trunk(x)
         x = self.readout(x)
-        x = F.pad(x, (0, 0, 0, -1))
         return x
 
 ######################################################################
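
Two behavioral notes on the last hunk. The new init draws embedding weights with std 2e-2 instead of nn.Embedding's default N(0, 1), while the LayerNorm branch matches PyTorch's default (weight 1, bias 0) and mainly documents intent. The pad change moves the causal shift from the output to the input: F.pad(x, (1, -1)) prepends a zero token and drops the last one, so position t sees only tokens before t and the readout no longer needs trimming. A minimal demonstration of that shift:

import torch
import torch.nn.functional as F

x = torch.tensor([[3, 1, 4, 1, 5]])  # (N, T) token ids, arbitrary values

old = F.pad(x, (1, 0))   # tensor([[0, 3, 1, 4, 1, 5]]) -- old scheme, trimmed after readout
new = F.pad(x, (1, -1))  # tensor([[0, 3, 1, 4, 1]])    -- new scheme, shift right in one call
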