The "mask" array actually specifies what attention to discard.
diff --git a/mygpt.py b/mygpt.py
index 3bce361..f954797 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -66,9 +66,9 @@ class QKVAttention(nn.Module):
         a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))
 
         if self.causal:
-            mask = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
-                   < torch.arange(a.size(3), device = q.device)[None, None, None, :]
-            a = a.masked_fill(mask, float('-inf'))
+            forbidden_attention = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
+                                < torch.arange(a.size(3), device = q.device)[None, None, None, :]
+            a = a.masked_fill(forbidden_attention, float('-inf'))
 
         a = a.softmax(dim = 3)
         a = F.dropout(a, self.attention_dropout, self.training)
@@ -97,10 +97,6 @@ class MyGPT(nn.Module):
             AddPositionalEncoding(len_max),
         )
 
-        # Small embedding initialization
-        with torch.no_grad():
-            self.embedding[0].weight.normal_(0, 2e-2)
-
         trunk_blocks = [ ]
 
         for _ in range(nb_blocks):
@@ -128,6 +124,14 @@ class MyGPT(nn.Module):
 
         self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)
 
+        with torch.no_grad():
+            for m in self.modules():
+                if isinstance(m, nn.Embedding):
+                    m.weight.normal_(mean = 0, std = 2e-2)
+                elif isinstance(m, nn.LayerNorm):
+                    m.bias.zero_()
+                    m.weight.fill_(1.0)
+
     def forward(self, x):
         x = F.pad(x, (1, -1))
         x = self.embedding(x)
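
The F.pad(x, (1, -1)) call in forward is the usual next-token shift: it prepends a zero (used as a start-of-sequence filler) and drops the last token, so the prediction at position t is conditioned only on tokens before t. A minimal check, assuming x is an (N, T) LongTensor of token indices (the values below are made up):

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[11, 12, 13, 14]])
    # pad one zero on the left, trim one token on the right
    print(F.pad(x, (1, -1)))  # tensor([[ 0, 11, 12, 13]])
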