Update: rename the attention projection weights wq/wk/wv to w_q/w_k/w_v and add a standalone sanity check under __main__.
[mygpt.git] / mygpt.py
index 7bf25b5..080083a 100755 (executable)
--- a/mygpt.py
+++ b/mygpt.py
@@ -47,16 +47,16 @@ class QKVAttention(nn.Module):
         def randw(*d):
             return nn.Parameter(torch.empty(*d).normal_(0, 1 / math.sqrt(d[-1])))
 
-        self.wq = randw(nb_heads, dim_qk, dim_in)
-        self.wk = randw(nb_heads, dim_qk, dim_in)
-        self.wv = randw(nb_heads, dim_v, dim_in)
+        self.w_q = randw(nb_heads, dim_qk, dim_in)
+        self.w_k = randw(nb_heads, dim_qk, dim_in)
+        self.w_v = randw(nb_heads, dim_v, dim_in)
         self.causal = causal
         self.attention_dropout = attention_dropout
 
     def forward(self, x):
-        q = torch.einsum('ntc,hdc->nhtd', x, self.wq)
-        k = torch.einsum('ntc,hdc->nhtd', x, self.wk)
-        v = torch.einsum('ntc,hdc->nhtd', x, self.wv)
+        q = torch.einsum('ntc,hdc->nhtd', x, self.w_q)
+        k = torch.einsum('ntc,hdc->nhtd', x, self.w_k)
+        v = torch.einsum('ntc,hdc->nhtd', x, self.w_v)
         r = math.sqrt(q.size(3))
         a = torch.einsum('nhtd,nhsd->nhts', q, k).div(r)
         if self.causal:
@@ -119,3 +119,24 @@ class MyGPT(nn.Module):
         return x
 
 ######################################################################
+
+if __name__ == '__main__':
+    # Quick sanity check: push a batch of random token sequences
+    # through a small model and report the tensor shapes.
+    vocabulary_size = 10
+
+    # 25 sequences of 100 tokens, each drawn from [0, vocabulary_size)
+    x = torch.randint(vocabulary_size, (25, 100))
+
+    model = MyGPT(
+        vocabulary_size = vocabulary_size,
+        dim_model = 16, dim_keys = 50, dim_hidden = 100,
+        nb_heads = 2, nb_blocks = 3,
+        dropout = 0.1
+    )
+
+    y = model(x)
+
+    print(f'input {x.size()} -> output {y.size()}')
+
+######################################################################
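
For reference, the einsum convention used in QKVAttention.forward above: an
input batch x of shape (N, T, C) is contracted against per-head weights of
shape (H, D, C), yielding queries, keys, and values of shape (N, H, T, D).
A minimal standalone sketch of that contraction, with toy sizes that are
illustrative only and not taken from the commit:

import math
import torch

N, T, C = 2, 5, 16  # batch, sequence length, input dimension (illustrative)
H, D = 4, 8         # heads, key/query dimension per head (illustrative)

x = torch.randn(N, T, C)
w_q = torch.randn(H, D, C)
w_k = torch.randn(H, D, C)

q = torch.einsum('ntc,hdc->nhtd', x, w_q)  # queries, (N, H, T, D)
k = torch.einsum('ntc,hdc->nhtd', x, w_k)  # keys, (N, H, T, D)

# Attention logits: dot products between all pairs of positions, per
# head, scaled by sqrt(D) exactly as in the forward pass above.
a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(D)
print(a.size())  # torch.Size([2, 4, 5, 5])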
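
Likewise, randw draws weights with standard deviation 1 / sqrt(d[-1]), i.e.
1 / sqrt(fan_in), which keeps a projection of unit-variance inputs itself
near unit variance. A quick empirical check of that property (again a
sketch, not part of the commit):

import math
import torch

def randw(*d):
    return torch.empty(*d).normal_(0, 1 / math.sqrt(d[-1]))

w = randw(2, 64, 512)       # (nb_heads, dim_qk, dim_in)
x = torch.randn(1000, 512)  # unit-variance inputs

y = torch.einsum('nc,hdc->nhd', x, w)
print(y.std())              # should be close to 1.0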