Add cross-attention to QKVAttention.
author     Francois Fleuret <francois@fleuret.org>
Fri, 1 Jul 2022 08:01:12 +0000 (10:01 +0200)
committer  Francois Fleuret <francois@fleuret.org>
Fri, 1 Jul 2022 08:01:12 +0000 (10:01 +0200)
mygpt.py

index 080083a..4951460 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -41,7 +41,8 @@ class PositionalEncoding(nn.Module):
 ##############################
 
 class QKVAttention(nn.Module):
-    def __init__(self, dim_in, dim_qk, dim_v, nb_heads = 1, causal = False, attention_dropout = 0.0):
+    def __init__(self, dim_in, dim_qk, dim_v,
+                 nb_heads = 1, causal = False, attention_dropout = 0.0):
         super().__init__()
 
         def randw(*d):
@@ -53,12 +54,12 @@ class QKVAttention(nn.Module):
         self.causal = causal
         self.attention_dropout = attention_dropout
 
-    def forward(self, x):
-        q = torch.einsum('ntc,hdc->nhtd', x, self.w_q)
-        k = torch.einsum('ntc,hdc->nhtd', x, self.w_k)
-        v = torch.einsum('ntc,hdc->nhtd', x, self.w_v)
-        r = math.sqrt(q.size(3))
-        a = torch.einsum('nhtd,nhsd->nhts', q, k).div(r)
+    def forward(self, x_q, x_kv = None):
+        if x_kv is None: x_kv = x_q
+        q = torch.einsum('ntc,hdc->nhtd', x_q, self.w_q)
+        k = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_k)
+        v = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_v)
+        a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))
         if self.causal:
             mask = torch.tril(q.new_ones(a.size(2), a.size(3)))[None, None, :, :] == 0
             a = a.masked_fill(mask, float('-inf'))
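
A minimal usage sketch of the changed interface (not part of the commit): it assumes mygpt.py is importable, that the rest of forward() (softmax, dropout, value aggregation) is unchanged, and uses arbitrary example dimensions. Passing only x_q keeps the previous self-attention behaviour; passing a second tensor makes the keys and values come from it.

```python
import torch
from mygpt import QKVAttention

# Example dimensions, chosen only for illustration.
attn = QKVAttention(dim_in = 64, dim_qk = 16, dim_v = 16, nb_heads = 4)

x_q  = torch.randn(2, 10, 64)   # queries:      (N, T, dim_in)
x_kv = torch.randn(2, 25, 64)   # keys/values:  (N, S, dim_in)

y_self  = attn(x_q)             # x_kv defaults to x_q -> self-attention
y_cross = attn(x_q, x_kv)       # keys/values taken from x_kv -> cross-attention
```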