Update.
diff --git a/mygpt.py b/mygpt.py
index b885e21..67c5cfd 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -10,6 +10,8 @@
 # with a caching mechanism for keys and values to avoid a O(N^3) cost
 # for auto-regression.
 
+# This implementation is equipped with RNN layers to replace the MHA
+
 import math, warnings
 
 import torch, einops
@@ -19,6 +21,8 @@ from torch.nn import functional as F
 
 import ffutils
 
+# from blanket import blanket
+
 # import memload
 
 ######################################################################
@@ -124,7 +128,6 @@ class AddPositionalEncoding(nn.Module):
 
 import pscan
 
-
 # X is /.../xTxD   A is /.../xT   Y_init is /.../xD
 
 
@@ -145,6 +148,18 @@ def pscan_dim(A, X, Y_init, dim=-2):
     return Y
 
 
+def pscan_rgrad(grad_Y, A, X, Y_init, dim=-2, eps=1e-2):
+    with torch.no_grad():
+        s_A, s_X = 0, 0
+        for t in range(X.size(dim) - 1, 0, -1):
+            delta = (grad_Y[t] - s_A) / A[t].grad
+            s_A += A[t].grad * delta
+            A[t].grad = delta
+            delta = (grad_Y[t] - s_X) / X[t].grad
+            s_X += X[t].grad * delta
+            X[t].grad = delta
+
+
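+# For reference, pscan_dim above is assumed to implement the first-order
+# recurrence Y[..., t, :] = A[..., t, None] * Y[..., t - 1, :] + X[..., t, :],
+# with Y_init playing the role of Y at t = -1. The naive version below is a
+# sanity-check sketch for small tensors (dim = -2 only), not used by the model.
+
+
+def pscan_naive(A, X, Y_init):
+    Y, y = [], Y_init
+    for t in range(X.size(-2)):
+        y = A[..., t, None] * y + X[..., t, :]
+        Y.append(y)
+    return torch.stack(Y, dim=-2)
+
+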
 def pscan_shape(A, X, Y_init):
     s = X.size()
     A = A.reshape(-1, s[-2])
@@ -188,6 +203,8 @@ class DumbRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        args=None,
     ):
         super().__init__()
 
@@ -317,6 +334,8 @@ class KVRec(nn.Module):
         nb_lines,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        args=None,
     ):
         super().__init__()
 
@@ -469,44 +488,53 @@ class Caterpillar(nn.Module):
         caterpillar_height,
         attention_dropout=0.0,
         len_max=1e5,
+        logger=print,
+        args=None,
     ):
         super().__init__()
 
         warnings.warn("Caterpillar", RuntimeWarning)
 
-        def randw(*d):
-            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))
+        def randw(*d, factor=1):
+            return nn.Parameter(torch.randn(*d) * factor / math.sqrt(d[-1]))
 
         self.caterpillar_length = caterpillar_length
         self.caterpillar_height = caterpillar_height
         self.attention_dropout = attention_dropout
 
-        self.proba_flashback = 0.0
-        self.proba_gate_dropout = 0.0
+        self.gate_dropout_proba = args.gate_dropout_proba
+        self.gate_dropout_sync = args.gate_dropout_sync
+        self.gate_dropout_replace = args.gate_dropout_replace
 
-        self.w_G = randw(nb_heads, caterpillar_height, dim_model)
-        self.b_G = nn.Parameter(
-            torch.full(
-                (nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
-            )
-        )
+        ######################################################################
+
+        self.w_G = randw(nb_heads, caterpillar_height, dim_model, factor=1e-3)
+        self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), 0.0))
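+
+        # With w_G scaled down by 1e-3 and b_G starting at zero, the gates
+        # start close to sigmoid(0) = 0.5, before the per-row renormalization
+        # applied in forward().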
 
         self.w_K = randw(nb_heads, dim_qk, dim_model)
         self.w_V = randw(nb_heads, dim_v, dim_model)
         self.w_Q = randw(nb_heads, dim_qk, dim_model)
         self.w_O = randw(dim_v * nb_heads, dim_model)
 
-        self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
-        self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
+        self.init_K_rec = randw(
+            caterpillar_height,
+            caterpillar_length,
+            dim_qk,
+        )
+        self.init_V_rec = randw(
+            caterpillar_height,
+            caterpillar_length,
+            dim_v,
+        )
 
     def reset_inner_loss(self):
         self.acc_attention = 0
         self.acc_nb = 0
 
     def get_inner_loss(self):
         # warnings.warn("l2 regularization", RuntimeWarning)
         # return (self.acc_attention / self.acc_nb).pow(2).sum()
         return torch.tensor([0], device=self.w_Q.device)
 
     def forward(self, bs):
         # Dimensions to make the source a bit clearer, that's needed
@@ -519,134 +547,150 @@ class Caterpillar(nn.Module):
         DV = self.w_V.size(1)
         DK = self.w_K.size(1)
         DM = self.w_O.size(1)
-        CH = self.caterpillar_height
-        CL = self.caterpillar_length
+        R = self.caterpillar_height
+        L = self.caterpillar_length
 
         assert (
-            t0 >= CL and (t1 - t0) % CL == 0
+            t0 >= L and (t1 - t0) % L == 0
         ), f"bs.first should be at least caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
 
         # We cache values to deal efficiently with auto-regression
 
         if bs.init_cache:
-            self.rec_V = X.new_zeros(N, CH, T, DV)
-            self.rec_K = X.new_zeros(N, CH, T, DK)
+            self.rec_V = X.new_zeros(N, R, T, DV)
+            self.rec_K = X.new_zeros(N, R, T, DK)
             # We start the recurrent sequences with optimizable
             # initial values. No idea if it helps.
-            self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
-            self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
+            self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+            self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
 
             self.cache_Y = X.new_zeros(N, T, DM)
 
+        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
+        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+
+        # V, K = blanket(V), blanket(K)
+
         ######################################################################
         # Compute the recurrent state
 
         # This is the Gating sequence that modulates the storing of
-        # the new key and value in the CH pairs of the current
-        # stack. The CH gating values are independent, which means
-        # that the current K/V could be stored in multiple pairs of the
+        # the new key and value in the R pairs of the current
+        # stack. There are R independent gating values, which means
+        # that the current K/V may be stored in multiple pairs of the
         # recurrent state, or not at all.
 
         G = (
-            torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
+            torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
-        if self.training and self.proba_gate_dropout > 0.0:
-            warnings.warn("gate droupout", RuntimeWarning)
-            epsilon = 0.5
+        # Clip the gating to avoid values greater than 1 when several
+        # heads hit the same row
 
-        # That was a bad idea
-        # G = F.dropout(G, self.attention_dropout, self.training)
+        G = G / G.sum(1, keepdim=True).clamp(min=1)
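+        # After this renormalization the write coefficients G[:, h, r, t] sum
+        # to at most 1 over the heads h, so the forget coefficient
+        # A = 1 - G.sum(dim=1) used in the recurrence below stays in [0, 1].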
 
-        V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
-        K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+        # G_star = (1 - G).log().sum(1, keepdim=True).exp()
 
-        # We prepare the arguments for the parallel scan
+        ######################################################################
 
-        # Clip the gating
-        warnings.warn("gating clipping", RuntimeWarning)
-        G = G / G.sum(1, keepdim=True).clamp(min=1)
+        def recurrence(G, V, K):
+            # We prepare the arguments for the parallel scan
 
-        A = 1 - G.sum(1)
-        gated_V = torch.einsum("nhet,nhtd->netd", G, V)
-        gated_K = torch.einsum("nhet,nhtd->netd", G, K)
+            A = 1 - G.sum(dim=1)
 
-        init_rec_V = self.rec_V[:, :, t0 - CL : t0]
-        init_rec_K = self.rec_K[:, :, t0 - CL : t0]
+            gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
+            gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
 
-        # Here there is a trick: Since the stack at time t is computed
-        # by updating that at time t-L, the parallel scan operates
-        # with a period of L. To do so we split the time indexing in
-        # two axes, the second of size CL, and run the parallel scan
-        # using the other as the sequence index.
+            # We start from cached values, which matters in inference
 
-        A = A.unflatten(2, (-1, CL))
-        gated_V = gated_V.unflatten(2, (-1, CL))
-        gated_K = gated_K.unflatten(2, (-1, CL))
+            init_rec_V = self.rec_V[:, :, t0 - L : t0]
+            init_rec_K = self.rec_K[:, :, t0 - L : t0]
 
-        next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
-        next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
+            # Here there is a trick: Since the stack at position t is
+            # computed by updating that at position t-L, the parallel
+            # scan operates with a period of L. To do so we split the
+            # sequence indexing in two axes, the second of size L, and
+            # run the parallel scan using the first as the sequence index.
 
-        # Put back the sequence index
+            A = A.unflatten(2, (-1, L))
+            gated_V = gated_V.unflatten(2, (-1, L))
+            gated_K = gated_K.unflatten(2, (-1, L))
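+            # Shapes at this point, with T' = t1 - t0 new positions: A goes
+            # from (N, R, T') to (N, R, T'/L, L), and gated_V / gated_K from
+            # (N, R, T', D) to (N, R, T'/L, L, D). The scan below runs over
+            # dim=2, so each of the L columns carries an independent
+            # recurrence with a stride of L in time.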
 
-        self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
-        self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+            next_V = pscan_dim(A, gated_V, init_rec_V, dim=2).flatten(2, 3)
+            next_K = pscan_dim(A, gated_K, init_rec_K, dim=2).flatten(2, 3)
 
-        if self.training and self.proba_flashback > 0.0:
-            warnings.warn("flash back", RuntimeWarning)
-            # This piece of code makes the assumption that there is
-            # nothing informative before t0, otherwise we'd have to
-            # implement a cache for V and K too. This should not be
-            # too much of a problem since this is used only during
-            # train, where full sequence are available
+            return next_V, next_K
 
-            n = torch.arange(N, device=X.device)[:, None, None, None]
-            t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
-            dv = torch.arange(DV, device=X.device)[None, None, None, :]
-            dk = torch.arange(DK, device=X.device)[None, None, None, :]
+        #################################################################
 
-            u = (
-                torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
-            ) * CL
+        next_V, next_K = recurrence(G, V, K)
 
-            src_time = t - u - t0
-            src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+        if self.training and self.gate_dropout_proba > 0.0:
+            # G is NxHxRxT, where R is the caterpillar's row index.
 
-            mask = (
-                torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
-            ).long()
+            warnings.warn("gate dropout", RuntimeWarning)
 
-            self.rec_V[:, :, t0:t1] = (
-                mask * V[n, src_head, src_time, dv]
-                + (1 - mask) * self.rec_V[:, :, t0:t1]
+            if self.gate_dropout_sync:
+                shape_kill = (N, 1, 1)
+            else:
+                shape_kill = (N, H, R)
+
+            # Pick one point in each of the NxHxR timelines, and set that
+            # entry and all the following ones to 1
+            kill = (
+                torch.rand(*shape_kill, t1 - t0, device=G.device).sort(dim=3).indices
+                == 0
+            ).cumsum(dim=3)
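+            # The sort().indices == 0 trick marks one uniformly distributed
+            # position per timeline (a one-hot), and the cumsum turns it into
+            # a step mask, e.g. 0 0 0 1 1 1 1 1 for a length-8 timeline whose
+            # picked position is 3.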
+
+            # Keep these masks for only some of the NxHxR timelines
+            kill = kill * (
+                torch.rand(*shape_kill, 1, device=G.device) <= self.gate_dropout_proba
             )
 
-            self.rec_K[:, :, t0:t1] = (
-                mask * K[n, src_head, src_time, dk]
-                + (1 - mask) * self.rec_K[:, :, t0:t1]
+            # The coefficients to keep are the complementary ones
+            mask = 1 - kill
+
+            masked_next_V, masked_next_K = recurrence(G * mask, V, K)
+
+            if self.gate_dropout_replace:
+                next_V = next_V.detach()
+                next_K = next_K.detach()
+
+            warnings.warn("the rescaling is probably a bad idea", RuntimeWarning)
+
+            next_V = next_V + (masked_next_V - masked_next_V.detach()) / (
+                1 - self.gate_dropout_proba
             )
+            next_K = next_K + (masked_next_K - masked_next_K.detach()) / (
+                1 - self.gate_dropout_proba
+            )
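+            # The forward values are numerically unchanged (the masked term
+            # cancels with its detached copy), but the gradient gets an extra
+            # path through the masked recurrence, rescaled by
+            # 1 / (1 - gate_dropout_proba). With gate_dropout_replace the
+            # unmasked path is detached, so only the masked path is trained.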
+
+        self.rec_V[:, :, t0:t1] = next_V
+        self.rec_K[:, :, t0:t1] = next_K
 
         ######################################################################
         # compute the readout
 
         Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
 
-        # We build tensors NxHxTxFxL where N is the sample index, H
-        # the head, T the time, F the row in the caterpillar, and L
+        # Q = blanket(Q)
+
+        # We build tensors NxHxTxRxL where N is the sample index, H
+        # the head, T the time, R the row in the caterpillar, and L
         # the column in the caterpillar
 
         windowed_V = moving_window(
-            self.rec_V[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+            self.rec_V[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
         )
 
         windowed_K = moving_window(
-            self.rec_K[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
+            self.rec_K[:, :, t0 - L + 1 : t1], dim=2, win_dim=3, win_size=L
         )
 
-        # We have an attention score for each of the CHxCL values
+        # We have an attention score for each of the RxL values
 
         ar = torch.einsum(
-            "nhtd,nftld->nhtfl",
+            "nhtd,nrtld->nhtrl",
             Q,
             windowed_K,
         ) / math.sqrt(DK)
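+
+        # ar[n, h, t, r, l] is the scaled dot product between the query of
+        # head h at time t and the key stored in row r, column l of the
+        # caterpillar window ending at time t.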
@@ -668,6 +712,8 @@ class Caterpillar(nn.Module):
 
         # Compute the final output
 
+        # Y = blanket(Y)
+
         self.cache_Y[:, t0:t1] = Y @ self.w_O
 
         return BracketedSequence(self.cache_Y, t0, t1 - t0, bs.init_cache)
@@ -685,6 +731,8 @@ class QKVAttention(nn.Module):
         nb_heads=1,
         causal=False,
         attention_dropout=0.0,
+        logger=print,
+        args=None,
     ):
         super().__init__()
 
@@ -772,11 +820,12 @@ class MyGPT(nn.Module):
         nb_blocks,
         nb_lines=None,
         caterpillar_height=None,
-        dim_rec_v=-1,
         causal=False,
         dropout=0.0,
         len_max=1e5,
-        attention_layer="kvrec",
+        attention_layer="caterpillar",
+        logger=print,
+        args=None,
     ):
         super().__init__()
 
@@ -813,34 +862,42 @@ class MyGPT(nn.Module):
                     nb_heads=nb_heads,
                     causal=causal,
                     attention_dropout=dropout,
+                    logger=logger,
+                    args=args,
                 )
             elif attention_layer == "dumbrec":
                 return DumbRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    args=args,
                 )
             elif attention_layer == "kvrec":
                 return KVRec(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     nb_lines=nb_lines,
                     attention_dropout=dropout,
+                    logger=logger,
+                    args=args,
                 )
             elif attention_layer == "caterpillar":
                 return Caterpillar(
                     dim_model=dim_model,
                     dim_qk=dim_keys,
-                    dim_v=dim_rec_v,
+                    dim_v=dim_model // nb_heads,
                     nb_heads=nb_heads,
                     caterpillar_length=self.caterpillar_length,
                     caterpillar_height=self.caterpillar_height,
                     attention_dropout=dropout,
+                    logger=logger,
+                    args=args,
                 )
             else:
                 raise ValueError(f"Unknown attention type {attention_layer}.")
@@ -971,7 +1028,115 @@ class MyGPT(nn.Module):
 ######################################################################
 
 if __name__ == "__main__":
-    print("Basic check.")
+    import argparse
+
+    import numpy as np
+    import matplotlib.pyplot as plt
+    import matplotlib.collections as mc
+
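+    # Stand-in for the command-line arguments that Caterpillar reads from
+    # `args`; gate dropout is disabled for this check.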
+    args = argparse.Namespace(
+        gate_dropout_proba=0.0, gate_dropout_sync=True, gate_dropout_replace=False
+    )
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    dim_model, dim_keys, nb_heads = 512, 64, 1
+    dropout = 0.1
+
+    caterpillar = Caterpillar(
+        dim_model=dim_model,
+        dim_qk=dim_keys,
+        dim_v=dim_model // nb_heads,
+        nb_heads=nb_heads,
+        caterpillar_length=16,
+        caterpillar_height=32,
+        attention_dropout=dropout,
+        args=args,
+    ).to(device)
+
+    qkv = QKVAttention(
+        dim_model=dim_model,
+        dim_qk=dim_keys,
+        dim_v=dim_model // nb_heads,
+        nb_heads=nb_heads,
+        causal=True,
+        attention_dropout=dropout,
+        args=args,
+    ).to(device)
+
+    linear = CacheWrapper(nn.Linear(dim_model, dim_model)).to(device)
+
+    x = torch.randn(1, 256, dim_model)
+
+    x = x.to(device)
+    x.requires_grad_()
+
+    ######################################################################
+
+    fig = plt.figure()
+    fig.set_figheight(6)
+    fig.set_figwidth(8)
+
+    ax = fig.add_subplot(1, 1, 1)
+
+    # ax.set_xlim(-1.5, 1.5)
+    # ax.set_ylim(-1.5, 1.5)
+    # ax.set(aspect=1)
+    # ax.spines.right.set_visible(False)
+    # ax.spines.top.set_visible(False)
+
+    # dt = 0.01
+    # t = np.arange(dt, 20.0, dt)
+    # ax.semilogx(t, np.exp(-t / 5.0))
+    # ax.grid()
+    ax.set_yscale("log")
+
+    ######################################################################
+
+    for label, model, thickness in [
+        ("nn.Linear", linear, 0.2),
+        ("mygpt.QKVAttention", qkv, 1),
+        ("mygpt.Caterpillar", caterpillar, 2),
+    ]:
+        y = model(BracketedSequence(x, 32, x.size(1) - 32, init_cache=True)).x
+
+        for n, p in [("input", x)] + list(model.named_parameters()):
+            print(f"Processing {model}.{n}")
+            data = []
+            for t in range(y.size(1)):
+                sg = 0
+                for d in torch.randperm(y.size(2))[:8]:
+                    sg += torch.autograd.grad(y[0, t, d], p, retain_graph=True)[0]
+                assert not sg.isinf().any()
+                assert not sg.isnan().any()
+                data.append([t, sg.sum().item()])
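+            # data[t] is the sum over the entries of p of the gradients of 8
+            # randomly chosen output channels at position t, i.e. a rough
+            # probe of how strongly y[:, t] depends on p.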
+
+            data = torch.tensor(data)
+            # cx, cy = data[:, 0], data[:, 1]
+            cy = data[:, 1].sort().values
+            cx = torch.linspace(0, 1, cy.size(0))
+            ax.plot(
+                cx, cy, label=label + "." + n, linewidth=thickness
+            )  # , color='gray', label='Input')
+
+    # ax.legend(frameon=False, loc="top right")
+
+    # Put a legend to the right of the current axis
+    box = ax.get_position()
+    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+    ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+
+    filename = "plot.pdf"
+    print(f"saving {filename}")
+    fig.savefig(filename, bbox_inches="tight")
+
+    # if args.window and hasattr(plt.get_current_fig_manager(), 'window'):
+    # plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
+    # plt.show()
+
+    exit(0)
+
+    ######################################################################
 
     m = Caterpillar(
         dim_model=4,
@@ -993,8 +1158,6 @@ if __name__ == "__main__":
     print((y1 - torch.cat([y3a, y3b], dim=1)).abs().max())
     exit(0)
 
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
     vocabulary_size = 128
     x = torch.randint(vocabulary_size, (6, 1024))