attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
attention_dropout=0.0,
len_max=1e5,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
warnings.warn("Caterpillar", RuntimeWarning)
- def randw(*d, amplitude=None):
- if amplitude is None:
- amplitude = 1 / math.sqrt(d[-1])
- return nn.Parameter(amplitude * torch.randn(*d))
+ def randw(*d, factor=1):
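+ # Draw the weights at amplitude factor / sqrt(d[-1]), the usual
+ # scaling by the square root of the fan-in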
+ return nn.Parameter(torch.randn(*d) * factor / math.sqrt(d[-1]))
self.caterpillar_length = caterpillar_length
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- ######################################################################
- # sup_args
-
- x = kwargs.get("gate_dropout")
- if x is None:
- self.proba_gate_dropout = 0.0
- else:
- self.proba_gate_dropout = float(x)
-
- logger(f"self.proba_gate_dropout {self.proba_gate_dropout}")
-
- x = kwargs.get("default_bg")
- if x is None:
- default_bg = -math.log(caterpillar_height - 1)
- else:
- default_bg = float(x)
-
- logger(f"default_bg {default_bg}")
+ self.gate_dropout_proba = args.gate_dropout_proba
+ self.gate_dropout_sync = args.gate_dropout_sync
+ self.gate_dropout_replace = args.gate_dropout_replace
######################################################################
- self.w_G = randw(nb_heads, caterpillar_height, dim_model)
- self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), default_bg))
+ self.w_G = randw(nb_heads, caterpillar_height, dim_model, factor=1.0)
+ self.b_G = nn.Parameter(torch.full((nb_heads, caterpillar_height), 0.0))
self.w_K = randw(nb_heads, dim_qk, dim_model)
- self.w_V = randw(nb_heads, dim_v, dim_model)
+ self.w_V = randw(nb_heads, dim_v, dim_model, factor=1)
self.w_Q = randw(nb_heads, dim_qk, dim_model)
self.w_O = randw(dim_v * nb_heads, dim_model)
dim_v,
)
- def reset_inner_loss(self):
- self.acc_attention = 0
- self.acc_nb = 0
+ # def reset_inner_loss(self):
+ # self.acc_attention = 0
+ # self.acc_nb = 0
- def get_inner_loss(self):
- # warnings.warn("l2 regularization", RuntimeWarning)
- # return (self.acc_attention / self.acc_nb).pow(2).sum()
- return torch.tensor([0], device=self.w_Q.device)
+ # def get_inner_loss(self):
+ # warnings.warn("l2 regularization", RuntimeWarning)
+ # return (self.acc_attention / self.acc_nb).pow(2).sum()
+ # return torch.tensor([0], device=self.w_Q.device)
def forward(self, bs):
# Dimensions, named to make the source a bit clearer
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
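+ # G is NxHxRxT, the gating in (0, 1) of each head toward each of
+ # the caterpillar's rows, at every time step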
- # warnings.warn("softmax gating", RuntimeWarning)
-
- # G = (
- # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
- # ).softmax(dim=2)
-
- ######################################################################
- # The "flashbacks"
-
- if self.training and self.proba_gate_dropout > 0.0:
- # This is a better implementation of "flashbacks".
-
- # G is NxHxExT where e is the caterpillar's row.
+ # The normalization below, now disabled, clipped the gating to
+ # avoid values greater than 1 when several heads hit the same row
- warnings.warn("gate dropout", RuntimeWarning)
+ # G = G / G.sum(1, keepdim=True).clamp(min=1)
- kill = (
- torch.rand(G.size(), device=G.device) <= self.proba_gate_dropout
- ).float()
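+ # H is the product over the heads of the complementary gatings,
+ # prod_h (1 - G_h), computed in log space for numerical stability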
+ H = (1 - G).log().sum(1, keepdim=True).exp()
- alpha = G / (1 - self.proba_gate_dropout)
-
- G = alpha * (1 - kill)
+ ######################################################################
def recurrence(G, V, K):
- # Clip the gating to avoid values greater than 1 when several
- # heads hit the same row
-
- G = G / G.sum(1, keepdim=True).clamp(min=1)
-
# We prepare the arguments for the parallel scan
- A = 1 - G.sum(1)
+ A = H
- gated_V = torch.einsum("nhrt,nhtd->nrtd", G, V)
- gated_K = torch.einsum("nhrt,nhtd->nrtd", G, K)
+ gated_V = torch.einsum("nhrt,nhtd->nrtd", H * G / (1 - G), V)
+ gated_K = torch.einsum("nhrt,nhtd->nrtd", H * G / (1 - G), K)
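+
+ # Since H = prod_h (1 - G_h), the factor H * G_h / (1 - G_h) is
+ # G_h * prod_{h' != h} (1 - G_{h'}): each head's write is
+ # attenuated by the overwriting done by the other heads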
# We start from cached values, which matters in inference
init_rec_V = self.rec_V[:, :, t0 - L : t0]
init_rec_K = self.rec_K[:, :, t0 - L : t0]
- # Associative scan
-
# Here there is a trick: Since the stack at position t is
# computed by updating that at position t-L, the parallel
# scan operates with a period of L. To do so we split the time
# axis into blocks of length L and run the scan along the block
# index.
gated_V = gated_V.unflatten(2, (-1, L))
gated_K = gated_K.unflatten(2, (-1, L))
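+ # e.g. with L = 16 and 64 time steps, the NxRx64xD tensors become
+ # NxRx4x16xD, and the scan along dim=2 chains entry t to t - L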
- next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
- next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
-
- next_V = next_V.flatten(2, 3)
- next_K = next_K.flatten(2, 3)
+ next_V = pscan_dim(A, gated_V, init_rec_V, dim=2).flatten(2, 3)
+ next_K = pscan_dim(A, gated_K, init_rec_K, dim=2).flatten(2, 3)
return next_V, next_K
next_V, next_K = recurrence(G, V, K)
+ if self.training and self.gate_dropout_proba > 0.0:
+ # G is NxHxRxT, where R is the caterpillar's row.
+
+ warnings.warn("gate dropout", RuntimeWarning)
+
+ if self.gate_dropout_sync:
+ shape_kill = (N, 1, 1)
+ else:
+ shape_kill = (N, H, R)
+
+ # Pick a point in each of the NxHxR timelines, and set that
+ # entry and all the following ones to 1
+ kill = (
+ torch.rand(*shape_kill, t1 - t0, device=G.device).sort(dim=3).indices
+ == 0
+ ).cumsum(dim=3)
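+ # e.g. for a timeline of length 8, one draw may give
+ # kill = [0, 0, 0, 1, 1, 1, 1, 1]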
+
+ # Keep these masks only for a fraction gate_dropout_proba of the
+ # NxHxR timelines
+ kill = kill * (
+ torch.rand(*shape_kill, 1, device=G.device) <= self.gate_dropout_proba
+ )
+
+ # The coefficients to keep are the complement of the kill mask
+ mask = 1 - kill
+
+ masked_next_V, masked_next_K = recurrence(G * mask, V, K)
+
+ if self.gate_dropout_replace:
+ next_V = next_V.detach()
+ next_K = next_K.detach()
+
+ warnings.warn("the rescaling is probably a bad idea", RuntimeWarning)
+
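+ # The forward values remain those of the unmasked recurrence,
+ # while gradients also flow through the masked one, rescaled by
+ # 1 / (1 - gate_dropout_proba) (a straight-through estimator)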
+ next_V = next_V + (masked_next_V - masked_next_V.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
+ next_K = next_K + (masked_next_K - masked_next_K.detach()) / (
+ 1 - self.gate_dropout_proba
+ )
+
self.rec_V[:, :, t0:t1] = next_V
self.rec_K[:, :, t0:t1] = next_K
Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
- # We build tensors NxHxTxFxL where N is the sample index, H
- # the head, T the time, F the row in the caterpillar, and L
+ # We build tensors NxHxTxRxL where N is the sample index, H
+ # the head, T the time, R the row in the caterpillar, and L
# the column in the caterpillar
windowed_V = moving_window(
# We have an attention score for each of the RxL values
ar = torch.einsum(
- "nhtd,nftld->nhtfl",
+ "nhtd,nrtld->nhtrl",
Q,
windowed_K,
) / math.sqrt(DK)
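+ # ar is NxHxTxRxL, with the standard 1 / sqrt(DK) scaling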
causal=False,
attention_dropout=0.0,
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
causal=False,
dropout=0.0,
len_max=1e5,
- attention_layer="kvrec",
+ attention_layer="caterpillar",
logger=print,
- **kwargs,
+ args=None,
):
super().__init__()
causal=causal,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "dumbrec":
return DumbRec(
nb_lines=nb_lines,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "kvrec":
return KVRec(
nb_lines=nb_lines,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
elif attention_layer == "caterpillar":
return Caterpillar(
caterpillar_height=self.caterpillar_height,
attention_dropout=dropout,
logger=logger,
- **kwargs,
+ args=args,
)
else:
raise ValueError(f"Unknown attention type {attention_layer}.")
######################################################################
if __name__ == "__main__":
- print("Basic check.")
+ import argparse
+
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import matplotlib.collections as mc
+
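+ # Minimal stand-in for the command-line arguments the layers read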
+ args = argparse.Namespace(
+ gate_dropout_proba=0.0, gate_dropout_sync=True, gate_dropout_replace=False
+ )
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ dim_model, dim_keys, nb_heads = 512, 64, 1
+ dropout = 0.1
+
+ caterpillar = Caterpillar(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ caterpillar_length=16,
+ caterpillar_height=32,
+ attention_dropout=dropout,
+ args=args,
+ ).to(device)
+
+ qkv = QKVAttention(
+ dim_model=dim_model,
+ dim_qk=dim_keys,
+ dim_v=dim_model // nb_heads,
+ nb_heads=nb_heads,
+ causal=True,
+ attention_dropout=dropout,
+ args=args,
+ ).to(device)
+
+ linear = CacheWrapper(nn.Linear(dim_model, dim_model)).to(device)
+
+ x = torch.randn(1, 256, dim_model)
+
+ x = x.to(device)
+ x.requires_grad_()
+
+ ######################################################################
+
+ fig = plt.figure()
+ fig.set_figheight(6)
+ fig.set_figwidth(8)
+
+ ax = fig.add_subplot(1, 1, 1)
+
+ # ax.set_xlim(-1.5, 1.5)
+ # ax.set_ylim(-1.5, 1.5)
+ # ax.set(aspect=1)
+ # ax.spines.right.set_visible(False)
+ # ax.spines.top.set_visible(False)
+
+ # dt = 0.01
+ # t = np.arange(dt, 20.0, dt)
+ # ax.semilogx(t, np.exp(-t / 5.0))
+ # ax.grid()
+
+ ######################################################################
+
+ for label, model in [
+ # ("nn.Linear", linear),
+ ("mygpt.QKVAttention", qkv),
+ ("mygpt.Caterpillar", caterpillar),
+ ]:
+ y = model(BracketedSequence(x, 32, x.size(1) - 32, init_cache=True)).x
+
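+ # Probe how far back gradients propagate: for a few random output
+ # components at every time step, accumulate the squared norm of
+ # the gradient with respect to the input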
+ data = []
+ for t in range(y.size(1)):
+ for d in torch.randperm(y.size(2))[:8]:
+ g = torch.autograd.grad(y[0, t, d], x, retain_graph=True)[0]
+ sg = g.pow(2).sum().item()
+ # sg = 0
+ # for p in model.parameters():
+ # g = torch.autograd.grad(y[0, t, d], p, retain_graph=True)[0]
+ # sg = sg + g.pow(2).sum().item()
+ data.append([t, sg])
+
+ data = torch.tensor(data)
+ ax.scatter(
+ data[:, 0], data[:, 1], s=1, label=label
+ ) # , color='gray', label='Input')
+
+ # ax.legend(frameon=False, loc="top right")
+
+ # Put a legend to the right of the current axis
+ box = ax.get_position()
+ ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
+ ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
+
+ filename = "plot.pdf"
+ print(f"saving {filename}")
+ fig.savefig(filename, bbox_inches="tight")
+
+ # if args.window and hasattr(plt.get_current_fig_manager(), 'window'):
+ # plt.get_current_fig_manager().window.setGeometry(2, 2, 1024, 768)
+ # plt.show()
+
+ exit(0)
+
+ ######################################################################
m = Caterpillar(
dim_model=4,
print((y1 - torch.cat([y3a, y3b], dim=1)).abs().max())
exit(0)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
vocabulary_size = 128
x = torch.randint(vocabulary_size, (6, 1024))