from torch import nn
from torch.nn import functional as F
-from functorch.dim import dims
import ffutils
class DumbRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- # self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ # self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
class KVRec(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
self.k_star = randw(nb_lines, dim_qk)
- self.w_qw = randw(nb_heads, dim_qk, dim_in)
- self.w_qr = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_qw = randw(nb_heads, dim_qk, dim_model)
+ self.w_qr = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def reset_inner_loss(self):
self.acc_attention = 0
def forward(self, bs):
x_q, t0, t1 = bs.x, bs.first, bs.first + bs.nb
- # n,h,l,t,d = dims(5)
-
if bs.init_cache:
self.rec_v = x_q.new_zeros(
x_q.size(0), self.nb_lines, x_q.size(1), self.w_v.size(1)
##############################
+# Returns a tensor with an additional index at rank win_dim, that moves
+# along the same dimension as dim, over the domain {0...win_size-1}, and
+# with dim restricted to a domain reduced by win_size-1 values.
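+#
+# Illustrative example (not from the original code): if x has size
+# (N, T, D), then moving_window(x, dim=1, win_dim=2, win_size=W) has
+# size (N, T-W+1, W, D), with result[n, t, k] == x[n, t+k]; judging
+# from the size/stride computation below, it is a view, not a copy.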
+
+
def moving_window(x, dim, win_dim, win_size):
size, stride = x.size(), x.stride()
size = size[:dim] + (size[dim] - win_size + 1,) + size[dim + 1 :]
class Caterpillar(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads,
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- self.w_G = randw(nb_heads, caterpillar_height, dim_in)
+ self.w_G = randw(nb_heads, caterpillar_height, dim_model)
self.b_G = nn.Parameter(
torch.full(
(nb_heads, caterpillar_height), -math.log(caterpillar_height - 1)
)
)
- self.w_K = randw(nb_heads, dim_qk, dim_in)
- self.w_V = randw(nb_heads, dim_v, dim_in)
- self.w_Q = randw(nb_heads, dim_qk, dim_in)
- self.w_O = randw(dim_v * nb_heads, dim_in)
+ self.w_K = randw(nb_heads, dim_qk, dim_model)
+ self.w_V = randw(nb_heads, dim_v, dim_model)
+ self.w_Q = randw(nb_heads, dim_qk, dim_model)
+ self.w_O = randw(dim_v * nb_heads, dim_model)
self.init_K_rec = randw(caterpillar_height, caterpillar_length, dim_qk)
self.init_V_rec = randw(caterpillar_height, caterpillar_length, dim_v)
T = bs.x.size(1)
DV = self.w_V.size(1)
DK = self.w_K.size(1)
- Dout = self.w_O.size(1)
+ DM = self.w_O.size(1)
CH = self.caterpillar_height
CL = self.caterpillar_length
t0 >= CL and (t1 - t0) % CL == 0
), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
+ # We cache values to deal efficiently with auto-regression
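+ # (rec_V, rec_K and cache_Y below are allocated for the full
+ # sequence length, so that later calls with bs.init_cache == False
+ # can extend the already computed time steps.)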
+
if bs.init_cache:
self.rec_V = X.new_zeros(N, CH, T, DV)
- self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
self.rec_K = X.new_zeros(N, CH, T, DK)
+ # We start the recurrent sequences with optimizable
+ # initial values. No idea if it helps.
+ self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
- self.cache_Y = X.new_zeros(N, T, Dout)
+
+ self.cache_Y = X.new_zeros(N, T, DM)
######################################################################
# Compute the recurrent state
+ # This is the Gating sequence that modulates the storing of
+ # the new key and value in the CH pairs of the current
+ # stack. The CH gating values are independent, which means
+ # that the current K/V could be stored in all the pairs of the
+ # recurrent state, or not at all.
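+ # (For reference, G computed below is N x H x CH x T: one gating
+ # value per head, per caterpillar row, per time step.)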
+
G = (
torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
+ G = F.dropout(G, self.attention_dropout, self.training)
+
V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
+ # We prepare the arguments for the parallel scan
+
A = 1 - G.sum(1)
gated_V = torch.einsum("nhet,nhtd->netd", G, V)
gated_K = torch.einsum("nhet,nhtd->netd", G, K)
init_rec_V = self.rec_V[:, :, t0 - CL : t0]
init_rec_K = self.rec_K[:, :, t0 - CL : t0]
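+ # Assuming pscan_dim implements the linear recurrence
+ # Y[t] = A[t] * Y[t-1] + X[t] along the given dim, A acts as the
+ # retention coefficient on the previous recurrent state, and
+ # gated_V / gated_K are the additive updates.
+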
+ # Here there is a trick: since the stack at time t is computed
+ # by updating the one at time t-CL, the parallel scan operates
+ # with a period of CL. To do so we split the time index into
+ # two axes, the second of size CL, and run the parallel scan
+ # using the first as the sequence index.
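+ # (Illustrative shapes: with T' = t1 - t0, A goes from N x CH x T'
+ # to N x CH x T'/CL x CL, and the scan runs along dim=2, so each of
+ # the CL columns is updated with a stride of CL in time.)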
+
A = A.unflatten(2, (-1, CL))
gated_V = gated_V.unflatten(2, (-1, CL))
gated_K = gated_K.unflatten(2, (-1, CL))
next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
+ # Put back the sequence index
+
self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
Q = torch.einsum("ntc,hdc->nhtd", X, self.w_Q)
- uv = moving_window(
+ # From windowed views of the recurrent state we compute attention
+ # scores NxHxTxFxL where N is the sample index, H the head, T the
+ # time, F the row in the caterpillar, and L the column in the caterpillar
+
+ windowed_V = moving_window(
self.rec_V[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
)
- uk = moving_window(
+ windowed_K = moving_window(
self.rec_K[:, :, t0 - CL + 1 : t1], dim=2, win_dim=3, win_size=CL
)
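+ # windowed_V and windowed_K are views of size N x CH x T' x CL x DV
+ # (resp. DK): for each time step, the CL most recent states of each
+ # of the CH rows of the caterpillar.
+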
+ # We have an attention score for each of the CHxCL values
+
ar = torch.einsum(
"nhtd,nftld->nhtfl",
Q,
- uk,
+ windowed_K,
) / math.sqrt(DK)
+ # softmax can operate only on one dimension, hence the
+ # flattening
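+ # (the normalization is therefore joint over the CH x CL cells)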
+
ar = ar.flatten(3).softmax(dim=3).view(ar.size())
ar = F.dropout(ar, self.attention_dropout, self.training)
+ # Compute the output for each head, flatten to concatenate
+
Y = torch.einsum(
"nhtfl,nftld->nthd",
ar,
- uv,
+ windowed_V,
).flatten(2)
+ # Compute the final output
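+ # (Y is N x (t1-t0) x (H * DV); w_O maps it back to the model
+ # dimension DM)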
+
self.cache_Y[:, t0:t1] = Y @ self.w_O
return BracketedSequence(self.cache_Y, t0, t1 - t0, bs.init_cache)
class QKVAttention(nn.Module):
def __init__(
self,
- dim_in,
+ dim_model,
dim_qk,
dim_v,
nb_heads=1,
self.attention_dropout = attention_dropout
self.record_attention = False
- self.w_q = randw(nb_heads, dim_qk, dim_in)
- self.w_k = randw(nb_heads, dim_qk, dim_in)
- self.w_v = randw(nb_heads, dim_v, dim_in)
- self.w_o = randw(dim_v * nb_heads, dim_in)
+ self.w_q = randw(nb_heads, dim_qk, dim_model)
+ self.w_k = randw(nb_heads, dim_qk, dim_model)
+ self.w_v = randw(nb_heads, dim_v, dim_model)
+ self.w_o = randw(dim_v * nb_heads, dim_model)
def forward(self, bs):
x_q = bs.x
def attlayer():
if attention_layer == "mha":
return QKVAttention(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
dim_v=dim_model // nb_heads,
nb_heads=nb_heads,
)
elif attention_layer == "dumbrec":
return DumbRec(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
dim_v=dim_rec_v,
nb_heads=nb_heads,
)
elif attention_layer == "kvrec":
return KVRec(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
dim_v=dim_rec_v,
nb_heads=nb_heads,
)
elif attention_layer == "caterpillar":
return Caterpillar(
- dim_in=dim_model,
+ dim_model=dim_model,
dim_qk=dim_keys,
dim_v=dim_rec_v,
nb_heads=nb_heads,
print("Basic check.")
m = Caterpillar(
- dim_in=4,
+ dim_model=4,
dim_qk=3,
dim_v=7,
nb_heads=1,