##############################
-class PositionalEncoding(nn.Module):
+class AddPositionalEncoding(nn.Module):
    def __init__(self, len_max):
        super().__init__()
        self.len_max = len_max
-    # From Vaswani et al 2017
-    # PE_{t,2i} = sin(t/(L^{2i/D}))
-    # PE_{t,2i+1} = cos(t/(L^{2i/D}))
+    # [Vaswani et al 2017] PE_{t,2i} = sin(t/(L^{2i/D})), PE_{t,2i+1} = cos(t/(L^{2i/D}))
    def forward(self, x):
        t = torch.arange(x.size(1), dtype = x.dtype, device = x.device)[:, None]
        j = torch.arange(x.size(2), dtype = x.dtype, device = x.device)[None, :]
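        # A minimal sketch of how the forward pass could continue from t and j to match the
        # formula above; the j % 2 interleaving and the names k / pe are assumptions about the
        # part of the method not shown in this excerpt:
        #     k = j % 2                        # 0 on even feature indices, 1 on odd ones
        #     pe = torch.sin(t / (self.len_max ** ((j - k) / x.size(2))) + math.pi / 2 * k)
        #     return x + pe                    # cos(u) = sin(u + pi/2), so one sin covers both cases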
        a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))
        if self.causal:
-            mask = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
-                   < torch.arange(a.size(3), device = q.device)[None, None, None, :]
-            a = a.masked_fill(mask, float('-inf'))
+            forbidden_attention = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
+                                  < torch.arange(a.size(3), device = q.device)[None, None, None, :]
+            a = a.masked_fill(forbidden_attention, float('-inf'))
        a = a.softmax(dim = 3)
        a = F.dropout(a, self.attention_dropout, self.training)
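        # For illustration, with 3 queries and 3 keys the comparison above broadcasts to the
        # strictly upper-triangular pattern below (True = masked), so a query at position t
        # can only attend to keys at positions s <= t:
        #     torch.arange(3)[:, None] < torch.arange(3)[None, :]
        #     # tensor([[False,  True,  True],
        #     #         [False, False,  True],
        #     #         [False, False, False]])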
        self.embedding = nn.Sequential(
            nn.Embedding(vocabulary_size, dim_model),
            nn.Dropout(dropout),
-            PositionalEncoding(len_max),
+            AddPositionalEncoding(len_max),
        )
        trunk_blocks = [ ]
        self.readout = nn.Linear(in_features = dim_model, out_features = vocabulary_size)
+        with torch.no_grad():
+            for m in self.modules():
+                if isinstance(m, nn.Embedding):
+                    m.weight.normal_(mean = 0, std = 2e-2)
+                elif isinstance(m, nn.LayerNorm):
+                    m.bias.zero_()
+                    m.weight.fill_(1.0)
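        # (the 2e-2 standard deviation matches the 0.02 embedding initialization commonly used
        #  in GPT-style models, and the LayerNorm parameters are reset to the identity affine
        #  transform: zero bias, unit weight)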
+
    def forward(self, x):
-        x = F.pad(x, (1, 0))
+        x = F.pad(x, (1, -1))
        x = self.embedding(x)
        x = self.trunk(x)
        x = self.readout(x)
-        x = F.pad(x, (0, 0, 0, -1))
        return x
######################################################################
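# A minimal stand-alone sketch of what the new input padding does, assuming torch and
# torch.nn.functional as F are imported as in the code above; the tensor values are only an
# example. F.pad(x, (1, -1)) prepends a 0 token and drops the last input token in one step,
# so the readout at position t is trained to predict x[t] from the tokens before t; the
# previous version instead padded the input on the left and then cropped the last time step
# of the logits, pushing one extra position through the trunk.

x = torch.tensor([[5, 7, 9, 11]])
print(F.pad(x, (1, -1)))   # -> tensor([[0, 5, 7, 9]])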