from torch import nn
from torch.nn import functional as F
+from mygpt import BracketedSequence
+
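+# save_attention_image is optional: fall back to None when graph.py is not available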
+try:
+    from graph import save_attention_image
+except ImportError:
+    save_attention_image = None
+
######################################################################
            num_classes=self.len_source,
        )
        source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
        marker1 = torch.full((nb, 1), 10)
        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
        marker2 = torch.full((nb, 1), 11)
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])
        self.t_nul = self.token2id["<nul>"]
-        self.t_input = self.token2id["<input>"]
-        self.t_output = self.token2id["<output>"]
-        self.t_prog = self.token2id["<prog>"]
+        self.t_input = self.token2id["<in>"]
+        self.t_output = self.token2id["<out>"]
+        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]
        self.train_input = self.tensorize(train_sequences)
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        )
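+        # dump attention maps for the first test sequence, if graph.save_attention_image is available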
+        if save_attention_image is not None:
+            input = self.test_input[:1]
+            result = input.clone()
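+            # blank out everything after the first <prg> token so the model has to regenerate the program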
+            s = (result == self.t_prog).long()
+            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+            result = (1 - ar_mask) * result + ar_mask * self.t_nul
+
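+            # fill the blanked positions autoregressively, in place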
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+            )
+
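+            # replay the completed sequence once in eval mode with attention recording enabled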
+            with torch.autograd.no_grad():
+                t = model.training
+                model.eval()
+                model.record_attention(True)
+                model(BracketedSequence(result))
+                model.train(t)
+                ram = model.retrieve_attention()
+                model.record_attention(False)
+
+            tokens_output = [self.id2token[i.item()] for i in result[0]]
+            tokens_input = ["n/a"] + tokens_output[:-1]
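+            # one PDF per attention head; each image stacks the attention matrices of all layers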
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
+                attention_matrices = [m[0, n_head] for m in ram]
+                save_attention_image(
+                    filename,
+                    tokens_input,
+                    tokens_output,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
+                    token_gap=12,
+                    layer_gap=50,
+                )
+                logger(f"wrote {filename}")
+
######################################################################