Update: simplify the RPL attention dump — run the model on a single random test sequence (no autoregressive completion) and save one attention image per head under result_dir; also drop a stale commented-out line in ProblemLevel2.
diff --git a/tasks.py b/tasks.py
index 0eed2aa..17904d8 100755
--- a/tasks.py
+++ b/tasks.py
@@ -140,7 +140,6 @@ class ProblemLevel2(Problem):
             num_classes=self.len_source,
         )
         source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
         marker1 = torch.full((nb, 1), 10)
         result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
         marker2 = torch.full((nb, 1), 11)
@@ -1284,45 +1283,39 @@ class RPL(Task):
         )
 
         if save_attention_image is not None:
-            input = self.test_input[:10]
-            result = input.clone()
-            s = (result == self.t_prog).long()
-            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
-            result = (1 - ar_mask) * result + ar_mask * self.t_nul
-
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
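+            # sample one random test sequence, truncated shortly after its last non-<nul> token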
+            ns = torch.randint(self.test_input.size(0), (1,)).item()
+            input = self.test_input[ns : ns + 1].clone()
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
 
             with torch.autograd.no_grad():
                 t = model.training
                 model.eval()
                 model.record_attention(True)
-                model(BracketedSequence(result))
+                model(BracketedSequence(input))
                 model.train(t)
-                attention = model.retrieve_attention()
+                ram = model.retrieve_attention()
                 model.record_attention(False)
 
-            n_sample = 0
-            tokens_output = [self.id2token[i.item()] for i in result[n_sample]]
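+            # string labels for the attention plot: output tokens, and inputs shifted right by one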
+            tokens_output = [self.id2token[i.item()] for i in input[0]]
             tokens_input = ["n/a"] + tokens_output[:-1]
-            for n_head in range(attention[0].size(1)):
-                filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
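+                # per-layer attention matrices of head n_head for the single sample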
+                attention_matrices = [m[0, n_head] for m in ram]
                 save_attention_image(
                     filename,
                     tokens_input,
                     tokens_output,
-                    attention,
-                    n_sample=n_sample,
-                    n_head=n_head,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
                     token_gap=12,
-                    layer_gap=40,
-                    # k_top=2,
+                    layer_gap=50,
                 )
                 logger(f"wrote {filename}")