picoclvr: in SandBox, save attention images for ten random test samples, one PDF per sample and head.
diff --git a/tasks.py b/tasks.py
index 038a8ac..0143ab2 100755
--- a/tasks.py
+++ b/tasks.py
@@ -182,36 +182,37 @@ class SandBox(Task):
         )
 
         if save_attention_image is not None:
-            ns = torch.randint(self.test_input.size(0), (1,)).item()
-            input = self.test_input[ns : ns + 1].clone()
-
-            with torch.autograd.no_grad():
-                t = model.training
-                model.eval()
-                model.record_attention(True)
-                model(BracketedSequence(input))
-                model.train(t)
-                ram = model.retrieve_attention()
-                model.record_attention(False)
-
-            tokens_output = [c for c in self.problem.seq2str(input[0])]
-            tokens_input = ["n/a"] + tokens_output[:-1]
-            for n_head in range(ram[0].size(1)):
-                filename = os.path.join(
-                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
-                )
-                attention_matrices = [m[0, n_head] for m in ram]
-                save_attention_image(
-                    filename,
-                    tokens_input,
-                    tokens_output,
-                    attention_matrices,
-                    k_top=10,
-                    # min_total_attention=0.9,
-                    token_gap=12,
-                    layer_gap=50,
-                )
-                logger(f"wrote {filename}")
+            for k in range(10):
+                ns = torch.randint(self.test_input.size(0), (1,)).item()
+                input = self.test_input[ns : ns + 1].clone()
+
+                with torch.autograd.no_grad():
+                    t = model.training
+                    model.eval()
+                    model.record_attention(True)
+                    model(BracketedSequence(input))
+                    model.train(t)
+                    ram = model.retrieve_attention()
+                    model.record_attention(False)
+
+                tokens_output = [c for c in self.problem.seq2str(input[0])]
+                tokens_input = ["n/a"] + tokens_output[:-1]
+                for n_head in range(ram[0].size(1)):
+                    filename = os.path.join(
+                        result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+                    )
+                    attention_matrices = [m[0, n_head] for m in ram]
+                    save_attention_image(
+                        filename,
+                        tokens_input,
+                        tokens_output,
+                        attention_matrices,
+                        k_top=10,
+                        # min_total_attention=0.9,
+                        token_gap=12,
+                        layer_gap=50,
+                    )
+                    logger(f"wrote {filename}")
 
 
 ######################################################################
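
The block that the new loop repeats for each of the ten sampled sequences follows a standard capture pattern: put the model in eval mode, enable attention recording, run one forward pass under no_grad, collect the recorded per-layer matrices, then restore the previous training state. A minimal sketch of that pattern factored into a helper is given below; it is not part of the commit, uses only the API visible in the diff (model.record_attention, model.retrieve_attention, BracketedSequence), and both the helper name capture_attention and the import path for BracketedSequence are assumptions.

import torch

from mygpt import BracketedSequence  # assumed import path for the class used in the diff


def capture_attention(model, input):
    # Sketch only: run one forward pass with attention recording enabled,
    # then restore the model's previous training mode.
    was_training = model.training
    with torch.autograd.no_grad():
        model.eval()
        model.record_attention(True)
        model(BracketedSequence(input))
        model.train(was_training)
        ram = model.retrieve_attention()
        model.record_attention(False)
    # One tensor per layer; the indexing m[0, n_head] in the diff suggests
    # a (batch, head, query, key) layout.
    return ram

With such a helper, each loop iteration would call capture_attention(model, input) once and then iterate over heads to write one PDF per head, exactly as the diff does inline.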