Update.
[picoclvr.git] / tasks.py
index 7a4abbe..08aa8ca 100755
--- a/tasks.py
+++ b/tasks.py
@@ -14,10 +14,8 @@ from torch.nn import functional as F
 
 from mygpt import BracketedSequence
 
-try:
-    from graph import save_attention_image
-except ImportError:
-    save_attention_image = None
+# from graph import save_attention_image
+save_attention_image = None
 
 ######################################################################
 
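Note on the hunk above: replacing the guarded import with a hard-coded None means the attention-image code paths further down are skipped at run time. For reference, a minimal sketch of the optional-import pattern being disabled, assuming graph.py provides save_attention_image and needs pycairo:

# Optional dependency: fall back to None when graph/pycairo is unavailable,
# so callers can test whether save_attention_image is not None before using it.
try:
    from graph import save_attention_image
except ImportError:
    save_attention_image = None
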
@@ -125,6 +123,12 @@ class SandBox(Task):
             (0, 1),
         }
 
+        if logger is not None:
+            for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
+                logger(f"train_sequences {self.problem.seq2str(s)}")
+                a = "".join(["01"[x.item()] for x in a])
+                logger(f"                {a}")
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
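The added logging renders each autoregressive mask as a 0/1 string by indexing the literal "01" with the tensor entries. A self-contained sketch of the same idiom, with a made-up mask and assuming that 1 marks the positions to be generated:

import torch

# Made-up mask; 1 is assumed to mark positions generated autoregressively.
ar_mask = torch.tensor([0, 0, 1, 1, 1, 0])
print("".join(["01"[x.item()] for x in ar_mask]))  # prints 001110
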
@@ -196,9 +200,7 @@ class SandBox(Task):
 
         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
 
-        if save_attention_image is None:
-            logger("no save_attention_image (is pycairo installed?)")
-        else:
+        if save_attention_image is not None:
             for k in range(10):
                 ns = torch.randint(self.test_input.size(0), (1,)).item()
                 input = self.test_input[ns : ns + 1].clone()
@@ -206,30 +208,30 @@ class SandBox(Task):
                 with torch.autograd.no_grad():
                     t = model.training
                     model.eval()
-                    model.record_attention(True)
+                    model.record_attention(True)
                     model(BracketedSequence(input))
                     model.train(t)
-                    ram = model.retrieve_attention()
-                    model.record_attention(False)
-
-                tokens_output = [c for c in self.problem.seq2str(input[0])]
-                tokens_input = ["n/a"] + tokens_output[:-1]
-                for n_head in range(ram[0].size(1)):
-                    filename = os.path.join(
-                        result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
-                    )
-                    attention_matrices = [m[0, n_head] for m in ram]
-                    save_attention_image(
-                        filename,
-                        tokens_input,
-                        tokens_output,
-                        attention_matrices,
-                        k_top=10,
-                        # min_total_attention=0.9,
-                        token_gap=12,
-                        layer_gap=50,
-                    )
-                    logger(f"wrote {filename}")
+                    ram = model.retrieve_attention()
+                    model.record_attention(False)
+
+                tokens_output = [c for c in self.problem.seq2str(input[0])]
+                tokens_input = ["n/a"] + tokens_output[:-1]
+                # for n_head in range(ram[0].size(1)):
+                #     filename = os.path.join(
+                #         result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+                #     )
+                #     attention_matrices = [m[0, n_head] for m in ram]
+                #     save_attention_image(
+                #         filename,
+                #         tokens_input,
+                #         tokens_output,
+                #         attention_matrices,
+                #         k_top=10,
+                #         # min_total_attention=0.9,
+                #         token_gap=12,
+                #         layer_gap=50,
+                #     )
+                #     logger(f"wrote {filename}")
 
 
 ######################################################################
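The record/retrieve calls in the hunk above follow a toggle, forward, read, toggle-off protocol around a single evaluation pass. A hedged, self-contained sketch of that protocol with a toy stand-in module (only the record_attention and retrieve_attention names come from the diff; the toy model and its uniform "attention" are made up):

import torch


class ToyAttentionRecorder(torch.nn.Module):
    # Stand-in for the real model: stores one (batch, heads, T, T) tensor
    # per forward pass while recording is enabled.
    def __init__(self):
        super().__init__()
        self.recording = False
        self.recorded = []

    def record_attention(self, v=True):
        self.recording = v
        if v:
            self.recorded = []

    def retrieve_attention(self):
        return self.recorded

    def forward(self, x):
        if self.recording:
            T = x.size(1)
            # Fake uniform attention: one head, one "layer" per call.
            self.recorded.append(torch.full((x.size(0), 1, T, T), 1.0 / T))
        return x


model = ToyAttentionRecorder()
x = torch.zeros(1, 6, dtype=torch.long)
t = model.training
model.eval()
with torch.no_grad():
    model.record_attention(True)
    model(x)
    ram = model.retrieve_attention()
    model.record_attention(False)
model.train(t)
print(len(ram), tuple(ram[0].shape))  # 1 (1, 1, 6, 6)
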
@@ -1473,6 +1475,7 @@ class Grid(Task):
         nb_test_samples,
         batch_size,
         size,
+        fraction_play=0.0,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1488,10 +1491,12 @@ class Grid(Task):
             )
 
         self.train_descr = self.grid_factory.generate_samples(
-            nb_train_samples, lambda r: tqdm.tqdm(r)
+            nb=nb_train_samples,
+            fraction_play=fraction_play,
+            progress_bar=lambda r: tqdm.tqdm(r),
         )
         self.test_descr = self.grid_factory.generate_samples(
-            nb_test_samples, lambda r: tqdm.tqdm(r)
+            nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
         )
 
         # Build the tokenizer
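The two call sites above pin the keyword names nb, fraction_play and progress_bar. A hedged, free-standing stand-in consistent with those names (the body and the "play"/"normal" labels are hypothetical; the real generator lives in the grid module):

import tqdm


def generate_samples(nb, fraction_play=0.0, progress_bar=None):
    # Hypothetical stand-in: produce nb placeholder descriptions, flagging
    # roughly a fraction_play share of them as "play" samples, with the loop
    # wrapped in an optional progress bar.
    r = range(nb)
    if progress_bar is not None:
        r = progress_bar(r)
    return ["play" if k < fraction_play * nb else "normal" for k in r]


train_descr = generate_samples(nb=1000, fraction_play=0.25, progress_bar=lambda r: tqdm.tqdm(r))
test_descr = generate_samples(nb=100, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r))
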
@@ -1561,6 +1566,42 @@ class Grid(Task):
         logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
         logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
 
+        if n_epoch == 5 or n_epoch == 10 or n_epoch == 20:
+            if save_attention_image is None:
+                logger("no save_attention_image (is pycairo installed?)")
+            else:
+                for k in range(10):
+                    ns = k  # torch.randint(self.test_input.size(0), (1,)).item()
+                    input = self.test_input[ns : ns + 1].clone()
+                    with torch.autograd.no_grad():
+                        t = model.training
+                        model.eval()
+                        model.record_attention(True)
+                        model(BracketedSequence(input))
+                        model.train(t)
+                        ram = model.retrieve_attention()
+                        model.record_attention(False)
+
+                    tokens_output = [self.id2token[t.item()] for t in input[0]]
+                    tokens_input = ["n/a"] + tokens_output[:-1]
+                    for n_head in range(ram[0].size(1)):
+                        filename = os.path.join(
+                            result_dir,
+                            f"sandbox_attention_epoch_{n_epoch}_sample_{k}_head_{n_head}.pdf",
+                        )
+                        attention_matrices = [m[0, n_head] for m in ram]
+                        save_attention_image(
+                            filename,
+                            tokens_input,
+                            tokens_output,
+                            attention_matrices,
+                            k_top=10,
+                            # min_total_attention=0.9,
+                            token_gap=12,
+                            layer_gap=50,
+                        )
+                        logger(f"wrote {filename}")
+
 
 ######################################################################
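
In the last hunk, attention_matrices = [m[0, n_head] for m in ram] takes sample 0 and a single head from each recorded layer. A short sketch of that indexing on dummy tensors (the (batch, heads, T, T) shape is an assumption about what retrieve_attention returns):

import torch

# Dummy recordings: one (batch=1, heads=2, T=4, T=4) tensor per layer.
ram = [torch.rand(1, 2, 4, 4) for _ in range(3)]

n_head = 1
attention_matrices = [m[0, n_head] for m in ram]  # one (T, T) matrix per layer
print([tuple(m.shape) for m in attention_matrices])  # [(4, 4), (4, 4), (4, 4)]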