Add a no_prog option to the RPL task and save attention images.
[picoclvr.git] / tasks.py
index da39a83..17904d8 100755 (executable)
--- a/tasks.py
+++ b/tasks.py
@@ -12,6 +12,13 @@ import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
+from mygpt import BracketedSequence
+
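+# save_attention_image is optional: when graph.py cannot be imported, the
+# RPL task below simply skips writing attention images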
+try:
+    from graph import save_attention_image
+except ImportError:
+    save_attention_image = None
+
 ######################################################################
 
 
@@ -133,7 +140,6 @@ class ProblemLevel2(Problem):
             num_classes=self.len_source,
         )
         source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
         marker1 = torch.full((nb, 1), 10)
         result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
         marker2 = torch.full((nb, 1), 11)
@@ -1059,6 +1065,7 @@ class RPL(Task):
         max_input=9,
         prog_len=6,
         nb_runs=5,
+        no_prog=False,  # if True, excise the program from every sequence
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1066,6 +1073,7 @@ class RPL(Task):
 
         self.batch_size = batch_size
         self.device = device
+        self.no_prog = no_prog
 
         train_sequences = [
             rpl.generate(
@@ -1100,13 +1108,43 @@ class RPL(Task):
         self.id2token = dict([(n, c) for c, n in self.token2id.items()])
 
         self.t_nul = self.token2id["<nul>"]
-        self.t_prog = self.token2id["<prog>"]
-        self.t_input = self.token2id["<input>"]
-        self.t_output = self.token2id["<output>"]
+        self.t_input = self.token2id["<in>"]
+        self.t_output = self.token2id["<out>"]
+        self.t_prog = self.token2id["<prg>"]
+        self.t_end = self.token2id["<end>"]
 
         self.train_input = self.tensorize(train_sequences)
         self.test_input = self.tensorize(test_sequences)
 
+        if no_prog:
+            # Excise the program from every train and test example: keep
+            # everything up to and including the <prg> marker, write <end>
+            # right after it, and pad the tail with <nul>. k holds column
+            # indices, p the per-row position of <prg>.
+            def excise_prog(input):
+                k = torch.arange(input.size(1), device=input.device)[None, :]
+                p = ((input == self.t_prog).long() * k).max(1, keepdim=True).values
+                return (
+                    input * (k <= p).long()
+                    + self.t_end * (k == p + 1).long()
+                    + self.t_nul * (k > p + 1).long()
+                )
+
+            self.train_input = excise_prog(self.train_input)
+            self.test_input = excise_prog(self.test_input)
+
         if logger is not None:
             logger(f"value_max {val_max}")
             for x in self.train_input[:25]:
@@ -1154,13 +1192,13 @@ class RPL(Task):
             )
 
             sum_nb_total, sum_nb_errors = 0, 0
-            for x, y in zip(input, result):
-                seq = [self.id2token[i.item()] for i in y]
+            for one_input, one_result in zip(input, result):
+                seq = [self.id2token[t.item()] for t in one_result]
                 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                 sum_nb_total += 1
                 sum_nb_errors += 0 if nb_errors == 0 else 1
                 if nb_to_log > 0:
-                    gt_seq = [self.id2token[i.item()] for i in x]
+                    gt_seq = [self.id2token[t.item()] for t in one_input]
                     _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                     gt_prog = " ".join([str(x) for x in gt_prog])
                     prog = " ".join([str(x) for x in prog])
@@ -1201,14 +1239,20 @@ class RPL(Task):
             )
 
             sum_nb_total, sum_nb_errors = 0, 0
-            for x, y, i, j in zip(input, result, last_output_idx, first_prog_idx):
-                seq = [self.id2token[i.item()] for i in y]
+            for one_input, one_result, i, j in zip(
+                input, result, last_output_idx, first_prog_idx
+            ):
+                seq = [self.id2token[t.item()] for t in one_result]
                 sum_nb_total += 1
-                correct = (x - y).abs().max() == 0
+                correct = (one_input - one_result).abs().max() == 0
                 sum_nb_errors += 0 if correct else 1
                 if nb_to_log > 0:
-                    result_stack = [self.id2token[i.item()] for i in y[i : j + 1]]
-                    target_stack = [self.id2token[i.item()] for i in x[i : j + 1]]
+                    # use t as the loop variable so it does not shadow the
+                    # slice indices i and j from the enclosing loop
+                    result_stack = [
+                        self.id2token[t.item()] for t in one_result[i : j + 1]
+                    ]
+                    target_stack = [
+                        self.id2token[t.item()] for t in one_input[i : j + 1]
+                    ]
                     comment = "*" if correct else "-"
                     result_stack = " ".join([str(x) for x in result_stack])
                     target_stack = " ".join([str(x) for x in target_stack])
@@ -1221,13 +1265,14 @@ class RPL(Task):
 
         # --------------------------------------------------------------------
 
-        test_nb_total, test_nb_errors = compute_nb_errors_prog(
-            self.test_input[:1000].to(self.device), nb_to_log=10
-        )
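+        # program-prediction accuracy is meaningless when the programs
+        # have been excised from the data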
+        if not self.no_prog:
+            test_nb_total, test_nb_errors = compute_nb_errors_prog(
+                self.test_input[:1000].to(self.device), nb_to_log=10
+            )
 
-        logger(
-            f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
-        )
+            logger(
+                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
+            )
 
         test_nb_total, test_nb_errors = compute_nb_errors_output(
             self.test_input[:1000].to(self.device), nb_to_log=10
@@ -1237,6 +1282,40 @@ class RPL(Task):
             f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
         )
 
+        if save_attention_image is not None:
+            # pick one random test example and trim the trailing <nul> padding
+            ns = torch.randint(self.test_input.size(0), (1,)).item()
+            input = self.test_input[ns : ns + 1].clone()
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
+
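+            # single forward pass in eval mode with attention recording on;
+            # the model's training mode is restored afterwards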
+            with torch.autograd.no_grad():
+                t = model.training
+                model.eval()
+                model.record_attention(True)
+                model(BracketedSequence(input))
+                model.train(t)
+                ram = model.retrieve_attention()
+                model.record_attention(False)
+
+            tokens_output = [self.id2token[t.item()] for t in input[0]]
+            # the input at position t is the output at t-1 (next-token prediction)
+            tokens_input = ["n/a"] + tokens_output[:-1]
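+            # ram holds one attention tensor per layer, indexed
+            # [batch, head, query, key]; write one image per head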
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
+                attention_matrices = [m[0, n_head] for m in ram]
+                save_attention_image(
+                    filename,
+                    tokens_input,
+                    tokens_output,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
+                    token_gap=12,
+                    layer_gap=50,
+                )
+                logger(f"wrote {filename}")
+
 
 ######################################################################