Update.
diff --git a/tasks.py b/tasks.py
index 066f1bb..f4be293 100755
--- a/tasks.py
+++ b/tasks.py
@@ -111,13 +111,25 @@ class SandBox(Task):
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
         # A bit of paranoia never hurts
-        assert (
-            self.nb_codes <= max_nb_codes
-            and self.train_input.min() >= 0
-            and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
-        )
+        assert self.nb_codes <= max_nb_codes
+        assert self.train_input.min() >= 0
+        assert self.test_input.min() >= 0
+        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
+        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
+
+        if logger is not None:
+            for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
+                logger(f"train_sequences {self.problem.seq2str(s)}")
+                a = "".join(["01"[x.item()] for x in a])
+                logger(f"                {a}")
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
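
The rewritten check above also accepts masks that are all zeros or all ones, which the old single conjunction rejected, and the .item() conversion is what makes the set lookup work: unique() yields 0-dim tensors, which hash by identity, while element-wise == happens to be truthy. A quick illustration outside the commit, in plain PyTorch:

import torch

mask = torch.tensor([[0, 0, 1, 1], [0, 1, 1, 1]])

u = tuple(mask.unique())                  # (tensor(0), tensor(1)), 0-dim tensors
print(u == (0, 1))                        # True: element-wise == is truthy,
                                          # which is why the old assert worked
print(u in {(0,), (1,), (0, 1)})          # False: set lookup hashes the tensors
print(tuple(x.item() for x in mask.unique()) in {(0,), (1,), (0, 1)})  # True
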
@@ -151,17 +163,24 @@ class SandBox(Task):
                 device=self.device,
             )
 
+            log_ground_truth = ar_mask.min() == 0  # some positions were given as a prompt
+
             if logger is not None:
                 for sp, st in zip(result[:10], input[:10]):
                     logger(
                         f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
                     )
-                    logger(
-                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
-                    )
+                    if log_ground_truth:
+                        logger(
+                            f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                        )
 
-            nb_total = ar_mask.sum().item()
-            nb_correct = ((result == input).long() * ar_mask).sum().item()
+            nb_total, nb_correct = self.problem.compute_nb_correct(
+                input, ar_mask, result
+            )
+
+            # nb_total = ar_mask.sum().item()
+            # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
             return nb_total, nb_correct
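
The accuracy computation above now delegates to the problem object, with the old generic formula kept as a comment for reference. A hypothetical problem-side implementation honoring the same contract would look like the sketch below; the actual per-problem versions in picoclvr may score differently (e.g. requiring a whole sequence to match).

import torch

def compute_nb_correct(input, ar_mask, result):
    # Score only the generated positions (ar_mask == 1); the frozen
    # positions of result were copied from input and always match.
    nb_total = ar_mask.sum().item()
    nb_correct = ((result == input).long() * ar_mask).sum().item()
    return nb_total, nb_correct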
 
@@ -193,30 +212,30 @@ class SandBox(Task):
                 with torch.autograd.no_grad():
                     t = model.training
                     model.eval()
-                    model.record_attention(True)
+                    model.record_attention(True)
                     model(BracketedSequence(input))
                     model.train(t)
-                    ram = model.retrieve_attention()
-                    model.record_attention(False)
-
-                tokens_output = [c for c in self.problem.seq2str(input[0])]
-                tokens_input = ["n/a"] + tokens_output[:-1]
-                for n_head in range(ram[0].size(1)):
-                    filename = os.path.join(
-                        result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
-                    )
-                    attention_matrices = [m[0, n_head] for m in ram]
-                    save_attention_image(
-                        filename,
-                        tokens_input,
-                        tokens_output,
-                        attention_matrices,
-                        k_top=10,
-                        # min_total_attention=0.9,
-                        token_gap=12,
-                        layer_gap=50,
-                    )
-                    logger(f"wrote {filename}")
+                    ram = model.retrieve_attention()
+                    model.record_attention(False)
+
+                tokens_output = [c for c in self.problem.seq2str(input[0])]
+                tokens_input = ["n/a"] + tokens_output[:-1]
+                # for n_head in range(ram[0].size(1)):
+                # filename = os.path.join(
+                # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+                # )
+                # attention_matrices = [m[0, n_head] for m in ram]
+                # save_attention_image(
+                # filename,
+                # tokens_input,
+                # tokens_output,
+                # attention_matrices,
+                # k_top=10,
+                ##min_total_attention=0.9,
+                # token_gap=12,
+                # layer_gap=50,
+                # )
+                # logger(f"wrote {filename}")
 
 
 ######################################################################
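
For reference, the record/retrieve toggling preserved in the hunk above follows this protocol; the method names come from the diff, but the body below is an illustrative sketch, not the actual model implementation.

import torch

def capture_attention(model, bs):
    was_training = model.training
    model.eval()                      # deterministic forward pass, no dropout
    model.record_attention(True)      # layers start keeping their attention maps
    with torch.autograd.no_grad():
        model(bs)                     # bs is a BracketedSequence, as in the diff
    model.train(was_training)
    ram = model.retrieve_attention()  # one (batch, n_head, T, T) tensor per layer
    model.record_attention(False)     # stop recording so later passes stay cheap
    return ram
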
@@ -1555,7 +1574,6 @@ import qmlp
 
 
 class QMLP(Task):
-
     ######################
 
     def __init__(
@@ -1563,6 +1581,7 @@ class QMLP(Task):
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        result_dir,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1577,19 +1596,31 @@ class QMLP(Task):
                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
             )
 
-        seq, q_test_set = generate_sequence_and_test_set(
-            nb_mlps=nb_train_samples+nb_test_samples,
+        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
             nb_samples=self.nb_samples_per_mlp,
             device=self.device,
             batch_size=64,
             nb_epochs=250,
-            nb_mlps_per_batch=1024
+            nb_mlps_per_batch=1024,
         )
 
         self.train_input = seq[:nb_train_samples]
         self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
         self.test_input = seq[nb_train_samples:]
         self.test_q_test_set = q_test_set[nb_train_samples:]
+        self.test_ref_test_errors = test_error[nb_train_samples:]
+
+        filename = os.path.join(result_dir, "train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
+
+        filename = os.path.join(result_dir, "test_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.test_ref_test_errors:
+                f.write(f"{e}\n")
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
@@ -1599,7 +1630,7 @@ class QMLP(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
         ):
-            yield self.trim(batch)
+            yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
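
The per-epoch test_errors_{n_epoch:04d}.dat dumps written in the next hunk are line-aligned with the test_errors_ref.dat dump from __init__, one error per line, so the gap between the model-predicted MLPs and the directly trained reference MLPs (the test_error returned by qmlp.generate_sequence_and_test_set) can be read off pairwise. A hypothetical helper, not part of the commit:

import os

def error_gap(result_dir, n_epoch):
    with open(os.path.join(result_dir, "test_errors_ref.dat")) as f:
        ref = [float(line) for line in f]
    with open(os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")) as f:
        cur = [float(line) for line in f]
    # zip truncates to the shorter file (only the first 1000 test
    # sequences are evaluated per epoch).
    return [c - r for r, c in zip(ref, cur)]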
@@ -1609,14 +1640,13 @@ class QMLP(Task):
     ):
         correct = self.test_input[:1000]
         result = correct.clone()
-        ar_mask = torch.arange(result.size(1)) > self.nb_samples_per_mlp * 3 + 1
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_before {e}")
-
         masked_inplace_autoregression(
             model,
             self.batch_size,
@@ -1626,18 +1656,14 @@ class QMLP(Task):
             device=self.device,
         )
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_after  {e}")
-
-        logger(f"----------------------------------------------------------")
-
-        q_train_set = result[:, : nb_samples * 3]
-        q_params = result[:, nb_samples * 3 + 1 :]
-        error_test = evaluate_q_params(q_params, q_test_set, nb_mlps_per_batch=17)
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
 
-        logger(f"{error_test=}")
+        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
+        with open(filename, "w") as f:
+            for e in error_test:
+                f.write(f"{e}\n")
 
 
 ######################################################################
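
A closing note on the rewritten ar_mask in the last hunk: positions up to nb_samples_per_mlp * 3 + 1 (the quantized train-set prompt) stay frozen, everything after is generated, and the [None, :] plus expand_as turns the per-position comparison into a batch-shaped long mask. A toy illustration with stand-in sizes, not from the repository:

import torch

nb_samples_per_mlp = 2
result = torch.randint(10, (3, 10))          # (batch=3, seq_len=10)

ar_mask = (
    torch.arange(result.size(1), device=result.device)
    > nb_samples_per_mlp * 3 + 1
).long()[None, :].expand_as(result)

print(ar_mask[0])                            # tensor([0, 0, 0, 0, 0, 0, 0, 0, 1, 1])
result *= 1 - ar_mask                        # zero out what will be regenerated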