Update.
[picoclvr.git] / tasks.py
index 44599f7..7a4abbe 100755
--- a/tasks.py
+++ b/tasks.py
@@ -111,13 +111,19 @@ class SandBox(Task):
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
         # A bit of paranoia never hurts
-        assert (
-            self.nb_codes <= max_nb_codes
-            and self.train_input.min() >= 0
-            and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
-        )
+        assert self.nb_codes <= max_nb_codes
+        assert self.train_input.min() >= 0
+        assert self.test_input.min() >= 0
+        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
+        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
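Splitting the single combined assert into one assert per condition makes a failure pinpoint the offending check. The unique-value test also changes behaviour: the old tuple(...) == (0, 1) comparison rejected masks that are entirely 0 or entirely 1, which the new set-membership test accepts. A minimal sketch of that test, assuming PyTorch tensors (check_mask is a hypothetical helper, not part of the repository):

    import torch

    ALLOWED = {(0,), (1,), (0, 1)}

    def check_mask(ar_mask):
        # unique() returns the sorted distinct values; .item() converts each
        # 0-dim tensor to a plain int so the tuple hashes like the integer
        # literals in ALLOWED
        return tuple(x.item() for x in ar_mask.unique()) in ALLOWED

    check_mask(torch.tensor([0, 1, 1, 0]))  # True: mixed mask
    check_mask(torch.tensor([1, 1, 1]))     # True: all-one mask, newly accepted
    check_mask(torch.tensor([0, 2, 1]))     # False: unexpected value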
@@ -151,17 +157,24 @@ class SandBox(Task):
                 device=self.device,
             )
 
+            log_ground_truth = ar_mask.min() == 0
+
             if logger is not None:
                 for sp, st in zip(result[:10], input[:10]):
                     logger(
                         f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
                     )
-                    logger(
-                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
-                    )
+                    if log_ground_truth:
+                        logger(
+                            f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                        )
 
-            nb_total = ar_mask.sum().item()
-            nb_correct = ((result == input).long() * ar_mask).sum().item()
+            nb_total, nb_correct = self.problem.compute_nb_correct(
+                input, ar_mask, result
+            )
+
+            # nb_total = ar_mask.sum().item()
+            # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
             return nb_total, nb_correct
 
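The scoring now goes through self.problem.compute_nb_correct, which this hunk calls but does not show, so each problem can define its own notion of correctness. For reference, a standalone sketch of the default behaviour suggested by the commented-out fallback above (illustrative only, not the repository's implementation):

    import torch

    def compute_nb_correct(input, ar_mask, result):
        # Count the positions the model had to generate (mask == 1) and,
        # among them, those where the generated token matches the reference.
        nb_total = ar_mask.sum().item()
        nb_correct = ((result == input).long() * ar_mask).sum().item()
        return nb_total, nb_correct

    input = torch.tensor([[3, 5, 7, 9]])
    result = torch.tensor([[3, 5, 8, 9]])
    ar_mask = torch.tensor([[0, 1, 1, 1]])
    print(compute_nb_correct(input, ar_mask, result))  # (3, 2)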
@@ -1588,13 +1601,19 @@ class QMLP(Task):
 
         self.train_input = seq[:nb_train_samples]
         self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
         self.test_input = seq[nb_train_samples:]
         self.test_q_test_set = q_test_set[nb_train_samples:]
-        self.ref_test_errors = test_error
+        self.test_ref_test_errors = test_error[nb_train_samples:]
+
+        filename = os.path.join(result_dir, "train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
 
         filename = os.path.join(result_dir, f"test_errors_ref.dat")
         with open(filename, "w") as f:
-            for e in self.ref_test_errors:
+            for e in self.test_ref_test_errors:
                 f.write(f"{e}\n")
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
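
The QMLP hunk splits the reference test errors along the same boundary as the sequences (nb_train_samples) and writes each split with an identical one-value-per-line loop. A small sketch that factors the duplicated loop into a helper, assuming the variables from the hunk above (dump_errors is hypothetical, not part of the commit):

    import os

    def dump_errors(result_dir, filename, errors):
        # One reference error per line, matching the format of the
        # train_errors_ref.dat / test_errors_ref.dat files written above.
        with open(os.path.join(result_dir, filename), "w") as f:
            for e in errors:
                f.write(f"{e}\n")

    # dump_errors(result_dir, "train_errors_ref.dat", test_error[:nb_train_samples])
    # dump_errors(result_dir, "test_errors_ref.dat", test_error[nb_train_samples:])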