Relax the ar_mask sanity check, delegate nb_correct to the problem, and add the QMLP task.
diff --git a/tasks.py b/tasks.py
index 183c3cf..0858282 100755
--- a/tasks.py
+++ b/tasks.py
@@ -110,13 +110,16 @@ class SandBox(Task):
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
+
         # A bit of paranoia never hurts
         assert (
             self.nb_codes <= max_nb_codes
             and self.train_input.min() >= 0
             and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
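+            # .item() converts the tensor values to plain ints; an all-0
+            # or all-1 mask is legitimate, hence the three candidates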
+            and tuple(x.item() for x in self.train_ar_mask.unique()) in {(0,), (1,), (0, 1)}
+            and tuple(x.item() for x in self.test_ar_mask.unique()) in {(0,), (1,), (0, 1)}
         )
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -160,8 +161,12 @@ class SandBox(Task):
                         f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
                     )
 
-            nb_total = ar_mask.sum().item()
-            nb_correct = ((result == input).long() * ar_mask).sum().item()
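+            # the problem now computes its own correctness criterion;
+            # the generic masked-token count is kept below for reference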
+            nb_total, nb_correct = self.problem.compute_nb_correct(input, ar_mask, result)
+
+            # nb_total = ar_mask.sum().item()
+            # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
             return nb_total, nb_correct
 
@@ -1550,3 +1553,121 @@ class Grid(Task):
 
 
 ######################################################################
+
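+# the qmlp module generates the sequences: it trains small MLPs,
+# quantizes them, and can evaluate sets of quantized parameters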
+import qmlp
+
+
+class QMLP(Task):
+    ######################
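+    # Each sequence encodes one small learning problem: the first
+    # 3 * nb_samples_per_mlp tokens are the quantized training samples,
+    # and the remaining tokens are the quantized parameters of an MLP
+    # trained on them. The model has to generate those parameters,
+    # which are then scored on a held-out test set in produce_results.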
+
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        result_dir,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.device = device
+        self.batch_size = batch_size
+        self.nb_samples_per_mlp = 256
+
+        if logger is not None:
+            logger(
+                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
+            )
+
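+        # one MLP is trained per sequence; the call returns the token
+        # sequences, the quantized held-out test sets, and the test
+        # error of each reference MLP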
+        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
+            nb_samples=self.nb_samples_per_mlp,
+            device=self.device,
+            batch_size=64,
+            nb_epochs=250,
+            nb_mlps_per_batch=1024,
+        )
+
+        self.train_input = seq[:nb_train_samples]
+        self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
+        self.test_input = seq[nb_train_samples:]
+        self.test_q_test_set = q_test_set[nb_train_samples:]
+        self.test_ref_test_errors = test_error[nb_train_samples:]
+
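+        # save the reference test errors, to be compared with the
+        # per-epoch errors written out in produce_results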
+        filename = os.path.join(result_dir, "train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
+
+        filename = os.path.join(result_dir, "test_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.test_ref_test_errors:
+                f.write(f"{e}\n")
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+    def batches(self, split="train"):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+    ):
+        correct = self.test_input[:1000]
+        result = correct.clone()
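+        # the first 3 * nb_samples_per_mlp + 2 tokens, i.e. the encoded
+        # training samples plus delimiter tokens, are kept as the
+        # prompt; the model generates the remaining parameter tokens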
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # erase the tokens to generate (paraaaaanoiaaaaaaa)
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
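+        # split the completed sequences back into training samples and
+        # predicted parameters, and evaluate the latter on the held-out
+        # quantized test sets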
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
+
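+        # one test error per generated MLP, in the same format as
+        # test_errors_ref.dat written at construction time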
+        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
+        with open(filename, "w") as f:
+            for e in error_test:
+                f.write(f"{e}\n")
+
+
+######################################################################