Remove a leftover debug print from TaskFromFile; add the Escape task.
[picoclvr.git] / tasks.py
index d21e264..0f80d4f 100755 (executable)
--- a/tasks.py
+++ b/tasks.py
@@ -99,7 +99,6 @@ class TaskFromFile(Task):
         ).to("cpu")
 
         if shuffle:
-            print("SHUFFLING!")
             i = torch.randperm(input.size(0))
             input = input[i].contiguous()
             pred_mask = pred_mask[i].contiguous()
@@ -1862,3 +1861,99 @@ class QMLP(Task):
 
 
 ######################################################################
+
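+# "escape" is a local module of this repository: it generates grid-world
+# escape episodes and converts them to and from flat token sequences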
+import escape
+
+
+class Escape(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        height,
+        width,
+        T,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = height
+        self.width = width
+
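+        # generate train and test episodes in a single call, then
+        # serialize them into flat token sequences; lookahead_delta
+        # presumably controls how far ahead rewards are reported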
+        states, actions, rewards = escape.generate_episodes(
+            nb_train_samples + nb_test_samples, height, width, T
+        )
+        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=5)
+        self.train_input = seq[:nb_train_samples].to(self.device)
+        self.test_input = seq[nb_train_samples:].to(self.device)
+
+        # infer the vocabulary size from the data; int() turns the
+        # 0-dim tensor returned by max() into a plain Python int
+        self.nb_codes = int(max(self.train_input.max(), self.test_input.max()) + 1)
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
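+        # yield mini-batches from the train or test token sequences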
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
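+        # keep only the prompt of the first 100 test sequences, i.e.
+        # the first height*width + 3 tokens (presumably the initial grid
+        # state plus a few control tokens), and regenerate the rest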
+        result = self.test_input[:100].clone()
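+        # ar_mask is 1 on the positions to generate, 0 on the prompt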
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.height * self.width + 2
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # paranoia: make sure no ground truth can leak through
+
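+        # fill the masked positions in place, one token at a time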
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
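+        # decode the generated sequences back into episodes and render
+        # them as text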
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        text = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(text)
+        logger(f"wrote {filename}")
+
+
+######################################################################