Update: add optional sample shuffling to TaskFromFile and introduce the Escape task.
diff --git a/tasks.py b/tasks.py
index e5d3a7e..829eb24 100755
--- a/tasks.py
+++ b/tasks.py
@@ -71,7 +71,7 @@ class Task:
 
 
 class TaskFromFile(Task):
-    def tensorize(self, pairs):
+    def tensorize(self, pairs, shuffle):
         len_max = max([len(x[0]) for x in pairs])
 
         input = torch.cat(
@@ -98,6 +98,11 @@ class TaskFromFile(Task):
             0,
         ).to("cpu")
 
+        if shuffle:
+            i = torch.randperm(input.size(0))
+            input = input[i].contiguous()
+            pred_mask = pred_mask[i].contiguous()
+
         return input, pred_mask
 
     # trim all the tensors in the tuple z to remove as many tokens from
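
The new shuffle branch applies one permutation to both tensors, so each sample
stays aligned with its prediction mask. A minimal standalone sketch of the
pattern (tensor names are illustrative, not from the repository):

    import torch

    input = torch.arange(12).reshape(4, 3)   # 4 samples, 3 tokens each
    pred_mask = (input % 2 == 0).long()      # mask paired row-for-row with input

    i = torch.randperm(input.size(0))        # one permutation for both tensors
    input, pred_mask = input[i].contiguous(), pred_mask[i].contiguous()
    # row k of input still corresponds to row k of pred_mask
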
@@ -122,6 +127,7 @@ class TaskFromFile(Task):
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        shuffle=False,
         device=torch.device("cpu"),
     ):
         self.batch_size = batch_size
@@ -156,8 +162,12 @@ class TaskFromFile(Task):
         self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
 
-        self.train_input, self.train_pred_masks = self.tensorize(train_pairs)
-        self.test_input, self.test_pred_masks = self.tensorize(test_pairs)
+        self.train_input, self.train_pred_masks = self.tensorize(
+            train_pairs, shuffle=shuffle
+        )
+        self.test_input, self.test_pred_masks = self.tensorize(
+            test_pairs, shuffle=shuffle
+        )
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
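
Callers opt in to shuffling at construction time; the flag defaults to False,
so existing behavior is unchanged. A hypothetical instantiation (the leading
file arguments, elided from this hunk, are left as a placeholder):

    task = TaskFromFile(
        ...,                    # file arguments not shown in this hunk
        nb_train_samples=10000,
        nb_test_samples=1000,
        batch_size=25,
        shuffle=True,           # permute samples once, at tensorization time
    )
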
@@ -1851,3 +1861,175 @@ class QMLP(Task):
 
 
 ######################################################################
+
+import escape
+
+
+class Escape(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        height,
+        width,
+        T,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = height
+        self.width = width
+
+        states, actions, rewards = escape.generate_episodes(
+            nb_train_samples + nb_test_samples, height, width, T
+        )
+        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
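+        # episodes2seq flattens each episode into one token sequence; an
+        # iteration packs the state (height x width tokens) followed by the
+        # action, reward and lookahead-reward tokens (see it_len in
+        # thinking_autoregression)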
+        # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+        self.train_input = seq[:nb_train_samples].to(self.device)
+        self.test_input = seq[nb_train_samples:].to(self.device)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def thinking_autoregression(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:100].clone()
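+        # t: [1, seq_len] tensor of token positions, used to build the
+        # autoregression masks below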
+        t = torch.arange(result.size(1), device=result.device)[None, :]
+
+        state_len = self.height * self.width
+        it_len = state_len + 3  # state / action / reward / lookahead_reward
+
+        def ar(result, ar_mask):
+            ar_mask = ar_mask.expand_as(result)
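+            # zero out the positions that are about to be regenerated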
+            result *= 1 - ar_mask
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+                progress_bar_desc=None,
+            )
+
+        # Generate iteration after iteration
+
+        for u in tqdm.tqdm(
+            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+        ):
+            # Set the lookahead reward to -1 for the current iteration,
+            # then sample the next state
+            s = -1
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u).long() * (t < u + state_len).long()
+            ar(result, ar_mask)
+
+            # Set the lookahead reward to +1 for the current
+            # iteration, then sample the action and reward
+            s = 1
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
+            ar(result, ar_mask)
+
+            # Fix the previous lookahead rewards in a consistent state
+            for v in range(0, u, it_len):
+                # Extract the rewards
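+                # reward tokens of iterations v+1 .. u, one every it_len positions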
+                r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
+                r = r - escape.first_lookahead_rewards_code - 1
+                a = r.min(dim=1).values
+                b = r.max(dim=1).values
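+                # keep the most negative future reward if there is one,
+                # otherwise the most positive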
+                s = (a < 0).long() * a + (a >= 0).long() * b
+                result[:, v + state_len + 2] = (
+                    s + 1 + escape.first_lookahead_rewards_code
+                )
+
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        seq_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(seq_str)
+            logger(f"wrote {filename}")
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:100].clone()
+
+        # Saving the ground truth
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        seq_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(seq_str)
+            logger(f"wrote {filename}")
+
+        # Re-generating from the first frame
+
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            >= self.height * self.width + 3
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # safety: zero out everything to be regenerated
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        seq_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(seq_str)
+            logger(f"wrote {filename}")
+
+        self.thinking_autoregression(
+            n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
+        )
+
+
+######################################################################