Update.
[picoclvr.git] / tasks.py
index dba6e13..fddcaff 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1874,6 +1874,7 @@ class Escape(Task):
         height,
         width,
         T,
+        nb_walls,
         logger=None,
         device=torch.device("cpu"),
     ):
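
The constructor gains a nb_walls argument, forwarded to escape.generate_episodes in the next hunk. A hypothetical instantiation, as a sketch (the argument values, and the constructor arguments not visible in this hunk, are assumptions):

    task = Escape(
        nb_train_samples=25000,
        nb_test_samples=1000,
        batch_size=25,
        height=5,
        width=7,
        T=25,
        nb_walls=3,  # new argument; presumably the number of walls per world
    )
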
@@ -1885,20 +1886,15 @@ class Escape(Task):
         self.width = width
 
         states, actions, rewards = escape.generate_episodes(
-            nb_train_samples + nb_test_samples, height, width, T
+            nb_train_samples + nb_test_samples, height, width, T, nb_walls
         )
-        seq = escape.episodes2seq(states, actions, rewards)
+        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
+        # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
-        # if logger is not None:
-        # for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
-        # logger(f"train_sequences {self.problem.seq2str(s)}")
-        # a = "".join(["01"[x.item()] for x in a])
-        # logger(f"                {a}")
-
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
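
escape.episodes2seq is now called with lookahead_delta=T, which adds a lookahead-reward token to each iteration of the sequence. The exact layout is not shown in this diff, but the offsets used by thinking_autoregression below imply the following structure; a sketch, assuming that inference is correct:

    def field_offsets(u, height, width):
        """Token indices of the fields of the iteration whose state starts at u."""
        state_len = height * width
        return {
            "state": (u, u + state_len),  # the height*width grid tokens
            "action": u + state_len,
            "reward": u + state_len + 1,
            # the lookahead reward sits last, at index u-1 of the next iteration
            "lookahead_reward": u + state_len + 2,
        }

Each iteration is thus it_len = height * width + 3 tokens long, which matches the it_len computed in the new method.
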
@@ -1914,13 +1910,97 @@ class Escape(Task):
     def vocabulary_size(self):
         return self.nb_codes
 
+    def thinking_autoregression(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:250].clone()
+        t = torch.arange(result.size(1), device=result.device)[None, :]
+
+        state_len = self.height * self.width
+        it_len = state_len + 3  # state / action / reward / lookahead_reward
+
+        def ar(result, ar_mask):
+            ar_mask = ar_mask.expand_as(result)
+            result *= 1 - ar_mask
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+                progress_bar_desc=None,
+            )
+
+        # Generate iteration after iteration
+
+        for u in tqdm.tqdm(
+            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+        ):
+            # Set the lookahead reward to either 0 or -1 for the
+            # current iteration, then sample the next state
+            s = -(torch.rand(result.size(0), device=result.device) < 0.2).long()
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u).long() * (t < u + state_len).long()
+            ar(result, ar_mask)
+
+            # Set the lookahead reward to +1 for the current
+            # iteration, then sample the action and reward
+            s = 1
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
+            ar(result, ar_mask)
+
+            # Rewrite the previous lookahead rewards into a consistent state
+            for v in range(0, u, it_len):
+                # Extract the rewards
+                r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
+                r = r - escape.first_rewards_code - 1
+                a = r.min(dim=1).values
+                b = r.max(dim=1).values
+                s = (a < 0).long() * a + (a >= 0).long() * b
+                result[:, v + state_len + 2] = (
+                    s + 1 + escape.first_lookahead_rewards_code
+                )
+
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(str)
+            logger(f"wrote {filename}")
+
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:100].clone()
+        result = self.test_input[:250].clone()
+
+        # Saving the ground truth
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(str)
+            logger(f"wrote {filename}")
+
+        # Re-generating from the first frame
+
         ar_mask = (
             torch.arange(result.size(1), device=result.device)
-            > self.height * self.width + 2
+            >= self.height * self.width + 3
         ).long()[None, :]
         ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
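
The new thinking_autoregression generates test sequences one iteration at a time rather than in a single left-to-right pass: it samples the next state with the lookahead reward forced to 0 or -1 (-1 with probability 0.2), then forces the lookahead reward to +1 before sampling the action and reward, and finally rewrites every earlier lookahead-reward token so that it summarizes the rewards that actually follow it. The summary rule of the inner loop over v is equivalent to this standalone paraphrase (a sketch, not code from the diff):

    import torch

    def summarize_future_rewards(r):
        # r: (N, k) tensor of the k rewards following a given iteration,
        # with values in {-1, 0, +1}. The summary is the minimum if any
        # future reward is negative, and the maximum otherwise.
        a = r.min(dim=1).values
        b = r.max(dim=1).values
        return torch.where(a < 0, a, b)
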
@@ -1934,13 +2014,23 @@ class Escape(Task):
             device=self.device,
         )
 
-        s, a, r = escape.seq2episodes(result, self.height, self.width)
-        str = escape.episodes2str(s, a, r, unicode=True, ansi_colors=True)
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
 
         filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
             f.write(str)
             logger(f"wrote {filename}")
 
+        self.thinking_autoregression(
+            n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
+        )
+
 
 ######################################################################
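
In produce_results, the prompt mask changes from "> self.height * self.width + 2" to ">= self.height * self.width + 3"; over integer token indices the two conditions select exactly the same positions, so the model is still conditioned on the first state plus its action, reward, and lookahead-reward tokens. A minimal check of the equivalence (grid dimensions are hypothetical):

    import torch

    height, width, seq_len = 5, 7, 200
    t = torch.arange(seq_len)
    assert torch.equal(t > height * width + 2, t >= height * width + 3)

The method now also saves the ground-truth episodes before re-generation, decodes the lookahead rewards when rendering (seq2episodes with lookahead=True returns a fourth tensor), and ends by calling thinking_autoregression.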