Rename the escape task to greed, factor the lookahead-reward wiping into wipe_lookahead_rewards(), and apply it to the evaluation prefix in produce_results().
diff --git a/tasks.py b/tasks.py
index 8e8faa9..aa5df72 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1867,10 +1867,10 @@ class QMLP(Task):
 
 ######################################################################
 
-import escape
+import greed
 
 
-class Escape(Task):
+class Greed(Task):
     def __init__(
         self,
         nb_train_samples,
@@ -1890,10 +1890,10 @@ class Escape(Task):
         self.height = height
         self.width = width
 
-        states, actions, rewards = escape.generate_episodes(
+        states, actions, rewards = greed.generate_episodes(
             nb_train_samples + nb_test_samples, height, width, T, nb_walls
         )
-        seq = escape.episodes2seq(states, actions, rewards)
+        seq = greed.episodes2seq(states, actions, rewards)
         # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
@@ -1905,6 +1905,15 @@ class Escape(Task):
         self.index_reward = self.state_len + 2
         self.it_len = self.state_len + 3  # lookahead_reward / state / action / reward
 
+    def wipe_lookahead_rewards(self, batch):
+        t = torch.arange(batch.size(1), device=batch.device)[None, :]
+        u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+        lr_mask = (t <= u).long() * (
+            t % self.it_len == self.index_lookahead_reward
+        ).long()
+
+        return lr_mask * greed.lookahead_reward2code(2) + (1 - lr_mask) * batch
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
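The new wipe_lookahead_rewards() factors out the masking that batches() used to do inline (removed in the next hunk): for each sequence it draws a random cutoff u, then overwrites every lookahead-reward slot at or before that cutoff with the UNKNOWN code. A minimal, self-contained sketch of the masking, assuming index_lookahead_reward = 0 (consistent with the "lookahead_reward / state / action / reward" layout above) and a stand-in token 99 in place of greed.lookahead_reward2code(2):

    import torch

    B, T, it_len, index_lookahead_reward, UNKNOWN = 2, 12, 4, 0, 99
    batch = torch.arange(B * T).reshape(B, T)  # stand-in token ids

    t = torch.arange(T)[None, :]  # (1, T) position indices
    u = torch.randint(T, (B, 1))  # (B, 1) per-sequence random cutoff
    # 1 at every lookahead-reward slot located at or before the cutoff
    lr_mask = (t <= u).long() * (t % it_len == index_lookahead_reward).long()
    print(lr_mask * UNKNOWN + (1 - lr_mask) * batch)

Randomizing the cutoff means the model trains on prefixes whose lookahead rewards have been partially erased, which is the same conditioning produce_results() now reproduces at evaluation time (last hunk).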
@@ -1915,17 +1924,10 @@ class Escape(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
-            t = torch.arange(batch.size(1), device=batch.device)[None, :]
-            u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
-            lr_mask = (t <= u).long() * (
-                t % self.it_len == self.index_lookahead_reward
-            ).long()
-
-            batch = lr_mask * escape.lookahead_reward2code(2) + (1 - lr_mask) * batch
-            yield batch
+            yield self.wipe_lookahead_rewards(batch)
 
     def vocabulary_size(self):
-        return escape.nb_codes
+        return greed.nb_codes
 
     def thinking_autoregression(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
@@ -1954,7 +1956,7 @@ class Escape(Task):
         # Erase all the content but that of the first iteration
         result[:, self.it_len :] = -1
         # Set the lookahead_reward of the first iteration to UNKNOWN
-        result[:, self.index_lookahead_reward] = escape.lookahead_reward2code(2)
+        result[:, self.index_lookahead_reward] = greed.lookahead_reward2code(2)
 
         t = torch.arange(result.size(1), device=result.device)[None, :]
 
@@ -1968,30 +1970,30 @@ class Escape(Task):
             if u > 0:
                 result[
                     :, u + self.index_lookahead_reward
-                ] = escape.lookahead_reward2code(2)
+                ] = greed.lookahead_reward2code(2)
                 ar_mask = (t >= u + self.index_states).long() * (
                     t < u + self.index_states + self.state_len
                 ).long()
                 ar(result, ar_mask)
 
             # Generate the action and reward with lookahead_reward to +1
-            result[:, u + self.index_lookahead_reward] = escape.lookahead_reward2code(1)
+            result[:, u + self.index_lookahead_reward] = greed.lookahead_reward2code(1)
             ar_mask = (t >= u + self.index_action).long() * (
                 t <= u + self.index_reward
             ).long()
             ar(result, ar_mask)
 
             # Set the lookahead_reward to UNKNOWN for the next iterations
-            result[:, u + self.index_lookahead_reward] = escape.lookahead_reward2code(2)
+            result[:, u + self.index_lookahead_reward] = greed.lookahead_reward2code(2)
 
         filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
             for n in range(10):
                 for s in snapshots:
-                    lr, s, a, r = escape.seq2episodes(
+                    lr, s, a, r = greed.seq2episodes(
                         s[n : n + 1], self.height, self.width
                     )
-                    str = escape.episodes2str(
+                    str = greed.episodes2str(
                         lr, s, a, r, unicode=True, ansi_colors=True
                     )
                     f.write(str)
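Going by this diff's comments, greed.lookahead_reward2code(2) encodes an UNKNOWN lookahead reward and greed.lookahead_reward2code(1) a lookahead reward of +1; each pass of the loop above generates the next state under an UNKNOWN prompt, generates the action and reward under an optimistic +1 prompt, then resets the slot to UNKNOWN so later iterations are not biased by it. The two ar_mask tensors select which positions ar() resamples: first the state tokens of iteration u, then its action/reward pair. A minimal sketch of those masks in isolation, assuming hypothetical index values consistent with the layout comment (state_len = 4, hence index_states = 1, index_action = 5, index_reward = 6, it_len = 7):

    import torch

    state_len = 4
    index_states, index_action, index_reward, it_len = 1, 5, 6, 7
    t = torch.arange(2 * it_len)[None, :]

    for u in range(0, 2 * it_len, it_len):
        # positions of the state_len state tokens of this iteration
        state_mask = (t >= u + index_states).long() * (
            t < u + index_states + state_len
        ).long()
        # positions of the action and reward tokens of this iteration
        action_reward_mask = (t >= u + index_action).long() * (
            t <= u + index_reward
        ).long()
        print(u, state_mask[0].tolist(), action_reward_mask[0].tolist())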
@@ -1999,8 +2001,8 @@ class Escape(Task):
 
         # Saving the generated sequences
 
-        lr, s, a, r = escape.seq2episodes(result, self.height, self.width)
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        lr, s, a, r = greed.seq2episodes(result, self.height, self.width)
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -2010,16 +2012,16 @@ class Escape(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
+        result = self.wipe_lookahead_rewards(self.test_input[:250].clone())
 
         # Saving the ground truth
 
-        lr, s, a, r = escape.seq2episodes(
+        lr, s, a, r = greed.seq2episodes(
             result,
             self.height,
             self.width,
         )
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
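Note the other substantive change in this hunk: produce_results() now passes the evaluation prefix through wipe_lookahead_rewards() before generation, so at test time the model is conditioned on the same partially erased lookahead-reward tokens it saw in its training batches.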
@@ -2046,12 +2048,12 @@ class Escape(Task):
 
         # Saving the generated sequences
 
-        lr, s, a, r = escape.seq2episodes(
+        lr, s, a, r = greed.seq2episodes(
             result,
             self.height,
             self.width,
         )
-        str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f: