Update.
author    François Fleuret <francois@fleuret.org>
          Sun, 24 Mar 2024 22:11:55 +0000 (23:11 +0100)
committer François Fleuret <francois@fleuret.org>
          Sun, 24 Mar 2024 22:11:55 +0000 (23:11 +0100)
tasks.py

index 56c2b0f..fddcaff 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1939,7 +1939,7 @@ class Escape(Task):
         ):
             # Put the lookahead reward to either 0 or -1 for the
             # current iteration, sample the next state
-            s = -1  # (torch.rand(result.size(0), device = result.device) < 0.2).long()
+            s = -(torch.rand(result.size(0), device=result.device) < 0.2).long()
             result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
             ar_mask = (t >= u).long() * (t < u + state_len).long()
             ar(result, ar_mask)
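
For context, a minimal sketch of what the replacement line computes, on a hypothetical toy batch; first_lookahead_rewards_code is a placeholder value here, not the repository's actual token offset:

    import torch

    first_lookahead_rewards_code = 10              # placeholder token offset
    result = torch.zeros(8, 16, dtype=torch.long)  # toy batch of 8 sequences
    u = 5

    # Per-sequence lookahead reward: -1 with probability 0.2, 0 otherwise;
    # the removed version hard-coded s = -1 for every sequence.
    s = -(torch.rand(result.size(0), device=result.device) < 0.2).long()

    # Shift {-1, 0} to {0, 1} and offset into the reward-token range.
    result[:, u - 1] = s + 1 + first_lookahead_rewards_code
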
@@ -1980,7 +1980,7 @@ class Escape(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:100].clone()
+        result = self.test_input[:250].clone()
 
         # Saving the ground truth
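
The second hunk only enlarges the evaluation slice in produce_results, drawing the first 250 test sequences instead of the first 100; the ground-truth saving and, presumably, the generation that follows then operate on this larger batch.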