######################################################################
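+# Helpers converting between raw action / reward values and the token
+# codes used in the generated sequences. Rewards (and lookahead rewards)
+# take values in {-1, 0, 1}, hence the +1 shift when encoding them.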
+def action2code(r):
+    return first_actions_code + r
+
+
+def code2action(r):
+    return r - first_actions_code
+
+
+def reward2code(r):
+    return first_rewards_code + r + 1
+
+
+def code2reward(r):
+    return r - first_rewards_code - 1
+
+
+def lookahead_reward2code(r):
+    return first_lookahead_rewards_code + r + 1
+
+
+def code2lookahead_reward(r):
+    return r - first_lookahead_rewards_code - 1
+
+
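+# Round-trip examples, following directly from the definitions above:
+#   code2action(action2code(a)) == a
+#   code2reward(reward2code(r)) == r
+
+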
+######################################################################
+
+
def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3):
rnd = torch.rand(nb, height, width)
rnd[:, 0, :] = 0
)
hit = (hit > 0).long()
- assert hit.min() == 0 and hit.max() <= 1
+ # assert hit.min() == 0 and hit.max() <= 1
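+ # Reward at t+1: -1 when the move hits something, otherwise the value of the
+ # bottom-right cell of the agent map (presumably 1 once the agent reaches it, else 0).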
rewards[:, t + 1] = -hit + (1 - hit) * agent[:, t + 1, -1, -1]
actions = actions[:, :, None] + first_actions_code
if lookahead_delta is not None:
- # r = rewards
- # u = F.pad(r, (0, lookahead_delta - 1)).as_strided(
- # (r.size(0), r.size(1), lookahead_delta),
- # (r.size(1) + lookahead_delta - 1, 1, 1),
- # )
- # a = u[:, :, 1:].min(dim=-1).values
- # b = u[:, :, 1:].max(dim=-1).values
- # s = (a < 0).long() * a + (a >= 0).long() * b
- # lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code
-
- # a[n,t]=min_s>t r[n,s]
a = rewards.new_zeros(rewards.size())
b = rewards.new_zeros(rewards.size())
for t in range(a.size(1) - 1):
r = rewards[:, :, None]
rewards = (r + 1) + first_rewards_code
- assert (
- states.min() >= first_state_code
- and states.max() < first_state_code + nb_state_codes
- )
- assert (
- actions.min() >= first_actions_code
- and actions.max() < first_actions_code + nb_actions_codes
- )
- assert (
- rewards.min() >= first_rewards_code
- and rewards.max() < first_rewards_code + nb_rewards_codes
- )
+ # assert (
+ # states.min() >= first_state_code
+ # and states.max() < first_state_code + nb_state_codes
+ # )
+ # assert (
+ # actions.min() >= first_actions_code
+ # and actions.max() < first_actions_code + nb_actions_codes
+ # )
+ # assert (
+ # rewards.min() >= first_rewards_code
+ # and rewards.max() < first_rewards_code + nb_rewards_codes
+ # )
if lookahead_delta is None:
return torch.cat([states, actions, rewards], dim=2).flatten(1)
else:
- assert (
- lookahead_rewards.min() >= first_lookahead_rewards_code
- and lookahead_rewards.max()
- < first_lookahead_rewards_code + nb_lookahead_rewards_codes
- )
+ # assert (
+ # lookahead_rewards.min() >= first_lookahead_rewards_code
+ # and lookahead_rewards.max()
+ # < first_lookahead_rewards_code + nb_lookahead_rewards_codes
+ # )
return torch.cat([states, actions, rewards, lookahead_rewards], dim=2).flatten(
1
)