Add ProblemDegradation and a default Problem.compute_nb_correct; bump ProblemLearnOperator default lengths.
[picoclvr.git] / problems.py
index 5686404..ef48162 100755 (executable)
@@ -17,6 +17,69 @@ class Problem:
     def seq2str(self, seq):
         return "[NOT IMPLEMENTED]"
 
+    # Default correctness measure: count the positions selected by ar_mask at
+    # which the generated result matches the reference input.
+    def compute_nb_correct(self, input, ar_mask, result):
+        nb_total = ar_mask.sum().item()
+        nb_correct = ((result == input).long() * ar_mask).sum().item()
+        return nb_total, nb_correct
+
+####################
+
+
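+# Each sequence is the concatenation of nb_time_steps states of nb_state_tokens
+# values. The initial state puts the whole budget value_max on one random token,
+# and each step moves part of one token's value to its two circular neighbours,
+# conserving the total. With hard=True the time order of the states is reversed.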
+class ProblemDegradation(Problem):
+    def __init__(self, nb_state_tokens=5, nb_time_steps=5, value_max=25, hard=False):
+        self.nb_state_tokens = nb_state_tokens
+        self.nb_time_steps = nb_time_steps
+        self.value_max = value_max
+        self.hard = hard
+
+    def generate_sequences(self, nb):
+        # Initial state: value_max on a single uniformly random token, zero elsewhere.
+        x = (
+            torch.rand(nb, self.nb_state_tokens).sort(dim=-1).indices == 0
+        ).long() * self.value_max
+        seq = [x]
+
+        for t in range(self.nb_time_steps - 1):
+            # Pick one random non-zero position j, take n = half of a random
+            # fraction of its value, remove 2n from it and add n to each of its
+            # two circular neighbours.
+            v = torch.rand(x.size()) * (x > 0).float()
+            u = (v.max(dim=-1, keepdim=True).values == v).long()
+            n = (u * x * torch.rand(x.size())).long().sum(dim=-1, keepdim=True) // 2
+            x = x + n * (u.roll(shifts=-1, dims=-1) - 2 * u + u.roll(shifts=1, dims=-1))
+            seq.append(x)
+
+        if self.hard:
+            seq.reverse()
+
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
+
+    def compute_nb_correct(self, input, ar_mask, result):
+        nb_total = result.size(0)
+        nb_correct = 0
+
+        for seq in result:
+            states = list(seq.split(self.nb_state_tokens))
+            if self.hard:
+                states.reverse()
+
+            # The first state must put value_max on a single token.
+            d = states[0]
+            j = d.sort(descending=True).indices[0]
+            e = d.new_zeros(d.size())
+            e[j] = self.value_max
+            if (d - e).abs().sum() == 0:
+                nb_errors = 0
+                # Each transition must remove 2n from one position and add n to
+                # each of its two circular neighbours.
+                for k in range(len(states) - 1):
+                    d = states[k] - states[k + 1]
+                    j = d.sort(descending=True).indices[0]
+                    e = d.new_zeros(d.size())
+                    e[j] = d[j]
+                    e[(j + 1) % e.size(0)] = -d[j] // 2
+                    e[(j - 1) % e.size(0)] = -d[j] // 2
+                    if (d - e).abs().sum() > 0:
+                        nb_errors += 1
+                if nb_errors == 0:
+                    nb_correct += 1
+
+        return nb_total, nb_correct
+
+    def seq2str(self, seq):
+        return " | ".join(
+            [" ".join([f"{x:02d}" for x in s]) for s in seq.split(self.nb_state_tokens)]
+        )
 
 ####################
 
@@ -87,7 +150,7 @@ class ProblemByHeart(Problem):
 
 
 class ProblemLearnOperator(Problem):
-    def __init__(self, nb_operators=100, len_source=5, len_result=8):
+    def __init__(self, nb_operators=100, len_source=6, len_result=9):
         self.len_source = len_source
         self.len_result = len_result
         self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
@@ -197,7 +260,7 @@ class ProblemAddition(Problem):
 
 
 if __name__ == "__main__":
-    p = ProblemTwoTargets(12, 4)
-    s, m = p.generate_sequences(10)
-    for x in s:
-        print(p.seq2str(x))
+    p = ProblemDegradation(hard=False)
+    s, m = p.generate_sequences(10000)
+    print(p.seq2str(s[0]))
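+    # ProblemDegradation.compute_nb_correct ignores its input and ar_mask
+    # arguments, so the generated sequences can be checked directly.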
+    print(p.compute_nb_correct(None, None, s))