Update: replace ProblemTwoCuts with ProblemDegradation and add ProblemMixing.
diff --git a/problems.py b/problems.py
index 68a46b3..51e90ed 100755
@@ -22,91 +22,106 @@ class Problem:
         nb_correct = ((result == input).long() * ar_mask).sum().item()
         return nb_total, nb_correct
 
-####################
 
+####################
+class ProblemDegradation(Problem):
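+    # Sequences are nb_time_steps states of nb_state_tokens counts each.
+    # The first state puts the whole mass value_max on one random token;
+    # every later state moves part of it to the two neighbouring tokens
+    # (see generate_sequences). hard=True stores the states in reverse
+    # order, so the degradation has to be undone.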
+    def __init__(self, nb_state_tokens=5, nb_time_steps=12, value_max=25, hard=False):
+        assert value_max // nb_state_tokens >= 2  # some token always holds >= 2 units
+        self.nb_state_tokens = nb_state_tokens
+        self.nb_time_steps = nb_time_steps
+        self.value_max = value_max
+        self.hard = hard
 
-class ProblemTwoCuts(Problem):
-    def __init__(self, len_total=50, nb_values=100, global_constraint=True):
-        self.len_total = len_total
-        self.nb_values = nb_values
-        self.global_constraint = global_constraint
-
-    def generate_sequences_internal(self, nb):
-        return u,v,a,b,c
-
-    def generate_sequences(self,nb):
-
-        u = torch.randint(self.len_total, (nb,))
-        v = torch.randint(self.len_total, (nb,))
-
-        a = torch.randint(self.nb_values, (nb,))
-        b = torch.randint(self.nb_values, (nb,))
-        c = torch.randint(self.nb_values, (nb,))
-
-        while True:
-            to_compute = torch.logical_or(u>=v-self.len_total//10,u<v-self.len_total//5)
-            to_compute =torch.logical_or(to_compute, u == 0)
-            to_compute =torch.logical_or(to_compute, v == self.len_total)
-            n = to_compute.long().sum()
-            if n == 0:
-                break
-            else:
-                u[to_compute] = torch.randint(self.len_total, (n,))
-                v[to_compute] = torch.randint(self.len_total, (n,))
-
-        while True:
-            to_compute = a==b
-            to_compute = torch.logical_or(to_compute,b==c)
-            to_compute = torch.logical_or(to_compute,a==c)
-
-            if self.global_constraint:
-                to_compute = torch.logical_or(to_compute,(a*u+b*(v-u)+c*(self.len_total-v)) // self.len_total != self.nb_values//2)
-
-            n = to_compute.long().sum()
-            if n == 0:
-                break
-            else:
-                a[to_compute] = torch.randint(self.nb_values, (n,))
-                b[to_compute] = torch.randint(self.nb_values, (n,))
-                c[to_compute] = torch.randint(self.nb_values, (n,))
-
-        assert (u>=v).long().sum() == 0
-        assert (a==b).long().sum() == 0
-        assert (a==c).long().sum() == 0
-        assert (c==b).long().sum() == 0
-
-        t = torch.arange(self.len_total)
-        seq = (t[None,:] < u[:,None]).long() * a[:,None] + \
-            (t[None,:] >= u[:,None]).long() * (t[None,:] < v[:,None]).long() * b[:,None] + \
-            (t[None,:] >= v[:,None]).long() * c[:,None]
-
-        return seq,seq.new_full(seq.size(), 1, dtype=torch.int64)
+    def generate_sequences(self, nb):
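+        # Initial state: the whole mass value_max on a single token
+        # chosen uniformly at random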
+        x = (
+            torch.rand(nb, self.nb_state_tokens).sort(dim=-1).indices == 0
+        ).long() * self.value_max
+        seq = [x]
+
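+        # Pick uniformly a token holding at least 2 units, remove n of
+        # them (n >= 2, capped by the pile content and by roughly
+        # value_max // 4), and hand m to the previous token and n - m to
+        # the next one, cyclically; the total mass is conserved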
+        for t in range(self.nb_time_steps - 1):
+            v = (torch.rand(x.size()).sort(dim=-1).indices + 1) * (x >= 2).long()
+            u = (v.max(dim=-1, keepdim=True).values == v).long()
+            n = (
+                (u * x)
+                .minimum(2 + torch.randint(self.value_max // 4 - 2, x.size()))
+                .sum(dim=-1, keepdim=True)
+            )
+            m = 1 + ((n - 1) * torch.rand(n.size())).long()
+            x = (
+                x
+                + m * u.roll(shifts=-1, dims=-1)
+                - n * u
+                + (n - m) * u.roll(shifts=1, dims=-1)
+            )
+            seq.append(x)
+
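+        # In the hard variant the most degraded state comes first, so
+        # the concentrated initial state has to be reconstructed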
+        if self.hard:
+            seq.reverse()
+
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
 
     def compute_nb_correct(self, input, ar_mask, result):
         nb_total = result.size(0)
         nb_correct = 0
-        i = torch.arange(result.size(1), device=result.device)
-
-        for k in range(nb_total):
-            s = result[k]
-            a = s[0]
-            uu = (s != a).nonzero()
-            if uu.size(0) > 0:
-                u = uu.min()
-                b = s[u]
-                vv = torch.logical_and(s != b, i >= u).nonzero()
-                if vv.size(0) > 0:
-                    v = vv.min()
-                    c = s[v]
-                    ww = torch.logical_and(s != c, i >= v).nonzero()
-                    if ww.size(0) == 0:
-                        if not self.global_constraint or (a*u+b*(v-u)+c*(self.len_total-v)) // self.len_total == self.nb_values//2:
-                            nb_correct += 1
+        e = result.new_zeros(self.nb_state_tokens)
+
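+        # A sequence is correct iff its first state is a one-hot mass
+        # value_max and every transition takes n units from one token
+        # and hands m and n - m to its two neighbours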
+        for seq in result:
+            states = list(seq.split(self.nb_state_tokens))
+            if self.hard:
+                states.reverse()
+
+            d = states[0]
+            j = d.sort(descending=True).indices[0]
+            e.zero_()
+            e[j] = self.value_max
+            if (d - e).abs().sum() == 0:
+                nb_errors = 0
+                for k in range(len(states) - 1):
+                    d = states[k + 1] - states[k]
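+                    # j is the token whose count dropped the most,
+                    # i.e. the pile that shed its units at this step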
+                    j = d.sort(descending=False).indices[0]
+                    if (
+                        d[j] == 0
+                        or d[j] > self.value_max // 4
+                        or d[(j + 1) % e.size(0)] <= 0
+                        or d[(j + 1) % e.size(0)] >= -d[j]
+                    ):
+                        nb_errors += 1
+                    else:
+                        e.zero_()
+                        e[j] = d[j]
+                        e[(j + 1) % e.size(0)] = d[(j + 1) % e.size(0)]
+                        e[(j - 1) % e.size(0)] = -d[(j + 1) % e.size(0)] - d[j]
+                        if (d - e).abs().sum() > 0:
+                            nb_errors += 1
+                if nb_errors == 0:
+                    nb_correct += 1
 
         return nb_total, nb_correct
 
     def seq2str(self, seq):
-        return " ".join( [ f"{x:02d}" for x in seq ] )
+        return " | ".join(
+            [" ".join([f"{x:02d}" for x in s]) for s in seq.split(self.nb_state_tokens)]
+        )
+
 
 ####################
 
@@ -286,7 +283,136 @@ class ProblemAddition(Problem):
         return "".join(self.id2char[x.item()] for x in seq)
 
 
+####################
+
+
+class ProblemMixing(Problem):
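+    # States are height x width grids. Initially half of the cells,
+    # chosen at random, hold their own flattened index and the other
+    # half hold the filler value height * width; each move rolls one
+    # row or one column by one position. hard=True reverses the
+    # sequence, so the grid has to be un-mixed.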
+    def __init__(self, height=3, width=3, nb_time_steps=12, hard=False):
+        self.height = height
+        self.width = width
+        self.nb_time_steps = nb_time_steps
+        self.hard = hard
+
+    def start_random(self, nb):
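+        # Random start: each cell holds either its own flattened index
+        # or the filler value height * width, half and half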
+        y = torch.arange(self.height * self.width).reshape(1, -1).expand(nb, -1)
+
+        m = (torch.rand(y.size()).sort(dim=-1).indices < y.size(1) // 2).long()
+
+        y = (y * m + self.height * self.width * (1 - m)).reshape(
+            nb, self.height, self.width
+        )
+
+        return y
+
+    def start_error(self, x):
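+        # Per-sample error w.r.t. a valid start: zero iff every cell
+        # holds its own index or the filler value, with exactly half of
+        # the cells matching their index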
+        x = x.flatten(1)
+        u = torch.arange(self.height * self.width).reshape(1, -1)
+        m = ((x - u).abs() == 0).long()
+        d = (x - (m * u + (1 - m) * self.height * self.width)).abs().sum(-1) + (
+            m.sum(dim=-1) != self.height * self.width // 2
+        ).long()
+        return d
+
+    def moves(self, x):
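+        # All grids reachable in one move: every row and every column
+        # rolled by one position in both directions, i.e.
+        # 2 * (height + width) candidate grids per input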
+        y = (
+            x[:, None, :, :]
+            .expand(-1, self.height * 2 + self.width * 2, -1, -1)
+            .clone()
+        )
+        k = 0
+
+        for i in range(self.height):
+            y[:, k, i, :] = y[:, k, i, :].roll(dims=-1, shifts=-1)
+            k += 1
+            y[:, k, i, :] = y[:, k, i, :].roll(dims=-1, shifts=1)
+            k += 1
+
+        for j in range(self.width):
+            y[:, k, :, j] = y[:, k, :, j].roll(dims=-1, shifts=-1)
+            k += 1
+            y[:, k, :, j] = y[:, k, :, j].roll(dims=-1, shifts=1)
+            k += 1
+
+        return y
+
+    def generate_sequences(self, nb):
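+        # Start from a random valid grid and apply nb_time_steps - 1
+        # uniformly chosen single moves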
+        x = self.start_random(nb)
+
+        seq = [x.flatten(1)]
+
+        for t in range(self.nb_time_steps - 1):
+            y = self.moves(x)
+            x = y[torch.arange(nb), torch.randint(y.size(1), (nb,))]
+            seq.append(x.flatten(1))
+
+        if self.hard:
+            seq.reverse()
+
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
+
+    def compute_nb_correct(self, input, ar_mask, result):
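+        # d cumulates the start error and, for every transition, the
+        # distance to the closest one-move successor; a sequence is
+        # correct iff d == 0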
+        a = [
+            x.reshape(result.size(0), self.height, self.width)
+            for x in result.split(self.height * self.width, dim=1)
+        ]
+        if self.hard:
+            a.reverse()
+
+        x = a[0]
+
+        d = self.start_error(x)
+
+        for t in range(self.nb_time_steps - 1):
+            x0, x = a[t], a[t + 1]
+            y = self.moves(x0)
+            d = d + (x[:, None] - y).abs().sum((-1, -2)).min(dim=-1).values
+
+        nb_total, nb_correct = result.size(0), (d == 0).long().sum().item()
+
+        return nb_total, nb_correct
+
+    def seq2str(self, seq):
+        return " | ".join(
+            [
+                " ".join(
+                    ["-".join([f"{x:02d}" for x in s]) for s in r.split(self.width)]
+                )
+                for r in seq.split(self.height * self.width)
+            ]
+        )
+
+
+####################
+
 if __name__ == "__main__":
-    p = ProblemTwoCuts(12)
+    p = ProblemMixing(width=4, hard=True)
     s, m = p.generate_sequences(10000)
+    for x in s[:5]:
+        print(p.seq2str(x))
     print(p.compute_nb_correct(None, None, s))
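+
+    # A quick sanity check of ProblemDegradation along the same lines;
+    # every generated sequence should be counted as correct:
+    p = ProblemDegradation()
+    s, m = p.generate_sequences(1000)
+    print(p.seq2str(s[0]))
+    print(p.compute_nb_correct(None, None, s))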