Update.
author François Fleuret <francois@fleuret.org>
Sun, 9 Jul 2023 10:58:42 +0000 (12:58 +0200)
committer François Fleuret <francois@fleuret.org>
Sun, 9 Jul 2023 10:58:42 +0000 (12:58 +0200)
expr.py
tasks.py
world.py

diff --git a/expr.py b/expr.py
index 8690504..f294d68 100755 (executable)
--- a/expr.py
+++ b/expr.py
@@ -45,10 +45,6 @@ def generate_program(nb_variables, length):
     s = ""
     variables = set()
 
-    # We take length itself half of the time, and uniform between 1
-    # and length otherwise. The actual length can be slightly greater
-
-    length = min(length, 1 + torch.randint(length * 2, (1,)).item())
     while len(s) < length:
         v = random_var(nb_variables=nb_variables)
         s += v + "=" + random_expr(variables, budget=20) + ";"
@@ -70,10 +66,15 @@ def generate_sequences(nb, nb_variables=5, length=20):
     assert nb_variables <= 26
     sequences = []
     result_max = 99
+
     for n in range(nb):
+        # We take length itself half of the time, and uniform between
+        # 1 and length otherwise. The actual length can be slightly
+        # greater
+
+        l = min(length, 1 + torch.randint(length * 2, (1,)).item())
         result = None
         while result == None or max(result.values()) > result_max:
-            l = length
             p, v = generate_program(nb_variables, l)
             v = ", ".join(['"' + v + '": ' + v for v in v])
             ldict = {}
index 04b8f84..4d7e90e 100755 (executable)
--- a/tasks.py
+++ b/tasks.py
@@ -840,9 +840,8 @@ class Expr(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
-            if split == "train":
-                last = (batch != self.filler).max(0).values.nonzero().max() + 3
-                batch = batch[:, :last]
+            last = (batch != self.filler).max(0).values.nonzero().max() + 3
+            batch = batch[:, :last]
             yield batch
 
     def vocabulary_size(self):
@@ -866,7 +865,8 @@ class Expr(Task):
 
             def compute_nb_correct(input):
                 result = input.clone()
-                ar_mask = (result == self.space).long().cumsum(dim=1).clamp(max=1)
+                s = (result == self.space).long()
+                ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
                 result = (1 - ar_mask) * result + ar_mask * self.filler
                 masked_inplace_autoregression(
                     model,
index 5ba0f36..fb5d5c7 100755 (executable)
--- a/world.py
+++ b/world.py
@@ -67,7 +67,7 @@ def random_scene():
     colors = [
         (1.00, 0.00, 0.00),
         (0.00, 1.00, 0.00),
-        (0.00, 0.00, 1.00),
+        (0.60, 0.60, 1.00),
         (1.00, 1.00, 0.00),
         (0.75, 0.75, 0.75),
     ]
@@ -100,8 +100,7 @@ def sequence(nb_steps=10, all_frames=False):
     ]
 
     while True:
-
-        frames =[]
+        frames = []
 
         scene = random_scene()
         xh, yh = tuple(x.item() for x in torch.rand(2))
@@ -150,8 +149,111 @@ def sequence(nb_steps=10, all_frames=False):
     return frames, actions
 
 
+######################################################################
+
+
def sq2matrix(x, c):
    """Pairwise squared Euclidean distances between the rows of x and c.

    Uses the expansion ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2<x_i, c_j>
    so the whole (n, m) distance matrix comes from a single matmul.
    """
    sq_x = (x * x).sum(dim=1)  # (n,) squared norms of the points
    sq_c = (c * c).sum(dim=1)  # (m,) squared norms of the centroids
    return sq_x[:, None] + sq_c[None, :] - 2 * torch.mm(x, c.t())
+
+
def update_centroids(x, c, nb_min=1):
    """One Lloyd iteration: assign each row of x to its nearest centroid
    and recompute the centroids as cluster means.

    x: (n, d) data points; c: (k, d) centroids, modified in place.
    A centroid owning fewer than nb_min points is re-seeded on a random
    data point and counted as a reset.

    Returns (c, assignment, nb_resets) where assignment is the (n,)
    index of the nearest centroid for each point.
    """
    _, b = sq2matrix(x, c).min(1)
    nb_resets = 0

    for k in range(c.size(0)):
        # as_tuple=True always yields a 1-D index tensor; the previous
        # nonzero(...).squeeze() produced a 0-dim tensor when the cluster
        # held exactly one point, which made index_select raise.
        i = b.eq(k).nonzero(as_tuple=True)[0]
        if i.numel() >= nb_min:
            c[k] = x.index_select(0, i).mean(0)
        else:
            # Dead / under-populated cluster: re-seed on a random point
            n = torch.randint(x.size(0), (1,))
            nb_resets += 1
            c[k] = x[n]

    return c, b, nb_resets
+
+
def kmeans(x, nb_centroids, nb_min=1):
    """Lloyd's k-means on the rows of x.

    Iterates centroid updates until an iteration changes no assignment
    and triggers no centroid reset. Returns (centroids, assignment).
    Exits the process if x has fewer than nb_centroids * nb_min rows.
    """
    if x.size(0) < nb_centroids * nb_min:
        print("Not enough points!")
        exit(1)

    # Initialize the centroids on nb_centroids distinct data points
    c = x[torch.randperm(x.size(0))[:nb_centroids]]
    previous = torch.full((x.size(0),), -1)

    while True:
        c, assignment, nb_resets = update_centroids(x, c, nb_min)
        nb_changes = (assignment - previous).sign().abs().sum() + nb_resets
        if nb_changes == 0:
            return c, assignment
        previous = assignment
+
+
+######################################################################
+
+
def patchify(x, factor, invert_size=None):
    """Cut images into a factor x factor grid of sub-images, or undo it.

    With invert_size=None, x of size (N, C, H, W) becomes
    (N * factor**2, C, H // factor, W // factor), the patches of each
    source image laid out in row-major order.

    With invert_size=(N, C, H, W), x must be a previously patchified
    tensor and the original image layout is reconstructed.
    """
    if invert_size is None:
        n, c, h, w = x.size()
        # (N, C, gh, H/f, gw, W/f) -> (N, gh, gw, C, H/f, W/f)
        patches = x.reshape(n, c, factor, h // factor, factor, w // factor)
        patches = patches.permute(0, 2, 4, 1, 3, 5)
        return patches.reshape(-1, c, h // factor, w // factor)

    n, c, h, w = invert_size
    # (N, gh, gw, C, H/f, W/f) -> (N, C, gh, H/f, gw, W/f)
    images = x.reshape(n, factor, factor, c, h // factor, w // factor)
    images = images.permute(0, 3, 1, 4, 2, 5)
    return images.reshape(invert_size)
+
+
if __name__ == "__main__":
    import time

    # Generate a corpus of frames and measure the generation throughput
    all_frames = []
    nb = 1000
    start_time = time.perf_counter()
    for n in range(nb):
        frames, actions = sequence(nb_steps=31)
        all_frames += frames
    end_time = time.perf_counter()
    print(f"{nb / (end_time - start_time):.02f} samples per second")

    # "images" rather than "input" to avoid shadowing the builtin
    images = torch.cat(all_frames, 0)

    # Vector-quantize the 8x8 patches of the frames with k-means
    x = patchify(images, 8)
    y = x.reshape(x.size(0), -1)
    print(f"{x.size()=} {y.size()=}")
    centroids, t = kmeans(y, 4096)

    # Rebuild the frames from their quantized patches
    results = centroids[t].reshape(x.size())
    results = patchify(results, 8, invert_size=images.size())

    print(f"{images.size()=} {results.size()=}")

    torchvision.utils.save_image(images[:64], "orig.png", nrow=8)
    torchvision.utils.save_image(results[:64], "qtiz.png", nrow=8)