X-Git-Url: https://www.fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=world.py;h=d95bddb7e27e3052b5df161333f957053f8bdde6;hb=HEAD;hp=e76c07f7c5e75185bc89acc3ed23a419ec7a0d2e;hpb=9e62722596c40655041a0a812512115f1036c6fc;p=picoclvr.git
diff --git a/world.py b/world.py
index e76c07f..d95bddb 100755
--- a/world.py
+++ b/world.py
@@ -1,6 +1,11 @@
 #!/usr/bin/env python

-import math
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret
+
+import math, sys, tqdm

 import torch, torchvision

@@ -8,8 +13,12 @@ from torch import nn
 from torch.nn import functional as F
 import cairo

+######################################################################
+

 class Box:
+    nb_rgb_levels = 10
+
     def __init__(self, x, y, w, h, r, g, b):
         self.x = x
         self.y = y
@@ -30,7 +39,189 @@ class Box:

         return False

-def scene2tensor(xh, yh, scene, size=64):
+######################################################################
+
+
+class Normalizer(nn.Module):
+    def __init__(self, mu, std):
+        super().__init__()
+        self.register_buffer("mu", mu)
+        self.register_buffer("log_var", 2 * torch.log(std))
+
+    def forward(self, x):
+        return (x - self.mu) / torch.exp(self.log_var / 2.0)
+
+
+class SignSTE(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        # torch.sign() would give the three values {-1, 0, 1}; we want a strict binary sign, with 0 mapped to +1
+        s = (x >= 0).float() * 2 - 1
+
+        if self.training:
+            u = torch.tanh(x)
+            return s + u - u.detach()
+        else:
+            return s
+
+
+class DiscreteSampler2d(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        s = (x >= x.max(-3, keepdim=True).values).float()
+
+        if self.training:
+            u = x.softmax(dim=-3)
+            return s + u - u.detach()
+        else:
+            return s
+
+
+def loss_H(binary_logits, h_threshold=1):
+    p = binary_logits.sigmoid().mean(0)
+    h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
+    h.clamp_(max=h_threshold)
+    return h_threshold - h.mean()
+
+
+def train_encoder(
+    train_input,
+    test_input,
+    depth,
+    nb_bits_per_token,
+    dim_hidden=48,
+    lambda_entropy=0.0,
+    lr_start=1e-3,
+    lr_end=1e-4,
+    nb_epochs=10,
+    batch_size=25,
+    logger=None,
+    device=torch.device("cpu"),
+):
+    mu, std = train_input.float().mean(), train_input.float().std()
+
+    def encoder_core(depth, dim):
+        l = [
+            [
+                nn.Conv2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+                nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2),
+                nn.ReLU(),
+            ]
+            for k in range(depth)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    def decoder_core(depth, dim):
+        l = [
+            [
+                nn.ConvTranspose2d(
+                    dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2
+                ),
+                nn.ReLU(),
+                nn.ConvTranspose2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+            ]
+            for k in range(depth - 1, -1, -1)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    encoder = nn.Sequential(
+        Normalizer(mu, std),
+        nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1),
+        nn.ReLU(),
+        # 64x64
+        encoder_core(depth=depth, dim=dim_hidden),
+        # 8x8
+        nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1),
+    )
+
+    quantizer = SignSTE()
+
+    decoder = nn.Sequential(
+        nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1),
+        # 8x8
+        decoder_core(depth=depth, dim=dim_hidden),
+        # 64x64
+        nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1),
+    )
+
+    model = nn.Sequential(encoder, decoder)
+
+    nb_parameters = sum(p.numel() for p in 
model.parameters()) + + logger(f"vqae nb_parameters {nb_parameters}") + + model.to(device) + + for k in range(nb_epochs): + lr = math.exp( + math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k + ) + optimizer = torch.optim.Adam(model.parameters(), lr=lr) + + acc_train_loss = 0.0 + + for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"): + input = input.to(device) + z = encoder(input) + zq = quantizer(z) + output = decoder(zq) + + output = output.reshape( + output.size(0), -1, 3, output.size(2), output.size(3) + ) + + train_loss = F.cross_entropy(output, input) + + if lambda_entropy > 0: + train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5) + + acc_train_loss += train_loss.item() * input.size(0) + + optimizer.zero_grad() + train_loss.backward() + optimizer.step() + + acc_test_loss = 0.0 + + for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"): + input = input.to(device) + z = encoder(input) + zq = quantizer(z) + output = decoder(zq) + + output = output.reshape( + output.size(0), -1, 3, output.size(2), output.size(3) + ) + + test_loss = F.cross_entropy(output, input) + + acc_test_loss += test_loss.item() * input.size(0) + + train_loss = acc_train_loss / train_input.size(0) + test_loss = acc_test_loss / test_input.size(0) + + logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}") + sys.stdout.flush() + + return encoder, quantizer, decoder + + +###################################################################### + + +def scene2tensor(xh, yh, scene, size): width, height = size, size pixel_map = torch.ByteTensor(width, height, 4).fill_(255) data = pixel_map.numpy() @@ -47,7 +238,12 @@ def scene2tensor(xh, yh, scene, size=64): ctx.rel_line_to(0, b.h * size) ctx.rel_line_to(-b.w * size, 0) ctx.close_path() - ctx.set_source_rgba(b.r, b.g, b.b, 1.0) + ctx.set_source_rgba( + b.r / (Box.nb_rgb_levels - 1), + b.g / (Box.nb_rgb_levels - 1), + b.b / (Box.nb_rgb_levels - 1), + 1.0, + ) ctx.fill() hs = size * 0.1 @@ -59,20 +255,31 @@ def scene2tensor(xh, yh, scene, size=64): ctx.close_path() ctx.fill() - return pixel_map[None, :, :, :3].flip(-1).permute(0, 3, 1, 2).float() / 255 + return ( + pixel_map[None, :, :, :3] + .flip(-1) + .permute(0, 3, 1, 2) + .long() + .mul(Box.nb_rgb_levels) + .floor_divide(256) + ) -def random_scene(): +def random_scene(nb_insert_attempts=3): scene = [] colors = [ - (1.00, 0.00, 0.00), - (0.00, 1.00, 0.00), - (0.60, 0.60, 1.00), - (1.00, 1.00, 0.00), - (0.75, 0.75, 0.75), + ((Box.nb_rgb_levels - 1), 0, 0), + (0, (Box.nb_rgb_levels - 1), 0), + (0, 0, (Box.nb_rgb_levels - 1)), + ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0), + ( + (Box.nb_rgb_levels * 2) // 3, + (Box.nb_rgb_levels * 2) // 3, + (Box.nb_rgb_levels * 2) // 3, + ), ] - for k in range(10): + for k in range(nb_insert_attempts): wh = torch.rand(2) * 0.2 + 0.2 xy = torch.rand(2) * (1 - wh) c = colors[torch.randint(len(colors), (1,))] @@ -85,7 +292,7 @@ def random_scene(): return scene -def sequence(nb_steps=10, all_frames=False): +def generate_episode(steps, size=64): delta = 0.1 effects = [ (False, 0, 0), @@ -105,14 +312,16 @@ def sequence(nb_steps=10, all_frames=False): scene = random_scene() xh, yh = tuple(x.item() for x in torch.rand(2)) - frames.append(scene2tensor(xh, yh, scene)) + actions = torch.randint(len(effects), (len(steps),)) + nb_changes = 0 - actions = torch.randint(len(effects), (nb_steps,)) - change = False + for s, a in zip(steps, actions): + if s: + frames.append(scene2tensor(xh, yh, scene, 
size=size))

-    for a in actions:
-        g, dx, dy = effects[a]
-        if g:
+        grasp, dx, dy = effects[a]
+
+        if grasp:
             for b in scene:
                 if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
                     x, y = b.x, b.y
@@ -129,7 +338,7 @@ def sequence(nb_steps=10, all_frames=False):
                 else:
                     xh += dx
                     yh += dy
-                    change = True
+                    nb_changes += 1
         else:
             x, y = xh, yh
             xh += dx
@@ -137,13 +346,7 @@ def sequence(nb_steps=10, all_frames=False):
         if xh < 0 or xh > 1 or yh < 0 or yh > 1:
             xh, yh = x, y

-    if all_frames:
-        frames.append(scene2tensor(xh, yh, scene))
-
-    if not all_frames:
-        frames.append(scene2tensor(xh, yh, scene))
-
-    if change:
+        if nb_changes > len(steps) // 3:
             break

     return frames, actions
@@ -152,177 +355,131 @@ def sequence(nb_steps=10, all_frames=False):
 ######################################################################


-# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2 <x_i, c_j>
-def sq2matrix(x, c):
-    nx = x.pow(2).sum(1)
-    nc = c.pow(2).sum(1)
-    return nx[:, None] + nc[None, :] - 2 * x @ c.t()
-
-
-def update_centroids(x, c, nb_min=1):
-    _, b = sq2matrix(x, c).min(1)
-    b.squeeze_()
-    nb_resets = 0
-
-    for k in range(0, c.size(0)):
-        i = b.eq(k).nonzero(as_tuple=False).squeeze()
-        if i.numel() >= nb_min:
-            c[k] = x.index_select(0, i).mean(0)
-        else:
-            n = torch.randint(x.size(0), (1,))
-            nb_resets += 1
-            c[k] = x[n]
-
-    return c, b, nb_resets
-
-
-def kmeans(x, nb_centroids, nb_min=1):
-    if x.size(0) < nb_centroids * nb_min:
-        print("Not enough points!")
-        exit(1)
-
-    c = x[torch.randperm(x.size(0))[:nb_centroids]]
-    t = torch.full((x.size(0),), -1)
-    n = 0
-
-    while True:
-        c, u, nb_resets = update_centroids(x, c, nb_min)
-        n = n + 1
-        nb_changes = (u - t).sign().abs().sum() + nb_resets
-        t = u
-        if nb_changes == 0:
-            break
-
-    return c, t
-
-
-######################################################################
-
-
-def patchify(x, factor, invert_size=None):
-    if invert_size is None:
-        return (
-            x.reshape(
-                x.size(0),  # 0
-                x.size(1),  # 1
-                factor,  # 2
-                x.size(2) // factor,  # 3
-                factor,  # 4
-                x.size(3) // factor,  # 5
-            )
-            .permute(0, 2, 4, 1, 3, 5)
-            .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
-        )
-    else:
-        return (
-            x.reshape(
-                invert_size[0],  # 0
-                factor,  # 1
-                factor,  # 2
-                invert_size[1],  # 3
-                invert_size[2] // factor,  # 4
-                invert_size[3] // factor,  # 5
-            )
-            .permute(0, 3, 1, 4, 2, 5)
-            .reshape(invert_size)
-        )
-
-
-def train_encoder(input, device=torch.device("cpu")):
-    class SomeLeNet(nn.Module):
-        def __init__(self):
-            super().__init__()
-            self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
-            self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
-            self.fc1 = nn.Linear(256, 200)
-            self.fc2 = nn.Linear(200, 10)
-
-        def forward(self, x):
-            x = F.relu(F.max_pool2d(self.conv1(x), kernel_size=3))
-            x = F.relu(F.max_pool2d(self.conv2(x), kernel_size=2))
-            x = x.view(x.size(0), -1)
-            x = F.relu(self.fc1(x))
-            x = self.fc2(x)
-            return x
-
-    ######################################################################
-
-    model = SomeLeNet()
-
-    nb_parameters = sum(p.numel() for p in model.parameters())
-
-    print(f"nb_parameters {nb_parameters}")
-
-    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
-    criterion = nn.CrossEntropyLoss()
-
-    model.to(device)
-    criterion.to(device)
-
-    train_input, train_targets = train_input.to(device), train_targets.to(device)
-    test_input, test_targets = test_input.to(device), test_targets.to(device)
-
-    mu, std = train_input.mean(), train_input.std()
-    train_input.sub_(mu).div_(std)
-    test_input.sub_(mu).div_(std)
-
-    start_time = time.perf_counter()
-
- for k in range(nb_epochs): - acc_loss = 0.0 +def generate_episodes(nb, steps): + all_frames, all_actions = [], [] + for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"): + frames, actions = generate_episode(steps) + all_frames += frames + all_actions += [actions[None, :]] + return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0) + + +def create_data_and_processors( + nb_train_samples, + nb_test_samples, + mode, + nb_steps, + depth=3, + nb_bits_per_token=8, + nb_epochs=10, + device=torch.device("cpu"), + device_storage=torch.device("cpu"), + logger=None, +): + assert mode in ["first_last"] + + if mode == "first_last": + steps = [True] + [False] * (nb_steps + 1) + [True] + + if logger is None: + logger = lambda s: print(s) + + train_input, train_actions = generate_episodes(nb_train_samples, steps) + train_input, train_actions = train_input.to(device_storage), train_actions.to( + device_storage + ) + test_input, test_actions = generate_episodes(nb_test_samples, steps) + test_input, test_actions = test_input.to(device_storage), test_actions.to( + device_storage + ) - for input, targets in zip( - train_input.split(batch_size), train_targets.split(batch_size) - ): - output = model(input) - loss = criterion(output, targets) - acc_loss += loss.item() + encoder, quantizer, decoder = train_encoder( + train_input, + test_input, + depth=depth, + nb_bits_per_token=nb_bits_per_token, + lambda_entropy=1.0, + nb_epochs=nb_epochs, + logger=logger, + device=device, + ) + encoder.train(False) + quantizer.train(False) + decoder.train(False) + + z = encoder(train_input[:1].to(device)) + pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :] + z_h, z_w = z.size(2), z.size(3) + + logger(f"vqae input {train_input[0].size()} output {z[0].size()}") + + def frame2seq(input, batch_size=25): + seq = [] + p = pow2.to(device) + for x in input.split(batch_size): + x = x.to(device) + z = encoder(x) + ze_bool = (quantizer(z) >= 0).long() + output = ( + ze_bool.permute(0, 2, 3, 1).reshape( + ze_bool.size(0), -1, ze_bool.size(1) + ) + * p + ).sum(-1) + + seq.append(output) + + return torch.cat(seq, dim=0) + + def seq2frame(input, batch_size=25, T=1e-2): + frames = [] + p = pow2.to(device) + for seq in input.split(batch_size): + seq = seq.to(device) + zd_bool = (seq[:, :, None] // p) % 2 + zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2) + logits = decoder(zd_bool * 2.0 - 1.0) + logits = logits.reshape( + logits.size(0), -1, 3, logits.size(2), logits.size(3) + ).permute(0, 2, 3, 4, 1) + output = torch.distributions.categorical.Categorical( + logits=logits / T + ).sample() - optimizer.zero_grad() - loss.backward() - optimizer.step() + frames.append(output) - nb_test_errors = 0 - for input, targets in zip( - test_input.split(batch_size), test_targets.split(batch_size) - ): - wta = model(input).argmax(1) - nb_test_errors += (wta != targets).long().sum() - test_error = nb_test_errors / test_input.size(0) - duration = time.perf_counter() - start_time + return torch.cat(frames, dim=0) - print(f"loss {k} {duration:.02f}s {acc_loss:.02f} {test_error*100:.02f}%") + return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame ###################################################################### if __name__ == "__main__": - import time - - all_frames = [] - nb = 1000 - start_time = time.perf_counter() - for n in range(nb): - frames, actions = sequence(nb_steps=31) - all_frames += frames - end_time = time.perf_counter() - print(f"{nb / 
(end_time - start_time):.02f} samples per second") - - input = torch.cat(all_frames, 0) + ( + train_input, + train_actions, + test_input, + test_actions, + frame2seq, + seq2frame, + ) = create_data_and_processors( + 250, + 1000, + nb_epochs=5, + mode="first_last", + nb_steps=20, + ) - # x = patchify(input, 8) - # y = x.reshape(x.size(0), -1) - # print(f"{x.size()=} {y.size()=}") - # centroids, t = kmeans(y, 4096) - # results = centroids[t] - # results = results.reshape(x.size()) - # results = patchify(results, 8, input.size()) + input = test_input[:256] - print(f"{input.size()=} {results.size()=}") + seq = frame2seq(input) + output = seq2frame(seq) - torchvision.utils.save_image(input[:64], "orig.png", nrow=8) - torchvision.utils.save_image(results[:64], "qtiz.png", nrow=8) + torchvision.utils.save_image( + input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16 + ) - # frames, actions = sequence(nb_steps=31, all_frames=True) - # frames = torch.cat(frames, 0) - # torchvision.utils.save_image(frames, "seq.png", nrow=8) + torchvision.utils.save_image( + output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16 + )
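
######################################################################

# A minimal, self-contained sketch of the straight-through estimator
# used by SignSTE and DiscreteSampler2d above: the forward value is the
# hard decision s, while the smooth surrogate u supplies the gradient,
# since s - u.detach() is a constant as far as autograd is concerned.
# The tensor values below are arbitrary illustrations.

import torch

x = torch.tensor([-1.5, -0.1, 0.2, 3.0], requires_grad=True)
s = (x >= 0).float() * 2 - 1  # hard sign in {-1, 1}
u = torch.tanh(x)             # smooth surrogate
y = s + u - u.detach()        # forward value is exactly s
y.sum().backward()
print(y.detach())  # tensor([-1., -1.,  1.,  1.])
print(x.grad)      # 1 - tanh(x)**2, the surrogate's gradient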
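
######################################################################

# The per-epoch learning rate in train_encoder above interpolates
# geometrically between lr_start and lr_end; the exp/log expression is
# equivalent to lr_start * (lr_end / lr_start) ** (k / (nb_epochs - 1)).
# A quick check with the function's default values:

import math

lr_start, lr_end, nb_epochs = 1e-3, 1e-4, 10

for k in range(nb_epochs):
    lr = math.exp(
        math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
    )
    print(k, f"{lr:.2e}")  # 1.00e-03 at k=0, down to 1.00e-04 at k=9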
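
######################################################################

# A minimal sketch of the reconstruction loss in train_encoder above:
# the decoder emits 3 * Box.nb_rgb_levels channels per pixel, reshaped
# so that dim 1 indexes the intensity classes, and F.cross_entropy then
# applies per pixel and per RGB channel because the integer target has
# the matching trailing dimensions. Shapes are illustrative assumptions.

import torch
from torch.nn import functional as F

nb_rgb_levels, N, H, W = 10, 25, 64, 64

logits = torch.randn(N, 3 * nb_rgb_levels, H, W)     # raw decoder output
logits = logits.reshape(N, nb_rgb_levels, 3, H, W)   # classes on dim 1
target = torch.randint(nb_rgb_levels, (N, 3, H, W))  # quantized RGB image

loss = F.cross_entropy(logits, target)  # scalar, averaged over N*3*H*W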
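
######################################################################

# A minimal round-trip sketch of the bit-packing done by frame2seq and
# seq2frame above: each spatial position of the quantized code becomes
# one token by reading its nb_bits_per_token sign bits as a binary
# integer, and integer division and modulo recover the bits. The sizes
# below are illustrative, not values taken from the code.

import torch

nb_bits, N, H, W = 8, 25, 8, 8
pow2 = 2 ** torch.arange(nb_bits)

ze_bool = torch.randint(2, (N, nb_bits, H, W))  # 0/1 code bits, NxCxHxW

# pack: NxCxHxW -> Nx(H*W), one token in [0, 2**nb_bits) per position
seq = (ze_bool.permute(0, 2, 3, 1).reshape(N, -1, nb_bits) * pow2).sum(-1)

# unpack: Nx(H*W) -> NxCxHxW
bits = ((seq[:, :, None] // pow2) % 2).reshape(N, H, W, nb_bits)
bits = bits.permute(0, 3, 1, 2)

assert (bits == ze_bool).all()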