X-Git-Url: https://www.fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=9437136ce1a45b066d6884e205540083bfb4d2d6;hb=HEAD;hp=38dccb9f8eea57437ce7574d4f87208ab0077b38;hpb=87214829798bca7e3eb853df4a27bcb918bb9f67;p=picoclvr.git diff --git a/main.py b/main.py index 38dccb9..3ff64b7 100755 --- a/main.py +++ b/main.py @@ -5,16 +5,14 @@ # Written by Francois Fleuret -# torch.backends.cuda.matmul.allow_tf23 -# torch.autocast(torch.bfloat16) - -import math, sys, argparse, time, tqdm, os +import math, sys, argparse, time, tqdm, os, datetime, warnings import torch, torchvision from torch import nn from torch.nn import functional as F -import mygpt, tensorstack +import ffutils +import mygpt, tasks, problems ###################################################################### @@ -32,19 +30,28 @@ parser = argparse.ArgumentParser( ) parser.add_argument( - "--task", type=str, default="picoclvr", help="picoclvr, mnist, maze, snake, stack" + "--task", + type=str, + default="twotargets", + help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed", ) parser.add_argument("--log_filename", type=str, default="train.log", help=" ") -parser.add_argument("--result_dir", type=str, default="results_default") +parser.add_argument("--result_dir", type=str, default=None) parser.add_argument("--seed", type=int, default=0) -parser.add_argument("--nb_epochs", type=int, default=None) +parser.add_argument("--max_percents_of_test_in_train", type=int, default=1) + +######################################## + +parser.add_argument("--nb_epochs", type=int, default=50) parser.add_argument("--batch_size", type=int, default=None) +parser.add_argument("--physical_batch_size", type=int, default=None) + parser.add_argument("--nb_train_samples", type=int, default=None) parser.add_argument("--nb_test_samples", type=int, default=None) @@ -55,26 +62,59 @@ parser.add_argument("--learning_rate", type=float, default=1e-4) parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6") -parser.add_argument("--dim_model", type=int, default=512) +######################################## -parser.add_argument("--dim_keys", type=int, default=64) +parser.add_argument("--model", type=str, default=None) -parser.add_argument("--dim_hidden", type=int, default=2048) +parser.add_argument("--dim_model", type=int, default=None) -parser.add_argument("--nb_heads", type=int, default=8) +parser.add_argument("--dim_keys", type=int, default=None) -parser.add_argument("--nb_blocks", type=int, default=12) +parser.add_argument("--dim_hidden", type=int, default=None) + +parser.add_argument("--nb_heads", type=int, default=None) + +parser.add_argument("--nb_blocks", type=int, default=None) parser.add_argument("--dropout", type=float, default=0.1) +######################################## + parser.add_argument("--deterministic_synthesis", action="store_true", default=False) parser.add_argument("--no_checkpoint", action="store_true", default=False) -parser.add_argument("--overwrite_results", action="store_true", default=False) +parser.add_argument("--resume", action="store_true", default=False) parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth") +############################## +# filetask + +parser.add_argument("--filetask_train_file", type=str, default=None) + +parser.add_argument("--filetask_test_file", type=str, default=None) + +############################## +# rpl options + 
+parser.add_argument("--rpl_nb_starting_values", type=int, default=3) + +parser.add_argument("--rpl_max_input", type=int, default=9) + +parser.add_argument("--rpl_prog_len", type=int, default=8) + +parser.add_argument("--rpl_nb_runs", type=int, default=5) + +parser.add_argument("--rpl_no_prog", action="store_true", default=False) + +############################## +# grid options + +parser.add_argument("--grid_size", type=int, default=6) + +parser.add_argument("--grid_fraction_play", type=float, default=0) + ############################## # picoclvr options @@ -98,896 +138,274 @@ parser.add_argument("--maze_nb_walls", type=int, default=15) ############################## # Snake options -parser.add_argument("--snake_height", type=int, default=6) +parser.add_argument("--snake_height", type=int, default=9) -parser.add_argument("--snake_width", type=int, default=8) +parser.add_argument("--snake_width", type=int, default=12) parser.add_argument("--snake_nb_colors", type=int, default=5) parser.add_argument("--snake_length", type=int, default=200) ############################## -# Snake options +# ByHeart options + +parser.add_argument("--byheart_separation", type=int, default=1) + +############################## +# Stack options parser.add_argument("--stack_nb_steps", type=int, default=100) -parser.add_argument("--stack_nb_stacks", type=int, default=1) +parser.add_argument("--stack_nb_stacks", type=int, default=3) parser.add_argument("--stack_nb_digits", type=int, default=3) parser.add_argument("--stack_fraction_values_for_train", type=float, default=None) +############################## +# Expr options + +parser.add_argument("--expr_nb_variables", type=int, default=5) + +parser.add_argument("--expr_sequence_length", type=int, default=40) + +parser.add_argument("--expr_operand_max", type=int, default=9) + +parser.add_argument("--expr_result_max", type=int, default=99) + +parser.add_argument("--expr_input_file", type=str, default=None) + +############################## +# Mixing + +parser.add_argument("--mixing_hard", action="store_true", default=False) + +parser.add_argument("--mixing_deterministic_start", action="store_true", default=False) + +############################## +# greed options + +parser.add_argument("--greed_height", type=int, default=5) + +parser.add_argument("--greed_width", type=int, default=7) + +parser.add_argument("--greed_T", type=int, default=25) + +parser.add_argument("--greed_nb_walls", type=int, default=5) + +parser.add_argument("--greed_nb_coins", type=int, default=2) + ###################################################################### args = parser.parse_args() assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"} -try: - os.mkdir(args.result_dir) -except FileExistsError: - if not args.overwrite_results: - print(f"result directory {args.result_dir} already exists") - exit(1) - -log_file = open(os.path.join(args.result_dir, args.log_filename), "a") - -if args.seed >= 0: - # torch.backends.cudnn.deterministic = True - # torch.backends.cudnn.benchmark = False - # torch.use_deterministic_algorithms(True) - torch.manual_seed(args.seed) - if torch.cuda.is_available(): - torch.cuda.manual_seed_all(args.seed) +if args.result_dir is None: + args.result_dir = f"results_{args.task}" ###################################################################### -default_args = { - "picoclvr": { - "nb_epochs": 25, +default_task_args = { + "file": { + "model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000, }, - "mnist": { - "nb_epochs": 
25, - "batch_size": 10, + "addition": { + "model": "352M", + "batch_size": 25, + "nb_train_samples": 250000, + "nb_test_samples": 10000, + }, + "byheart": { + "model": "37M", + "batch_size": 25, + "nb_train_samples": 50000, + "nb_test_samples": 10000, + }, + "expr": { + "model": "352M", + "batch_size": 25, + "nb_train_samples": 2500000, + "nb_test_samples": 10000, + }, + "grid": { + "model": "37M", + "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000, }, + "qmlp": { + "model": "37M", + "batch_size": 10, + "nb_train_samples": 100000, + "nb_test_samples": 1000, + }, + "guessop": { + "model": "352M", + "batch_size": 25, + "nb_train_samples": 1000000, + "nb_test_samples": 10000, + }, + "learnop": { + "model": "37M", + "batch_size": 25, + "nb_train_samples": 50000, + "nb_test_samples": 10000, + }, "maze": { - "nb_epochs": 25, + "model": "37M", + "batch_size": 5, + "nb_train_samples": 100000, + "nb_test_samples": 10000, + }, + "picoclvr": { + "model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000, }, + "rpl": { + "model": "352M", + "batch_size": 5, + "nb_train_samples": 2500000, + "nb_test_samples": 10000, + }, "snake": { - "nb_epochs": 5, + "model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000, }, "stack": { - "nb_epochs": 5, + "model": "37M", "batch_size": 25, "nb_train_samples": 100000, "nb_test_samples": 1000, }, + "twotargets": { + "model": "37M", + "batch_size": 25, + "nb_train_samples": 50000, + "nb_test_samples": 10000, + }, + "memory": { + "model": "37M", + "batch_size": 100, + "nb_train_samples": 25000, + "nb_test_samples": 1000, + }, + "mixing": { + "model": "37M", + "batch_size": 25, + "nb_train_samples": 250000, + "nb_test_samples": 10000, + }, + "mnist": { + "model": "37M", + "batch_size": 10, + "nb_train_samples": 60000, + "nb_test_samples": 10000, + }, + "greed": { + "model": "37M", + "batch_size": 25, + "nb_train_samples": 25000, + "nb_test_samples": 10000, + }, } -if args.task in default_args: - for k, v in default_args[args.task].items(): +if args.task in default_task_args: + for k, v in default_task_args[args.task].items(): if getattr(args, k) is None: setattr(args, k, v) ###################################################################### +default_model_args = { + "17K": { + "dim_model": 32, + "dim_keys": 32, + "dim_hidden": 32, + "nb_heads": 2, + "nb_blocks": 2, + }, + "4M": { + "dim_model": 256, + "dim_keys": 32, + "dim_hidden": 1024, + "nb_heads": 4, + "nb_blocks": 6, + }, + "37M": { + "dim_model": 512, + "dim_keys": 64, + "dim_hidden": 2048, + "nb_heads": 8, + "nb_blocks": 12, + }, + "122M": { + "dim_model": 768, + "dim_keys": 64, + "dim_hidden": 2048, + "nb_heads": 8, + "nb_blocks": 24, + }, + "352M": { + "dim_model": 1024, + "dim_keys": 64, + "dim_hidden": 2048, + "nb_heads": 8, + "nb_blocks": 48, + }, +} -def log_string(s): - t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime()) - - if log_file is not None: - log_file.write(t + s + "\n") - log_file.flush() - - print(t + s) - sys.stdout.flush() - - -for n in vars(args): - log_string(f"args.{n} {getattr(args, n)}") - -###################################################################### - - -# ra_mask is boolean, with 1s on the values to generate - - -def masked_inplace_autoregression( - model, - batch_size, - input, - ar_mask, - forbidden_tokens=None, - progress_bar_desc="autoregression", - device=torch.device("cpu"), -): - # p = logits.softmax(1) - # entropy[:,s]= p.xlogy(p).sum(1) / math.log(2) - batches = 
zip(input.split(batch_size), ar_mask.split(batch_size)) - if progress_bar_desc is not None: - batches = tqdm.tqdm( - batches, - dynamic_ncols=True, - desc=progress_bar_desc, - total=input.size(0) // batch_size, - ) - for input, ar_mask in batches: - i = (ar_mask.sum(0) > 0).nonzero() - if i.min() > 0: - model( - mygpt.BracketedSequence(input, 0, i.min()) - ) # Needed to initialize the model's cache - for s in range(i.min(), i.max() + 1): - output = model(mygpt.BracketedSequence(input, s, 1)).x - logits = output[:, s] - if forbidden_tokens is not None: - logits = logits.masked_fill(forbidden_tokens, float("-inf")) - if args.deterministic_synthesis: - t_next = logits.argmax(1) - else: - dist = torch.distributions.categorical.Categorical(logits=logits) - t_next = dist.sample() - input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s] - - -###################################################################### - - -class Task: - def batches(self, split="train"): - pass - - def vocabulary_size(self): - pass - - def produce_results(self, n_epoch, model): - pass - - -###################################################################### - -import picoclvr - - -class TaskPicoCLVR(Task): - # Make a tensor from a list of strings - def tensorize(self, descr): - token_descr = [s.strip().split(" ") for s in descr] - l = max([len(s) for s in token_descr]) - token_descr = [s + [""] * (l - len(s)) for s in token_descr] - id_descr = [[self.token2id[u] for u in s] for s in token_descr] - return torch.tensor(id_descr, device=self.device) - - # Make a list of strings from a tensor - def detensorize(self, x): - return [" ".join([self.id2token[t.item()] for t in r]) for r in x] - - # trim all the tensors in the tuple z to remove as much token from - # left and right in the first tensor. 
If z is a tuple, all its - # elements are trimed according to the triming for the first - def trim(self, z, token=""): - n = self.token2id[token] - if type(z) == tuple: - x = z[0] - i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0) - a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min() - return tuple([t[:, a:b] for t in z]) - else: - i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0) - a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min() - return z[:, a:b] - - ###################### - # Not the cleanest part of the code - - # Extract the last image of each sequence, from the last - # included, and set to all the tokens from the beginning of - # that image to the end - def excise_last_image(self, input): - t_img, t_nul = self.token2id[""], self.token2id[""] - nb_img_tokens = self.height * self.width + 1 - - input = input.clone() - t = (input == t_img).long() - tail_masks = (t.cumsum(dim=1) == t.sum(dim=1, keepdim=True)).long() - i = (t * tail_masks).nonzero(as_tuple=True) - j = ( - i[0][:, None], - i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :], - ) - images = self.trim(input[j]) - input[j] = t_nul - loss_masks = 1 - tail_masks - input, loss_masks = self.trim((input, loss_masks)) - return input, loss_masks, images - - def add_true_image(self, input, images, loss_masks): - t_nul = self.token2id[""] - nb_img_tokens = self.height * self.width + 1 - input = F.pad(input, (0, nb_img_tokens), value=t_nul) - loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0) - t = (input == t_nul).long() - i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True) - j = ( - i[0][:, None], - i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :], - ) - input[j] = images - loss_masks[j] = 1 - input, loss_masks = self.trim((input, loss_masks)) - return input, loss_masks - - def add_generated_image(self, input, loss_masks, model): - t_img, t_nul = self.token2id[""], self.token2id[""] - nb_img_tokens = self.height * self.width + 1 - - input = F.pad(input, (0, nb_img_tokens), value=t_nul) - loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0) - t = (input == t_nul).long() - i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True) - input[i] = t_img - - j = ( - i[0][:, None], - i[1][:, None] - + 1 - + torch.arange(nb_img_tokens - 1, device=input.device)[None, :], - ) - ar_masks = input.new_zeros(input.size(), dtype=torch.int64) - ar_masks[j] = 1 - forbidden_tokens = ( - torch.arange(self.vocabulary_size(), device=input.device) == t_nul - ) - with torch.autograd.no_grad(): - t = model.training - model.eval() - masked_inplace_autoregression( - model, - self.batch_size, - input, - ar_masks, - forbidden_tokens, - progress_bar_desc=None, - device=self.device, - ) - model.train(t) - - input, loss_masks = self.trim((input, loss_masks)) - - return input, loss_masks - - ###################### - - def __init__( - self, - nb_train_samples, - nb_test_samples, - batch_size, - height, - width, - nb_colors=5, - device=torch.device("cpu"), - pruner_train=None, - pruner_eval=None, - ): - def generate_descr(nb, cache_suffix, pruner): - return picoclvr.generate( - nb, - height=self.height, - width=self.width, - nb_colors=nb_colors, - pruner=pruner, - ) - - self.height = height - self.width = width - self.batch_size = batch_size - self.device = device - self.pruner_train = pruner_train - self.pruner_eval = pruner_eval - - param = { - "nb_train_samples": nb_train_samples, - "nb_test_samples": nb_test_samples, - "height": height, 
- "width": width, - "nb_colors": nb_colors, - "batch_size": batch_size, - "rng_state": list(torch.get_rng_state()), - } - - log_string( - f"generating {nb_train_samples+nb_test_samples} samples (can take some time)" - ) - self.train_descr = generate_descr( - nb_train_samples, "train", pruner=self.pruner_train - ) - self.test_descr = generate_descr(nb_test_samples, "test", pruner=None) - - # Build the tokenizer - tokens = {"", ""} - for d in [self.train_descr, self.test_descr]: - for s in d: - for t in s.strip().split(" "): - tokens.add(t) - # make this set a sorted list to get the same tensors given - # the same descr - tokens = list(tokens) - tokens.sort() - self.token2id = dict([(t, n) for n, t in enumerate(tokens)]) - self.id2token = dict([(n, t) for n, t in enumerate(tokens)]) - - # Tokenize the train and test sets - self.train_input = self.tensorize(self.train_descr) - self.test_input = self.tensorize(self.test_descr) - - def batches(self, split="train"): - assert split in {"train", "test"} - input = self.train_input if split == "train" else self.test_input - for batch in tqdm.tqdm( - input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}" - ): - yield self.trim(batch) - - def vocabulary_size(self): - return len(self.token2id) - - def compute_missing_properties(self, n_epoch, model, pruner=None): - acc_nb_requested_properties = [] - acc_nb_missing_properties = [] - acc_nb_results = 0 - - for input in tqdm.tqdm( - self.test_input.split(self.batch_size), - dynamic_ncols=True, - desc=f"test-properties", - ): - tape, loss_masks, _ = self.excise_last_image(input) - tape, loss_masks = self.add_generated_image(tape, loss_masks, model) - result_descr = self.detensorize(tape) - np = picoclvr.nb_properties( - result_descr, - height=self.height, - width=self.width, - pruner=pruner, - ) - nb_requested_properties, _, nb_missing_properties = zip(*np) - acc_nb_requested_properties += nb_requested_properties - acc_nb_missing_properties += nb_missing_properties - acc_nb_results += len(result_descr) - - nb_requested_properties = sum(acc_nb_requested_properties) - nb_missing_properties = sum(acc_nb_missing_properties) - - prefix = "" if pruner is None else "pruned_" - log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}") - log_string( - f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}" - ) - log_string( - f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%" - ) - - ###################################################################### - - def produce_results(self, n_epoch, model): - self.compute_missing_properties(n_epoch, model) - - if self.pruner_eval is not None: - self.compute_missing_properties(n_epoch, model, self.pruner_eval) - - nb_tokens_to_generate = self.height * self.width + 3 - result_descr = [] - nb_per_primer = 8 - primer = [] - - for primer_descr in [ - "red above green green top blue right of red", - "there is red there is yellow there is blue", - "red below yellow yellow below green green below blue red right yellow left green right blue left", - "green bottom yellow bottom green left of blue yellow right of blue blue top", - ]: - primer += [primer_descr] * nb_per_primer - - tape = self.tensorize(primer) - loss_masks = 1 - (tape == self.token2id[""]).long() - tape, loss_masks = self.add_generated_image(tape, loss_masks, model) - result_descr = self.detensorize(tape) - - np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width) - - 
acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np) - acc_nb_results = len(result_descr) - - nb_requested_properties = sum(acc_nb_requested_properties) - nb_missing_properties = sum(acc_nb_missing_properties) - - prefix = "demo_" - log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}") - log_string( - f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}" - ) - log_string( - f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%" - ) - - img = picoclvr.descr2img(result_descr, height=self.height, width=self.width) - - if img.dim() == 5: - if img.size(1) == 1: - img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64) - else: - img = torch.cat( - [ - torchvision.utils.make_grid(x, padding=1, pad_value=64)[None] - for x in img - ], - 0, - ) - - image_name = os.path.join(args.result_dir, f"picoclvr_result_{n_epoch:04d}.png") - torchvision.utils.save_image( - img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0 - ) - log_string(f"wrote {image_name}") - - -###################################################################### - - -class TaskMNIST(Task): - def __init__(self, batch_size, device=torch.device("cpu")): - self.device = device - self.batch_size = batch_size - - def batches(self, split="train"): - assert split in {"train", "test"} - data_set = torchvision.datasets.MNIST( - root="./data", train=(split == "train"), download=True - ) - data_input = data_set.data.view(-1, 28 * 28).long() - if args.nb_train_samples is not None: - data_input = data_input[: args.nb_train_samples] - for batch in tqdm.tqdm( - data_input.split(self.batch_size), desc=f"epoch-{split}" - ): - yield batch - - def vocabulary_size(self): - return 256 - - def produce_results(self, n_epoch, model): - results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64) - ar_mask = torch.full_like(results, 1) - masked_inplace_autoregression( - model, self.batch_size, results, ar_mask, device=self.device - ) - image_name = os.path.join(args.result_dir, f"mnist_result_{n_epoch:04d}.png") - torchvision.utils.save_image( - 1 - results.reshape(-1, 1, 28, 28) / 255.0, - image_name, - nrow=16, - pad_value=0.8, - ) - log_string(f"wrote {image_name}") - - -###################################################################### - -import maze - - -class TaskMaze(Task): - def map2seq(self, *m): - return torch.cat([x.flatten(1) for x in m], 1) - - def seq2map(self, s): - s = s.reshape(s.size(0), -1, self.height, self.width) - return (s[:, k] for k in range(s.size(1))) - - def __init__( - self, - nb_train_samples, - nb_test_samples, - batch_size, - height, - width, - nb_walls, - device=torch.device("cpu"), - ): - self.batch_size = batch_size - self.height = height - self.width = width - self.device = device - - train_mazes, train_paths, _ = maze.create_maze_data( - nb_train_samples, - height=height, - width=width, - nb_walls=nb_walls, - progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"), - ) - self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device)) - - test_mazes, test_paths, _ = maze.create_maze_data( - nb_test_samples, - height=height, - width=width, - nb_walls=nb_walls, - progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"), - ) - self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device)) - - self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1 - - def batches(self, split="train", 
nb_to_use=-1, desc=None): - assert split in {"train", "test"} - input = self.train_input if split == "train" else self.test_input - if nb_to_use > 0: - input = input[:nb_to_use] - if desc is None: - desc = f"epoch-{split}" - for batch in tqdm.tqdm( - input.split(self.batch_size), dynamic_ncols=True, desc=desc - ): - yield batch - - def vocabulary_size(self): - return self.nb_codes - - def compute_error(self, model, split="train", nb_to_use=-1): - nb_total, nb_correct = 0, 0 - count = torch.zeros( - self.width * self.height, - self.width * self.height, - device=self.device, - dtype=torch.int64, - ) - for input in tqdm.tqdm( - task.batches(split, nb_to_use), - dynamic_ncols=True, - desc=f"test-mazes", - ): - result = input.clone() - ar_mask = result.new_zeros(result.size()) - ar_mask[:, self.height * self.width :] = 1 - result *= 1 - ar_mask - masked_inplace_autoregression( - model, - self.batch_size, - result, - ar_mask, - progress_bar_desc=None, - device=self.device, - ) - mazes, paths = self.seq2map(result) - path_correctness = maze.path_correctness(mazes, paths) - nb_correct += path_correctness.long().sum() - nb_total += mazes.size(0) - - optimal_path_lengths = ( - (input[:, self.height * self.width :] == maze.v_path).long().sum(1) - ) - predicted_path_lengths = ( - (result[:, self.height * self.width :] == maze.v_path).long().sum(1) - ) - optimal_path_lengths = optimal_path_lengths[path_correctness] - predicted_path_lengths = predicted_path_lengths[path_correctness] - count[optimal_path_lengths, predicted_path_lengths] += 1 - - if count.max() == 0: - count = None - else: - count = count[ - : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1 - ] - - return nb_total, nb_correct, count - - def produce_results(self, n_epoch, model): - with torch.autograd.no_grad(): - t = model.training - model.eval() - - train_nb_total, train_nb_correct, count = self.compute_error( - model, "train", nb_to_use=1000 - ) - log_string( - f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%" - ) - - test_nb_total, test_nb_correct, count = self.compute_error( - model, "test", nb_to_use=1000 - ) - log_string( - f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%" - ) - - if count is not None: - proportion_optimal = count.diagonal().sum().float() / count.sum() - log_string(f"proportion_optimal_test {proportion_optimal*100:.02f}%") - with open( - os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.txt"), "w" - ) as f: - for i in range(count.size(0)): - for j in range(count.size(1)): - eol = " " if j < count.size(1) - 1 else "\n" - f.write(f"{count[i,j]}{eol}") - - input = self.test_input[:48] - result = input.clone() - ar_mask = result.new_zeros(result.size()) - ar_mask[:, self.height * self.width :] = 1 - result *= 1 - ar_mask - masked_inplace_autoregression( - model, self.batch_size, result, ar_mask, device=self.device - ) - - mazes, paths = self.seq2map(input) - _, predicted_paths = self.seq2map(result) - - filename = os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.png") - maze.save_image( - filename, - mazes=mazes, - target_paths=paths, - predicted_paths=predicted_paths, - path_correct=maze.path_correctness(mazes, predicted_paths), - path_optimal=maze.path_optimality(paths, predicted_paths), - ) - log_string(f"wrote {filename}") - - model.train(t) - +if args.model in default_model_args: + for k, v in 
default_model_args[args.model].items(): + if getattr(args, k) is None: + setattr(args, k, v) +else: + raise ValueError(f"Unknown model {args.model}") ###################################################################### +try: + os.mkdir(args.result_dir) +except FileExistsError: + if not args.resume: + print(f"result directory {args.result_dir} already exists") + exit(1) -import snake - - -class TaskSnake(Task): - def __init__( - self, - nb_train_samples, - nb_test_samples, - batch_size, - height, - width, - nb_colors, - length, - prompt_length, - device=torch.device("cpu"), - ): - self.batch_size = batch_size - self.height = height - self.width = width - self.device = device - self.prompt_length = prompt_length - - self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences( - nb_train_samples, - height, - width, - nb_colors, - length, - prompt_length, - self.device, - ) - self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences( - nb_test_samples, - height, - width, - nb_colors, - length, - prompt_length, - self.device, - ) - - self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1 - - def batches(self, split="train", nb_to_use=-1, desc=None): - assert split in {"train", "test"} - input = self.train_input if split == "train" else self.test_input - if nb_to_use > 0: - input = input[:nb_to_use] - if desc is None: - desc = f"epoch-{split}" - for batch in tqdm.tqdm( - input.split(self.batch_size), dynamic_ncols=True, desc=desc - ): - yield batch - - def vocabulary_size(self): - return self.nb_codes - - def produce_results(self, n_epoch, model): - with torch.autograd.no_grad(): - t = model.training - model.eval() - - def compute_nb_correct(input, prior_visits): - result = input.clone() - i = torch.arange(result.size(1), device=result.device)[None, :] - ar_mask = ( - torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0) - .long() - .expand_as(result) - ) - result *= 1 - ar_mask - - # snake.solver(result,ar_mask) - - masked_inplace_autoregression( - model, self.batch_size, result, ar_mask, device=self.device - ) - - nb_total = ((prior_visits > 0) * ar_mask).sum() - - nb_correct = ( - (result == input).long() * (prior_visits > 0) * ar_mask - ).sum() - - # nb_total = result.size(0) - # nb_correct = ((result - input).abs().sum(1) == 0).sum() - - return nb_total, nb_correct - - # train_nb_total, train_nb_correct = compute_nb_correct( - # self.train_input, self.train_prior_visits - # ) - - # log_string( - # f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%" - # ) - - test_nb_total, test_nb_correct = compute_nb_correct( - self.test_input[:1000], self.test_prior_visits[:1000] - ) - - log_string( - f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%" - ) - - model.train(t) +log_file = open(os.path.join(args.result_dir, args.log_filename), "a") +if args.seed >= 0: + # torch.backends.cudnn.deterministic = True + # torch.backends.cudnn.benchmark = False + # torch.use_deterministic_algorithms(True) + torch.manual_seed(args.seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(args.seed) ###################################################################### -import stack - - -class TaskStack(Task): - def __init__( - self, - nb_train_samples, - nb_test_samples, - batch_size, - nb_steps, - nb_stacks, - nb_digits, - fraction_values_for_train=None, - device=torch.device("cpu"), - ): - 
self.batch_size = batch_size - self.nb_steps = nb_steps - self.nb_stacks = nb_stacks - self.nb_digits = nb_digits - self.device = device - - if fraction_values_for_train is None: - values_for_train = None - values_for_test = None - else: - all = torch.randperm(10**nb_digits) - nb_for_train = int(all.size(0) * fraction_values_for_train) - values_for_train = all[:nb_for_train] - values_for_test = all[nb_for_train:] - - self.train_input, self.train_stack_counts = stack.generate_sequences( - nb_train_samples, - nb_steps, - nb_stacks, - nb_digits, - values_for_train, - self.device, - ) - - self.test_input, self.test_stack_counts = stack.generate_sequences( - nb_test_samples, - nb_steps, - nb_stacks, - nb_digits, - values_for_test, - self.device, - ) - - mask = self.test_input.clone() - stack.remove_popped_values(mask, self.nb_stacks, self.nb_digits) - mask = mask != self.test_input - counts = self.test_stack_counts.flatten()[mask.flatten()] - counts = F.one_hot(counts).sum(0) - log_string(f"stack_count {counts}") - - self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1 - - def batches(self, split="train", nb_to_use=-1, desc=None): - assert split in {"train", "test"} - input = self.train_input if split == "train" else self.test_input - if nb_to_use > 0: - input = input[:nb_to_use] - if desc is None: - desc = f"epoch-{split}" - for batch in tqdm.tqdm( - input.split(self.batch_size), dynamic_ncols=True, desc=desc - ): - yield batch - - def vocabulary_size(self): - return self.nb_codes - - def produce_results(self, n_epoch, model): - with torch.autograd.no_grad(): - t = model.training - model.eval() - - def compute_nb_correct(input): - result = input.clone() - stack.remove_popped_values(result, self.nb_stacks, self.nb_digits) - ar_mask = (result != input).long() - masked_inplace_autoregression( - model, self.batch_size, result, ar_mask, device=self.device - ) - - errors = ((result != input).long() * ar_mask).reshape( - -1, 1 + self.nb_digits - ) - ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits) - - nb_total = ar_mask.max(1).values.sum() - nb_correct = nb_total - errors.max(1).values.sum() +def log_string(s): + t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime()) - return nb_total, nb_correct + if log_file is not None: + log_file.write(t + s + "\n") + log_file.flush() - test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000]) + print(t + s) + sys.stdout.flush() - log_string( - f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%" - ) - #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - l = 50 - l = l - l % (1 + self.nb_digits) - input = self.test_input[:10, :l] - result = input.clone() - stack.remove_popped_values(result, self.nb_stacks, self.nb_digits) - ar_mask = (result != input).long() - for n in range(result.size(0)): - log_string( - f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}" - ) - masked_inplace_autoregression( - model, self.batch_size, result, ar_mask, device=self.device - ) - for n in range(result.size(0)): - log_string( - f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}" - ) - #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
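# A rough, back-of-the-envelope check of the new model presets defined in
# default_model_args above (an editor's sketch, not code from mygpt.py):
# assuming standard attention + MLP blocks with dim_keys * nb_heads == dim_model,
# and ignoring embeddings, layer norms and biases, the "37M" preset comes out
# close to its name.
dim_model, dim_hidden, nb_blocks = 512, 2048, 12        # the "37M" preset
attention = 4 * dim_model * dim_model                   # Q, K, V and output projections
mlp = 2 * dim_model * dim_hidden                        # the two feed-forward linear layers
print(f"{nb_blocks * (attention + mlp) / 1e6:.1f}M")    # -> 37.7M
# Note that default_task_args above maps each task to one of these presets,
# e.g. "--task addition" selects "352M" unless --model is given explicitly.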
+log_string(f"argv {' '.join(sys.argv)}") - model.train(t) +for n in vars(args): + log_string(f"args.{n} {getattr(args, n)}") ###################################################################### @@ -1011,41 +429,142 @@ picoclvr_pruner_eval = ( ###################################################################### -if args.task == "picoclvr": - task = TaskPicoCLVR( +if args.physical_batch_size is None: + args.physical_batch_size = args.batch_size +else: + assert args.batch_size % args.physical_batch_size == 0 + +assert args.nb_train_samples % args.batch_size == 0 +assert args.nb_test_samples % args.batch_size == 0 + +if args.task == "file": + assert ( + args.filetask_train_file is not None and args.filetask_test_file is not None + ), "You have to specify the task train and test files" + task = tasks.TaskFromFile( + args.filetask_train_file, + args.filetask_test_file, + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + shuffle=True, + device=device, + ) + args.max_percents_of_test_in_train = 0 + +elif args.task == "byheart": + task = tasks.SandBox( + problem=problems.ProblemByHeart(separation=args.byheart_separation), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + args.max_percents_of_test_in_train = -1 + +elif args.task == "learnop": + task = tasks.SandBox( + problem=problems.ProblemLearnOperator(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + + +elif args.task == "guessop": + task = tasks.SandBox( + problem=problems.ProblemGuessOperator(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + + +elif args.task == "twotargets": + task = tasks.SandBox( + problem=problems.ProblemTwoTargets(), nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "memory": + task = tasks.SandBox( + problem=problems.ProblemMemory(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "mixing": + task = tasks.SandBox( + problem=problems.ProblemMixing( + hard=args.mixing_hard, random_start=not args.mixing_deterministic_start + ), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "addition": + task = tasks.SandBox( + problem=problems.ProblemAddition(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "picoclvr": + task = tasks.PicoCLVR( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, height=args.picoclvr_height, width=args.picoclvr_width, nb_colors=args.picoclvr_nb_colors, + logger=log_string, device=device, pruner_train=picoclvr_pruner_train, pruner_eval=picoclvr_pruner_eval, ) elif args.task == "mnist": - task = TaskMNIST( - batch_size=args.batch_size, + task = tasks.MNIST( + 
nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, device=device, ) elif args.task == "maze": - task = TaskMaze( + task = tasks.Maze( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, height=args.maze_height, width=args.maze_width, nb_walls=args.maze_nb_walls, - device=device, + device="cpu", ) elif args.task == "snake": - task = TaskSnake( + task = tasks.Snake( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, height=args.snake_height, width=args.snake_width, nb_colors=args.snake_nb_colors, @@ -1055,10 +574,11 @@ elif args.task == "snake": ) elif args.task == "stack": - task = TaskStack( + task = tasks.Stack( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, + logger=log_string, nb_steps=args.stack_nb_steps, nb_stacks=args.stack_nb_stacks, nb_digits=args.stack_nb_digits, @@ -1066,6 +586,67 @@ elif args.task == "stack": device=device, ) +elif args.task == "expr": + task = tasks.Expr( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + nb_variables=args.expr_nb_variables, + sequence_length=args.expr_sequence_length, + operand_max=args.expr_operand_max, + result_max=args.expr_result_max, + batch_size=args.physical_batch_size, + device=device, + ) + +elif args.task == "rpl": + task = tasks.RPL( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + nb_starting_values=args.rpl_nb_starting_values, + max_input=args.rpl_max_input, + prog_len=args.rpl_prog_len, + nb_runs=args.rpl_nb_runs, + no_prog=args.rpl_no_prog, + logger=log_string, + device=device, + ) + +elif args.task == "grid": + task = tasks.Grid( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + size=args.grid_size, + fraction_play=args.grid_fraction_play, + logger=log_string, + device=device, + ) + +elif args.task == "qmlp": + task = tasks.QMLP( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + result_dir=args.result_dir, + logger=log_string, + device=device, + ) + +elif args.task == "greed": + task = tasks.Greed( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + height=args.greed_height, + width=args.greed_width, + T=args.greed_T, + nb_walls=args.greed_nb_walls, + nb_coins=args.greed_nb_coins, + logger=log_string, + device=device, + ) + else: raise ValueError(f"Unknown task {args.task}") @@ -1123,15 +704,64 @@ else: ###################################################################### -nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default +if args.task == "expr" and args.expr_input_file is not None: + task.produce_results( + n_epoch=nb_epochs_finished, + model=model, + result_dir=args.result_dir, + logger=log_string, + deterministic_synthesis=args.deterministic_synthesis, + input_file=args.expr_input_file, + ) + + exit(0) + +###################################################################### + +# Compute the entropy of the training tokens token_count = 0 -for input in task.batches(split="train"): +for input in task.batches(split="train", 
desc="train-entropy"): token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1)) token_probas = token_count / token_count.sum() entropy = -torch.xlogy(token_probas, token_probas).sum() train_set_perplexity = math.exp(entropy) +###################################################################### +# A bit of paranoia never hurts + +if args.max_percents_of_test_in_train >= 0: + + def subsets_as_tuples(batches, cs): + s = set() + for batch in batches: + for x in batch: + s.add(tuple([v.item() for v in x])) + if len(s) == cs: + yield s + s = set() + yield s + + nb_test, nb_in_train = 0, 0 + for test_subset in subsets_as_tuples( + task.batches(split="test", desc="test-check"), 25000 + ): + in_train = set() + for train_subset in subsets_as_tuples( + task.batches(split="train", desc="train-check"), 25000 + ): + in_train.update(test_subset.intersection(train_subset)) + nb_in_train += len(in_train) + nb_test += len(test_subset) + + log_string( + f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set" + ) + + assert ( + nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100 + ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set" + ############################## if args.learning_rate_schedule == "cos": @@ -1158,12 +788,18 @@ log_string(f"learning_rate_schedule {learning_rate_schedule}") ############################## -nb_samples_seen = 0 +if nb_epochs_finished >= args.nb_epochs: + task.produce_results( + n_epoch=nb_epochs_finished, + model=model, + result_dir=args.result_dir, + logger=log_string, + deterministic_synthesis=args.deterministic_synthesis, + ) -if nb_epochs_finished >= nb_epochs: - task.produce_results(nb_epochs_finished, model) +time_pred_result = None -for n_epoch in range(nb_epochs_finished, nb_epochs): +for n_epoch in range(nb_epochs_finished, args.nb_epochs): learning_rate = learning_rate_schedule[n_epoch] log_string(f"learning_rate {learning_rate}") @@ -1183,27 +819,37 @@ for n_epoch in range(nb_epochs_finished, nb_epochs): for input in task.batches(split="train"): input = input.to(device) + + if nb_train_samples % args.batch_size == 0: + optimizer.zero_grad() + output = model(mygpt.BracketedSequence(input)).x loss = F.cross_entropy(output.transpose(1, 2), input) acc_train_loss += loss.item() * input.size(0) + nb_train_samples += input.size(0) - nb_samples_seen += input.size(0) - optimizer.zero_grad() loss.backward() - optimizer.step() + + if nb_train_samples % args.batch_size == 0: + optimizer.step() with torch.autograd.no_grad(): model.eval() nb_test_samples, acc_test_loss = 0, 0.0 + nb_samples_accumulated = 0 for input in task.batches(split="test"): input = input.to(device) - output = model(mygpt.BracketedSequence(input)).x + bs = model(mygpt.BracketedSequence(input)) + output = bs.x + loss = F.cross_entropy(output.transpose(1, 2), input) + acc_test_loss += loss.item() * input.size(0) + nb_test_samples += input.size(0) train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples)) @@ -1213,7 +859,20 @@ for n_epoch in range(nb_epochs_finished, nb_epochs): f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}" ) - task.produce_results(n_epoch, model) + task.produce_results( + n_epoch=n_epoch, + model=model, + result_dir=args.result_dir, + logger=log_string, + deterministic_synthesis=args.deterministic_synthesis, + ) + + time_current_result = datetime.datetime.now() + if 
time_pred_result is not None: + log_string( + f"next_result {time_current_result + (time_current_result - time_pred_result)}" + ) + time_pred_result = time_current_result checkpoint = { "nb_epochs_finished": n_epoch + 1,
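# A self-contained illustration of the train-set perplexity computed above:
# the exponential of the empirical token entropy (natural log) estimated from
# the token counts. The counts below are made up for the example.
import math

import torch

token_count = torch.tensor([4.0, 3.0, 2.0, 1.0])           # toy counts over a 4-token vocabulary
token_probas = token_count / token_count.sum()              # empirical token distribution
entropy = -torch.xlogy(token_probas, token_probas).sum()    # -sum p*log(p), with 0*log(0) == 0
train_set_perplexity = math.exp(entropy)                    # ~3.60 for these counts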
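# A minimal, self-contained sketch of the gradient-accumulation pattern that
# --physical_batch_size introduces in the training loop above: backward() runs
# on every physical sub-batch, while zero_grad()/step() fire once per logical
# batch. The toy model and data are placeholders, not the script's.
import torch
from torch import nn
from torch.nn import functional as F

batch_size, physical_batch_size = 25, 5    # batch_size must be a multiple of physical_batch_size
model = nn.Linear(8, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
x, y = torch.randn(100, 8), torch.randint(0, 4, (100,))
nb_train_samples = 0
for xb, yb in zip(x.split(physical_batch_size), y.split(physical_batch_size)):
    if nb_train_samples % batch_size == 0:
        optimizer.zero_grad()              # start of a new logical batch
    loss = F.cross_entropy(model(xb), yb)
    loss.backward()                        # gradients accumulate across sub-batches
    nb_train_samples += xb.size(0)
    if nb_train_samples % batch_size == 0:
        optimizer.step()                   # one update per logical batch
# As in the loop above, the loss is not rescaled by the number of sub-batches,
# so the accumulated gradient is the sum of per-sub-batch means rather than
# the mean over the logical batch.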