1 #!/usr/bin/env python
2
3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
5
6 # Written by Francois Fleuret <francois@fleuret.org>
7
8 import math, os, tqdm
9
10 import torch, torchvision
11
12 from torch import nn
13 from torch.nn import functional as F
14
15 from mygpt import BracketedSequence
16
17 # from graph import save_attention_image
18 save_attention_image = None
19
20 ######################################################################
21
22
23 def masked_inplace_autoregression(
24     model,
25     batch_size,
26     input,
27     ar_mask,
28     deterministic_synthesis,
29     forbidden_tokens=None,
30     progress_bar_desc="autoregression",
31     device=torch.device("cpu"),
32 ):
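    # Batch-wise wrapper around model.masked_inplace_autoregression: the
    # positions where ar_mask is 1 are (re)generated in place, the others are
    # kept as-is. The model is switched to eval mode for the generation and
    # its previous training/eval state is restored afterward.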
33     assert input.size() == ar_mask.size()
34
35     batches = zip(input.split(batch_size), ar_mask.split(batch_size))
36
37     if progress_bar_desc is not None:
38         batches = tqdm.tqdm(
39             batches,
40             dynamic_ncols=True,
41             desc=progress_bar_desc,
42             total=(input.size(0) + batch_size - 1) // batch_size,
43         )
44
45     with torch.autograd.no_grad():
46         t = model.training
47         model.eval()
48
49         for input, ar_mask in batches:
50             model.masked_inplace_autoregression(
51                 input, ar_mask, forbidden_tokens, deterministic_synthesis
52             )
53
54         model.train(t)
55
56
57 ######################################################################
58
59
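# Base class of all tasks: batches() yields token batches for a split,
# vocabulary_size() returns the number of token codes, and produce_results()
# runs the evaluation / generation for one epoch and logs the metrics.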
60 class Task:
61     def batches(self, split="train"):
62         pass
63
64     def vocabulary_size(self):
65         pass
66
67     def produce_results(
68         self, n_epoch, model, result_dir, logger, deterministic_synthesis
69     ):
70         pass
71
72
73 class TaskFromFile(Task):
74     def tensorize(self, pairs, shuffle):
75         len_max = max([len(x[0]) for x in pairs])
76
77         input = torch.cat(
78             [
79                 torch.tensor(
80                     [
81                         [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
82                         for s in pairs
83                     ]
84                 )
85             ],
86             0,
87         ).to("cpu")
88
89         pred_mask = torch.cat(
90             [
91                 torch.tensor(
92                     [
93                         [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
94                         for s in pairs
95                     ]
96                 )
97             ],
98             0,
99         ).to("cpu")
100
101         if shuffle:
102             print("SHUFFLING!")
103             i = torch.randperm(input.size(0))
104             input = input[i].contiguous()
105             pred_mask = pred_mask[i].contiguous()
106
107         return input, pred_mask
108
    # Trim the tensors to remove as many padding tokens as possible on the
    # left and right of the first tensor. If z is a tuple, all its elements
    # are trimmed according to the span computed for the first one.
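    # The trick: pad the first tensor with one extra padding column on each
    # side, flag the columns made entirely of padding tokens, and take the
    # cumulative count of content columns; a is the first content column and
    # b - 1 the last one, so z[:, a:b] is the tight content span.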
112     def trim(self, z, token="#"):
113         n = self.char2id[token]
114         if type(z) == tuple:
115             x = z[0]
116             i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
117             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
118             return tuple([t[:, a:b] for t in z])
119         else:
120             i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
121             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
122             return z[:, a:b]
123
124     def __init__(
125         self,
126         train_filename,
127         test_filename,
128         nb_train_samples,
129         nb_test_samples,
130         batch_size,
131         shuffle=False,
132         device=torch.device("cpu"),
133     ):
134         self.batch_size = batch_size
135         self.device = device
136
137         def read_file(filename, nb=-1):
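            # The file is expected to alternate a token line and a mask line of
            # the same length; mask characters are "0" (given), "1" (to
            # generate) or "2" (to generate and to score, see produce_results).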
138             pairs = []
139             with open(filename, "r") as f:
140                 while True:
141                     sequence = f.readline().strip()
142                     if not sequence:
143                         break
144                     pred_mask = f.readline().strip()
145                     assert len(sequence) == len(pred_mask)
146                     assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
147                     pairs.append((sequence, pred_mask))
148                     if len(pairs) == nb:
149                         break
150
151             if nb > 0:
152                 pairs = pairs[:nb]
153                 assert len(pairs) == nb
154
155             return pairs
156
157         train_pairs = read_file(train_filename, nb_train_samples)
158         test_pairs = read_file(test_filename, nb_test_samples)
159
160         symbols = ["#"] + list(
161             set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
162         )
163         self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
164         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
165
166         self.train_input, self.train_pred_masks = self.tensorize(
167             train_pairs, shuffle=shuffle
168         )
169         self.test_input, self.test_pred_masks = self.tensorize(
170             test_pairs, shuffle=shuffle
171         )
172
173     def batches(self, split="train", nb_to_use=-1, desc=None):
174         assert split in {"train", "test"}
175         input = self.train_input if split == "train" else self.test_input
176         if nb_to_use > 0:
177             input = input[:nb_to_use]
178         if desc is None:
179             desc = f"epoch-{split}"
180         for batch in tqdm.tqdm(
181             input.split(self.batch_size), dynamic_ncols=True, desc=desc
182         ):
183             yield self.trim(batch).to(self.device)
184
185     def vocabulary_size(self):
186         return len(self.char2id)
187
188     def tensor2str(self, t):
189         return ["".join([self.id2char[x.item()] for x in s]) for s in t]
190
191     def produce_results(
192         self, n_epoch, model, result_dir, logger, deterministic_synthesis
193     ):
194         correct = self.trim(self.test_input[:1000]).to(self.device)
195         result = correct.clone()
196         pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
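        # Mask semantics: positions marked 1 or 2 are regenerated by the model,
        # and only those marked 2 are taken into account for the accuracy below.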
197         ar_mask = (pred_mask > 0).long()
198         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
199
200         logger(f"----------------------------------------------------------")
201
202         for e in self.tensor2str(result[:50]):
203             logger(f"test_before {e}")
204
205         masked_inplace_autoregression(
206             model,
207             self.batch_size,
208             result,
209             ar_mask,
210             deterministic_synthesis,
211             device=self.device,
212         )
213
214         logger(f"----------------------------------------------------------")
215
216         for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
217             logger(f"test_after  {e}")
218             logger(f"correct     {c}")
219
220         logger(f"----------------------------------------------------------")
221
222         err_mask = (pred_mask == 2).long()
223         nb_total = err_mask.sum().item()
224         nb_correct = ((correct == result).long() * err_mask).sum().item()
225
226         logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
227         logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
228
229
230 ####################
231
232 import problems
233
234
235 class SandBox(Task):
236     def __init__(
237         self,
238         problem,
239         nb_train_samples,
240         nb_test_samples,
241         batch_size,
242         logger=None,
243         device=torch.device("cpu"),
244         max_nb_codes=1024,
245     ):
246         super().__init__()
247
248         self.batch_size = batch_size
249         self.device = device
250         self.problem = problem
251
252         self.train_input, self.train_ar_mask = self.problem.generate_sequences(
253             nb_train_samples
254         )
255         self.test_input, self.test_ar_mask = self.problem.generate_sequences(
256             nb_test_samples
257         )
258
259         self.train_input, self.train_ar_mask = self.train_input.to(
260             device
261         ), self.train_ar_mask.to(device)
262         self.test_input, self.test_ar_mask = self.test_input.to(
263             device
264         ), self.test_ar_mask.to(device)
265
266         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
267
268         # A bit of paranoia never hurts
269         assert self.nb_codes <= max_nb_codes
270         assert self.train_input.min() >= 0
271         assert self.test_input.min() >= 0
272         assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
273             (0,),
274             (1,),
275             (0, 1),
276         }
277         assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
278             (0,),
279             (1,),
280             (0, 1),
281         }
282
283         if logger is not None:
284             for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
285                 logger(f"train_sequences {self.problem.seq2str(s)}")
286                 a = "".join(["01"[x.item()] for x in a])
287                 logger(f"                {a}")
288
289     def batches(self, split="train", nb_to_use=-1, desc=None):
290         assert split in {"train", "test"}
291         input = self.train_input if split == "train" else self.test_input
292         if nb_to_use > 0:
293             input = input[:nb_to_use]
294         if desc is None:
295             desc = f"epoch-{split}"
296         for batch in tqdm.tqdm(
297             input.split(self.batch_size), dynamic_ncols=True, desc=desc
298         ):
299             yield batch
300
301     def vocabulary_size(self):
302         return self.nb_codes
303
304     def produce_results(
305         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
306     ):
307         def compute_accuracy(input, ar_mask, logger=None):
308             input, ar_mask = input[:nmax], ar_mask[:nmax]
309             result = input.clone() * (1 - ar_mask)
310
311             masked_inplace_autoregression(
312                 model,
313                 self.batch_size,
314                 result,
315                 ar_mask,
316                 deterministic_synthesis,
317                 progress_bar_desc=None,
318                 device=self.device,
319             )
320
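            # If the whole sequence is generated (mask all ones), there is no
            # meaningful ground truth to display next to the prediction.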
321             log_ground_truth = ar_mask.min() == 0
322
323             if logger is not None:
324                 for sp, st in zip(result[:10], input[:10]):
325                     logger(
326                         f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
327                     )
328                     if log_ground_truth:
329                         logger(
330                             f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
331                         )
332
333             nb_total, nb_correct = self.problem.compute_nb_correct(
334                 input, ar_mask, result
335             )
336
337             # nb_total = ar_mask.sum().item()
338             # nb_correct = ((result == input).long() * ar_mask).sum().item()
339
340             return nb_total, nb_correct
341
342         train_nb_total, train_nb_correct = compute_accuracy(
343             self.train_input, self.train_ar_mask
344         )
345
346         logger(
347             f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
348         )
349
350         test_nb_total, test_nb_correct = compute_accuracy(
351             self.test_input, self.test_ar_mask, logger
352         )
353
354         logger(
355             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
356         )
357
358         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
359
360         if save_attention_image is not None:
361             for k in range(10):
362                 ns = torch.randint(self.test_input.size(0), (1,)).item()
363                 input = self.test_input[ns : ns + 1].clone()
364
365                 with torch.autograd.no_grad():
366                     t = model.training
367                     model.eval()
368                     # model.record_attention(True)
369                     model(BracketedSequence(input))
370                     model.train(t)
371                     # ram = model.retrieve_attention()
372                     # model.record_attention(False)
373
374                 # tokens_output = [c for c in self.problem.seq2str(input[0])]
375                 # tokens_input = ["n/a"] + tokens_output[:-1]
376                 # for n_head in range(ram[0].size(1)):
377                 # filename = os.path.join(
378                 # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
379                 # )
380                 # attention_matrices = [m[0, n_head] for m in ram]
381                 # save_attention_image(
382                 # filename,
383                 # tokens_input,
384                 # tokens_output,
385                 # attention_matrices,
386                 # k_top=10,
387                 ##min_total_attention=0.9,
388                 # token_gap=12,
389                 # layer_gap=50,
390                 # )
391                 # logger(f"wrote {filename}")
392
393
394 ######################################################################
395
396 import picoclvr
397
398
399 class PicoCLVR(Task):
400     # Make a tensor from a list of strings
401     def tensorize(self, descr):
402         token_descr = [s.strip().split(" ") for s in descr]
403         l = max([len(s) for s in token_descr])
404         token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
405         id_descr = [[self.token2id[u] for u in s] for s in token_descr]
406         return torch.tensor(id_descr, device=self.device)
407
408     # Make a list of strings from a tensor
409     def detensorize(self, x):
410         return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
411
    # Trim the tensors to remove as many padding tokens as possible on the
    # left and right of the first tensor. If z is a tuple, all its elements
    # are trimmed according to the span computed for the first one.
415     def trim(self, z, token="<nul>"):
416         n = self.token2id[token]
417         if type(z) == tuple:
418             x = z[0]
419             i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
420             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
421             return tuple([t[:, a:b] for t in z])
422         else:
423             i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
424             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
425             return z[:, a:b]
426
427     ######################
428
429     def __init__(
430         self,
431         nb_train_samples,
432         nb_test_samples,
433         batch_size,
434         height,
435         width,
436         nb_colors=5,
437         logger=None,
438         device=torch.device("cpu"),
439         pruner_train=None,
440         pruner_eval=None,
441     ):
442         super().__init__()
443
444         def generate_descr(nb, cache_suffix, pruner):
445             return picoclvr.generate(
446                 nb,
447                 height=self.height,
448                 width=self.width,
449                 nb_colors=nb_colors,
450                 pruner=pruner,
451             )
452
453         self.height = height
454         self.width = width
455         self.batch_size = batch_size
456         self.device = device
457         self.pruner_train = pruner_train
458         self.pruner_eval = pruner_eval
459
460         if logger is not None:
461             logger(
462                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
463             )
464
465         self.train_descr = generate_descr(
466             nb_train_samples, "train", pruner=self.pruner_train
467         )
468         self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
469
470         # Build the tokenizer
471         tokens = {"<nul>", "<img>"}
472         for d in [self.train_descr, self.test_descr]:
473             for s in d:
474                 for t in s.strip().split(" "):
475                     tokens.add(t)
        # Turn the token set into a sorted list so that identical descriptions
        # always produce identical tensors
478         tokens = list(tokens)
479         tokens.sort()
480         self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
481         self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
482         self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
483
484         # Tokenize the train and test sets
485         self.train_input = self.tensorize(self.train_descr)
486         self.test_input = self.tensorize(self.test_descr)
487
488     def batches(self, split="train"):
489         assert split in {"train", "test"}
490         input = self.train_input if split == "train" else self.test_input
491         for batch in tqdm.tqdm(
492             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
493         ):
494             yield self.trim(batch)
495
496     def vocabulary_size(self):
497         return len(self.token2id)
498
499     def compute_missing_properties(
500         self, n_epoch, model, logger, deterministic_synthesis, pruner=None
501     ):
502         acc_nb_requested_properties = []
503         acc_nb_missing_properties = []
504         acc_nb_results = 0
505
506         for input in tqdm.tqdm(
507             self.test_input.split(self.batch_size),
508             dynamic_ncols=True,
509             desc=f"test-properties",
510         ):
511             result = input.clone()
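            # Mask everything from the <img> token onward: the image part is
            # erased and regenerated from the textual description alone.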
512             ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
513             result = (1 - ar_mask) * result + ar_mask * self.t_nul
514             masked_inplace_autoregression(
515                 model,
516                 self.batch_size,
517                 result,
518                 ar_mask,
519                 deterministic_synthesis,
520                 progress_bar_desc=None,
521                 device=self.device,
522             )
523
524             result_descr = self.detensorize(result)
525             np = picoclvr.nb_properties(
526                 result_descr,
527                 height=self.height,
528                 width=self.width,
529                 pruner=pruner,
530             )
531             nb_requested_properties, _, nb_missing_properties = zip(*np)
532             acc_nb_requested_properties += nb_requested_properties
533             acc_nb_missing_properties += nb_missing_properties
534             acc_nb_results += len(result_descr)
535
536         nb_requested_properties = sum(acc_nb_requested_properties)
537         nb_missing_properties = sum(acc_nb_missing_properties)
538
539         prefix = "" if pruner is None else "pruned_"
540         logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
541         logger(
542             f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
543         )
544         logger(
545             f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
546         )
547
548         logger(
549             f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
550         )
551
552     ######################################################################
553
554     def produce_results(
555         self, n_epoch, model, result_dir, logger, deterministic_synthesis
556     ):
557         self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
558
559         if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )
561
562         nb_tokens_to_generate = self.height * self.width + 3
563         result_descr = []
564         nb_per_primer = 8
565         primer = []
566
567         for primer_descr in [
568             "red above green <sep> green top <sep> blue right of red",
569             "there is red <sep> there is yellow <sep> there is blue",
570             "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
571             "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
572         ]:
573             primer += [primer_descr + " <img>"] * nb_per_primer
574
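        # Append height * width + 1 <nul> tokens after each primer; these are
        # the positions the model fills in with the image tokens.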
575         result = self.tensorize(primer)
576         fill = result.new_full(
577             result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
578         )
579         result = torch.cat((result, fill), 1)
580         ar_mask = (result == self.t_nul).long()
581         masked_inplace_autoregression(
582             model,
583             self.batch_size,
584             result,
585             ar_mask,
586             deterministic_synthesis,
587             device=self.device,
588         )
589         result_descr = self.detensorize(result)
590
591         np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
592
593         acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
594         acc_nb_results = len(result_descr)
595
596         nb_requested_properties = sum(acc_nb_requested_properties)
597         nb_missing_properties = sum(acc_nb_missing_properties)
598
599         prefix = "demo_"
600         logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
601         logger(
602             f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
603         )
604         logger(
605             f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
606         )
607
608         img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
609
610         if img.dim() == 5:
611             if img.size(1) == 1:
612                 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
613             else:
614                 img = torch.cat(
615                     [
616                         torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
617                         for x in img
618                     ],
619                     0,
620                 )
621
622         image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
623         torchvision.utils.save_image(
624             img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
625         )
626         logger(f"wrote {image_name}")
627
628
629 ######################################################################
630
631
632 class MNIST(Task):
633     def __init__(
634         self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
635     ):
636         super().__init__()
637
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
640         self.batch_size = batch_size
641         self.device = device
642         data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
643         self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
644         data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
645         self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
646
647     def batches(self, split="train", nb_to_use=-1, desc=None):
648         assert split in {"train", "test"}
649         input = self.train_input if split == "train" else self.test_input
650         if nb_to_use > 0:
651             input = input[:nb_to_use]
652         if desc is None:
653             desc = f"epoch-{split}"
654         for batch in tqdm.tqdm(
655             input.split(self.batch_size), dynamic_ncols=True, desc=desc
656         ):
657             yield batch
658
659     def vocabulary_size(self):
660         return 256
661
662     def produce_results(
663         self, n_epoch, model, result_dir, logger, deterministic_synthesis
664     ):
665         results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
666         ar_mask = torch.full_like(results, 1)
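        # The mask is 1 everywhere, so the 64 sequences (28*28 pixel values)
        # are generated from scratch, i.e. unconditional samples.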
667         masked_inplace_autoregression(
668             model,
669             self.batch_size,
670             results,
671             ar_mask,
672             deterministic_synthesis,
673             device=self.device,
674         )
675         image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
676         torchvision.utils.save_image(
677             1 - results.reshape(-1, 1, 28, 28) / 255.0,
678             image_name,
679             nrow=16,
680             pad_value=0.8,
681         )
682         logger(f"wrote {image_name}")
683
684
685 ######################################################################
686
687 import maze
688
689
690 class Maze(Task):
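    # A sequence is the concatenation of the flattened height x width maps
    # (maze first, then path); seq2map splits it back into the two maps.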
691     def map2seq(self, *m):
692         return torch.cat([x.flatten(1) for x in m], 1)
693
694     def seq2map(self, s):
695         s = s.reshape(s.size(0), -1, self.height, self.width)
696         return (s[:, k] for k in range(s.size(1)))
697
698     def __init__(
699         self,
700         nb_train_samples,
701         nb_test_samples,
702         batch_size,
703         height,
704         width,
705         nb_walls,
706         device=torch.device("cpu"),
707     ):
708         super().__init__()
709
710         self.batch_size = batch_size
711         self.height = height
712         self.width = width
713         self.device = device
714
715         train_mazes, train_paths, _ = maze.create_maze_data(
716             nb_train_samples,
717             height=height,
718             width=width,
719             nb_walls=nb_walls,
720             progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
721         )
722         self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
723
724         test_mazes, test_paths, _ = maze.create_maze_data(
725             nb_test_samples,
726             height=height,
727             width=width,
728             nb_walls=nb_walls,
729             progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
730         )
731         self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
732
733         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
734
735     def batches(self, split="train", nb_to_use=-1, desc=None):
736         assert split in {"train", "test"}
737         input = self.train_input if split == "train" else self.test_input
738         if nb_to_use > 0:
739             input = input[:nb_to_use]
740         if desc is None:
741             desc = f"epoch-{split}"
742         for batch in tqdm.tqdm(
743             input.split(self.batch_size), dynamic_ncols=True, desc=desc
744         ):
745             yield batch
746
747     def vocabulary_size(self):
748         return self.nb_codes
749
750     def compute_error(
751         self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
752     ):
753         nb_total, nb_correct = 0, 0
754         count = torch.zeros(
755             self.width * self.height,
756             self.width * self.height,
757             device=self.device,
758             dtype=torch.int64,
759         )
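        # count[i, j] tallies correctly solved mazes whose optimal path has
        # length i and whose predicted path has length j (note that duplicate
        # (i, j) pairs within one batch are only counted once by this indexing).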
760
761         for input in self.batches(split, nb_to_use):
762             result = input.clone()
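            # The first height * width tokens hold the maze and are kept; the
            # remaining tokens hold the path and are regenerated.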
763             ar_mask = result.new_zeros(result.size())
764             ar_mask[:, self.height * self.width :] = 1
765             result *= 1 - ar_mask
766             masked_inplace_autoregression(
767                 model,
768                 self.batch_size,
769                 result,
770                 ar_mask,
771                 deterministic_synthesis,
772                 progress_bar_desc=None,
773                 device=self.device,
774             )
775             mazes, paths = self.seq2map(result)
776             path_correctness = maze.path_correctness(mazes, paths)
777             nb_correct += path_correctness.long().sum()
778             nb_total += mazes.size(0)
779
780             optimal_path_lengths = (
781                 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
782             )
783             predicted_path_lengths = (
784                 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
785             )
786             optimal_path_lengths = optimal_path_lengths[path_correctness]
787             predicted_path_lengths = predicted_path_lengths[path_correctness]
788             count[optimal_path_lengths, predicted_path_lengths] += 1
789
790         if count.max() == 0:
791             count = None
792         else:
793             count = count[
794                 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
795             ]
796
797         return nb_total, nb_correct, count
798
799     def produce_results(
800         self, n_epoch, model, result_dir, logger, deterministic_synthesis
801     ):
802         train_nb_total, train_nb_correct, count = self.compute_error(
803             model,
804             "train",
805             nb_to_use=1000,
806             deterministic_synthesis=deterministic_synthesis,
807         )
808         logger(
809             f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
810         )
811
812         test_nb_total, test_nb_correct, count = self.compute_error(
813             model,
814             "test",
815             nb_to_use=1000,
816             deterministic_synthesis=deterministic_synthesis,
817         )
818         logger(
819             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
820         )
821
822         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
823
824         if count is not None:
825             proportion_optimal = count.diagonal().sum().float() / count.sum()
826             logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
827             with open(
828                 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
829             ) as f:
830                 for i in range(count.size(0)):
831                     for j in range(count.size(1)):
832                         eol = " " if j < count.size(1) - 1 else "\n"
833                         f.write(f"{count[i,j]}{eol}")
834
835         input = self.test_input[:48]
836         result = input.clone()
837         ar_mask = result.new_zeros(result.size())
838         ar_mask[:, self.height * self.width :] = 1
839         result *= 1 - ar_mask
840         masked_inplace_autoregression(
841             model,
842             self.batch_size,
843             result,
844             ar_mask,
845             deterministic_synthesis,
846             device=self.device,
847         )
848
849         mazes, paths = self.seq2map(input)
850         _, predicted_paths = self.seq2map(result)
851
852         filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
853         maze.save_image(
854             filename,
855             mazes=mazes,
856             target_paths=paths,
857             predicted_paths=predicted_paths,
858             path_correct=maze.path_correctness(mazes, predicted_paths),
859             path_optimal=maze.path_optimality(paths, predicted_paths),
860         )
861         logger(f"wrote {filename}")
862
863
864 ######################################################################
865
866
867 import snake
868
869
870 class Snake(Task):
871     def __init__(
872         self,
873         nb_train_samples,
874         nb_test_samples,
875         batch_size,
876         height,
877         width,
878         nb_colors,
879         length,
880         prompt_length,
881         device=torch.device("cpu"),
882     ):
883         super().__init__()
884
885         self.batch_size = batch_size
886         self.height = height
887         self.width = width
888         self.device = device
889         self.prompt_length = prompt_length
890
891         self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
892             nb_train_samples,
893             height,
894             width,
895             nb_colors,
896             length,
897             prompt_length,
898             self.device,
899         )
900         self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
901             nb_test_samples,
902             height,
903             width,
904             nb_colors,
905             length,
906             prompt_length,
907             self.device,
908         )
909
910         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
911
912     def batches(self, split="train", nb_to_use=-1, desc=None):
913         assert split in {"train", "test"}
914         input = self.train_input if split == "train" else self.test_input
915         if nb_to_use > 0:
916             input = input[:nb_to_use]
917         if desc is None:
918             desc = f"epoch-{split}"
919         for batch in tqdm.tqdm(
920             input.split(self.batch_size), dynamic_ncols=True, desc=desc
921         ):
922             yield batch
923
924     def vocabulary_size(self):
925         return self.nb_codes
926
927     def produce_results(
928         self, n_epoch, model, result_dir, logger, deterministic_synthesis
929     ):
930         def compute_nb_correct(input, prior_visits):
931             result = input.clone()
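            # Regenerate every second token from position 2 * prompt_length
            # onward; accuracy is measured only on the positions whose cell had
            # already been visited (prior_visits > 0).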
932             i = torch.arange(result.size(1), device=result.device)[None, :]
933             ar_mask = (
934                 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
935                 .long()
936                 .expand_as(result)
937             )
938             result *= 1 - ar_mask
939
940             masked_inplace_autoregression(
941                 model,
942                 self.batch_size,
943                 result,
944                 ar_mask,
945                 deterministic_synthesis,
946                 device=self.device,
947             )
948
949             nb_total = ((prior_visits > 0) * ar_mask).sum()
950
951             nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
952
953             return nb_total, nb_correct
954
955         test_nb_total, test_nb_correct = compute_nb_correct(
956             self.test_input[:1000], self.test_prior_visits[:1000]
957         )
958
959         logger(
960             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
961         )
962
963         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
964
965
966 ######################################################################
967
968
969 import stack
970
971
972 class Stack(Task):
973     def __init__(
974         self,
975         nb_train_samples,
976         nb_test_samples,
977         batch_size,
978         logger,
979         nb_steps,
980         nb_stacks,
981         nb_digits,
982         fraction_values_for_train=None,
983         device=torch.device("cpu"),
984     ):
985         super().__init__()
986
987         self.batch_size = batch_size
988         self.nb_steps = nb_steps
989         self.nb_stacks = nb_stacks
990         self.nb_digits = nb_digits
991         self.device = device
992
993         if fraction_values_for_train is None:
994             values_for_train = None
995             values_for_test = None
996         else:
997             all = torch.randperm(10**nb_digits)
998             nb_for_train = int(all.size(0) * fraction_values_for_train)
999             values_for_train = all[:nb_for_train]
1000             values_for_test = all[nb_for_train:]
1001
1002         self.train_input, self.train_stack_counts = stack.generate_sequences(
1003             nb_train_samples,
1004             nb_steps,
1005             nb_stacks,
1006             nb_digits,
1007             values_for_train,
1008             self.device,
1009         )
1010
1011         self.test_input, self.test_stack_counts = stack.generate_sequences(
1012             nb_test_samples,
1013             nb_steps,
1014             nb_stacks,
1015             nb_digits,
1016             values_for_test,
1017             self.device,
1018         )
1019
1020         i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
1021         counts = self.test_stack_counts.flatten()[i.flatten()]
1022         counts = F.one_hot(counts).sum(0)
1023         logger(f"test_pop_stack_counts {counts}")
1024
1025         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1026
1027     def batches(self, split="train", nb_to_use=-1, desc=None):
1028         assert split in {"train", "test"}
1029         input = self.train_input if split == "train" else self.test_input
1030         if nb_to_use > 0:
1031             input = input[:nb_to_use]
1032         if desc is None:
1033             desc = f"epoch-{split}"
1034         for batch in tqdm.tqdm(
1035             input.split(self.batch_size), dynamic_ncols=True, desc=desc
1036         ):
1037             yield batch
1038
1039     def vocabulary_size(self):
1040         return self.nb_codes
1041
1042     def produce_results(
1043         self, n_epoch, model, result_dir, logger, deterministic_synthesis
1044     ):
1045         def compute_nb_correct(input):
1046             result = input.clone()
1047             stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1048             ar_mask = (result != input).long()
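            # The positions blanked by remove_popped_values are the ones to
            # predict. Tokens are then viewed in chunks of 1 + nb_digits, and a
            # chunk counts as correct only if none of its predicted digits is
            # wrong.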
1049             masked_inplace_autoregression(
1050                 model,
1051                 self.batch_size,
1052                 result,
1053                 ar_mask,
1054                 deterministic_synthesis,
1055                 device=self.device,
1056             )
1057
1058             errors = ((result != input).long() * ar_mask).reshape(
1059                 -1, 1 + self.nb_digits
1060             )
1061             ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
1062
1063             nb_total = ar_mask.max(1).values.sum()
1064             nb_correct = nb_total - errors.max(1).values.sum()
1065
1066             return nb_total, nb_correct
1067
1068         test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
1069
1070         logger(
1071             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1072         )
1073
1074         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1075
1076         ##############################################################
1077         # Log a few generated sequences
1078         input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1079         result = input.clone()
1080         stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1081         ar_mask = (result != input).long()
1082
1083         # for n in range(result.size(0)):
1084         # logger(
1085         # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1086         # )
1087
1088         masked_inplace_autoregression(
1089             model,
1090             self.batch_size,
1091             result,
1092             ar_mask,
1093             deterministic_synthesis,
1094             device=self.device,
1095         )
1096
1097         for n in range(result.size(0)):
1098             logger(
1099                 f"test_after  {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1100             )
1101         ##############################################################
1102
1103
1104 ######################################################################
1105
1106 import rpl
1107
1108
1109 class RPL(Task):
1110     def tensorize(self, sequences):
1111         len_max = max([len(x) for x in sequences])
1112         return torch.cat(
1113             [
1114                 torch.tensor(
1115                     [
1116                         [
1117                             self.token2id[str(c)]
1118                             for c in s + ["<nul>"] * (len_max - len(s))
1119                         ]
1120                         for s in sequences
1121                     ]
1122                 )
1123             ],
1124             0,
1125         )
1126
1127     def seq2str(self, seq):
1128         return " ".join([self.id2token[i] for i in seq])
1129
1130     def __init__(
1131         self,
1132         nb_train_samples,
1133         nb_test_samples,
1134         batch_size,
1135         nb_starting_values=3,
1136         max_input=9,
1137         prog_len=6,
1138         nb_runs=5,
1139         no_prog=False,
1140         logger=None,
1141         device=torch.device("cpu"),
1142     ):
1143         super().__init__()
1144
1145         self.batch_size = batch_size
1146         self.device = device
1147         self.no_prog = no_prog
1148
1149         train_sequences = [
1150             rpl.generate(
1151                 nb_starting_values=nb_starting_values,
1152                 nb_result_values_max=4 * nb_starting_values,
1153                 max_input=max_input,
1154                 prog_len=prog_len,
1155                 nb_runs=nb_runs,
1156             )
1157             for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1158         ]
1159
1160         test_sequences = [
1161             rpl.generate(
1162                 nb_starting_values=nb_starting_values,
1163                 nb_result_values_max=4 * nb_starting_values,
1164                 max_input=max_input,
1165                 prog_len=prog_len,
1166                 nb_runs=nb_runs,
1167             )
1168             for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1169         ]
1170
1171         symbols = list(
1172             set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1173         )
1174         val_max = max([x if type(x) is int else 0 for x in symbols])
1175         symbols = list(filter(lambda x: type(x) is str, symbols))
1176         symbols.sort()
1177         symbols += [str(n) for n in range(val_max + 1)]
1178         self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1179         self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1180
1181         self.t_nul = self.token2id["<nul>"]
1182         self.t_input = self.token2id["<in>"]
1183         self.t_output = self.token2id["<out>"]
1184         self.t_prog = self.token2id["<prg>"]
1185         self.t_end = self.token2id["<end>"]
1186
1187         self.train_input = self.tensorize(train_sequences)
1188         self.test_input = self.tensorize(test_sequences)
1189
1190         if no_prog:
1191             # Excise the program from every train and test example
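            # p is the position of the <prg> token: everything up to and
            # including <prg> is kept, an <end> token is written at p + 1, and
            # the rest is filled with <nul>.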
1192             k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
1193                 None, :
1194             ]
1195             p = (
1196                 ((self.train_input == self.t_prog).long() * k)
1197                 .max(1, keepdim=True)
1198                 .values
1199             )
1200             self.train_input = (
1201                 self.train_input * (k <= p).long()
1202                 + self.t_end * (k == p + 1).long()
1203                 + self.t_nul * (k > p + 1).long()
1204             )
1205             k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
1206                 None, :
1207             ]
1208             p = (
1209                 ((self.test_input == self.t_prog).long() * k)
1210                 .max(1, keepdim=True)
1211                 .values
1212             )
1213             self.test_input = (
1214                 self.test_input * (k <= p).long()
1215                 + self.t_end * (k == p + 1).long()
1216                 + self.t_nul * (k > p + 1).long()
1217             )
1218
1219         if logger is not None:
1220             logger(f"value_max {val_max}")
1221             for x in self.train_input[:25]:
1222                 end = (x != self.t_nul).nonzero().max().item() + 1
1223                 seq = [self.id2token[i.item()] for i in x[:end]]
1224                 s = " ".join(seq)
1225                 logger(f"example_seq {s}")
1226
1227         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1228
1229     def batches(self, split="train", nb_to_use=-1, desc=None):
1230         assert split in {"train", "test"}
1231         input = self.train_input if split == "train" else self.test_input
1232         if nb_to_use > 0:
1233             input = input[:nb_to_use]
1234         if desc is None:
1235             desc = f"epoch-{split}"
1236         for batch in tqdm.tqdm(
1237             input.split(self.batch_size), dynamic_ncols=True, desc=desc
1238         ):
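            # Trim the trailing <nul> padding shared by the whole batch,
            # keeping a margin of a few columns after the last content token.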
1239             last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1240             batch = batch[:, :last].to(self.device)
1241             yield batch
1242
1243     def vocabulary_size(self):
1244         return self.nb_codes
1245
1246     def produce_results(
1247         self, n_epoch, model, result_dir, logger, deterministic_synthesis
1248     ):
1249         # --------------------------------------------------------------------
1250         def compute_nb_errors_prog(input, nb_to_log=0):
1251             result = input.clone()
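            # Erase everything strictly after the <prg> token and let the model
            # regenerate it, i.e. predict the program from the recorded runs.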
1252             s = (result == self.t_prog).long()
1253             ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1254             result = (1 - ar_mask) * result + ar_mask * self.t_nul
1255
1256             masked_inplace_autoregression(
1257                 model,
1258                 self.batch_size,
1259                 result,
1260                 ar_mask,
1261                 deterministic_synthesis,
1262                 device=self.device,
1263             )
1264
1265             sum_nb_total, sum_nb_errors = 0, 0
1266             for one_input, one_result in zip(input, result):
1267                 seq = [self.id2token[i.item()] for i in one_result]
1268                 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1269                 sum_nb_total += 1
1270                 sum_nb_errors += 0 if nb_errors == 0 else 1
1271                 if nb_to_log > 0:
1272                     gt_seq = [self.id2token[i.item()] for i in one_input]
1273                     _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1274                     gt_prog = " ".join([str(x) for x in gt_prog])
1275                     prog = " ".join([str(x) for x in prog])
1276                     comment = "*" if nb_errors == 0 else "-"
1277                     logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1278                     for start_stack, target_stack, result_stack, correct in stacks:
1279                         comment = "*" if correct else "-"
1280                         start_stack = " ".join([str(x) for x in start_stack])
1281                         target_stack = " ".join([str(x) for x in target_stack])
1282                         result_stack = " ".join([str(x) for x in result_stack])
1283                         logger(
1284                             f"  {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1285                         )
1286                     nb_to_log -= 1
1287
1288             return sum_nb_total, sum_nb_errors
1289
1290         # --------------------------------------------------------------------
1291         def compute_nb_errors_output(input, nb_to_log=0):
1292             result = input.clone()
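            # Erase the tokens strictly after the last <out> marker and before
            # the <prg> marker (the last output) and let the model predict them.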
1293             k = torch.arange(result.size(1), device=result.device)[None, :]
1294             last_output_idx = (
1295                 ((result == self.t_output) * k).max(dim=1, keepdim=True).values
1296             )
1297             first_prog_idx = (
1298                 ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
1299             )
1300             ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
1301             result = (1 - ar_mask) * result + ar_mask * self.t_nul
1302
1303             masked_inplace_autoregression(
1304                 model,
1305                 self.batch_size,
1306                 result,
1307                 ar_mask,
1308                 deterministic_synthesis,
1309                 device=self.device,
1310             )
1311
1312             sum_nb_total, sum_nb_errors = 0, 0
1313             for one_input, one_result, i, j in zip(
1314                 input, result, last_output_idx, first_prog_idx
1315             ):
1316                 seq = [self.id2token[i.item()] for i in one_result]
1317                 sum_nb_total += 1
1318                 correct = (one_input - one_result).abs().max() == 0
1319                 sum_nb_errors += 0 if correct else 1
1320                 if nb_to_log > 0:
1321                     result_stack = [
1322                         self.id2token[i.item()] for i in one_result[i : j + 1]
1323                     ]
1324                     target_stack = [
1325                         self.id2token[i.item()] for i in one_input[i : j + 1]
1326                     ]
1327                     comment = "*" if correct else "-"
1328                     result_stack = " ".join([str(x) for x in result_stack])
1329                     target_stack = " ".join([str(x) for x in target_stack])
1330                     logger(
1331                         f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
1332                     )
1333                     nb_to_log -= 1
1334
1335             return sum_nb_total, sum_nb_errors
1336
1337         # --------------------------------------------------------------------
1338
1339         if not self.no_prog:
1340             test_nb_total, test_nb_errors = compute_nb_errors_prog(
1341                 self.test_input[:1000].to(self.device), nb_to_log=10
1342             )
1343
1344             logger(
1345                 f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1346             )
1347
1348             logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
1349
1350         test_nb_total, test_nb_errors = compute_nb_errors_output(
1351             self.test_input[:1000].to(self.device), nb_to_log=10
1352         )
1353
1354         logger(
1355             f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1356         )
1357
1358         if save_attention_image is None:
1359             logger("no save_attention_image (is pycairo installed?)")
1360         else:
1361             ns = torch.randint(self.test_input.size(0), (1,)).item()
1362             input = self.test_input[ns : ns + 1].clone()
1363             last = (input != self.t_nul).max(0).values.nonzero().max() + 3
1364             input = input[:, :last].to(self.device)
1365
1366             with torch.autograd.no_grad():
1367                 t = model.training
1368                 model.eval()
1369                 model.record_attention(True)
1370                 model(BracketedSequence(input))
1371                 model.train(t)
1372                 ram = model.retrieve_attention()
1373                 model.record_attention(False)
1374
1375             tokens_output = [self.id2token[i.item()] for i in input[0]]
1376             tokens_input = ["n/a"] + tokens_output[:-1]
1377             for n_head in range(ram[0].size(1)):
1378                 filename = os.path.join(
1379                     result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
1380                 )
1381                 attention_matrices = [m[0, n_head] for m in ram]
1382                 save_attention_image(
1383                     filename,
1384                     tokens_input,
1385                     tokens_output,
1386                     attention_matrices,
1387                     k_top=10,
1388                     # min_total_attention=0.9,
1389                     token_gap=12,
1390                     layer_gap=50,
1391                 )
1392                 logger(f"wrote {filename}")
1393
1394
1395 ######################################################################
1396
1397
1398 import expr
1399
1400
1401 class Expr(Task):
1402     def tensorize(self, sequences):
1403         len_max = max([len(x) for x in sequences])
1404         return torch.cat(
1405             [
1406                 torch.tensor(
1407                     [
1408                         [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1409                         for s in sequences
1410                     ]
1411                 )
1412             ],
1413             0,
1414         ).to(self.device)
1415
1416     def __init__(
1417         self,
1418         nb_train_samples,
1419         nb_test_samples,
1420         nb_variables,
1421         sequence_length,
1422         operand_max,
1423         result_max,
1424         batch_size,
1425         device=torch.device("cpu"),
1426     ):
1427         super().__init__()
1428
1429         self.batch_size = batch_size
1430         self.device = device
1431
1432         train_sequences = expr.generate_sequences(
1433             nb_train_samples,
1434             nb_variables=nb_variables,
1435             length=sequence_length,
1436             operand_max=operand_max,
1437             result_max=result_max,
1438         )
1439
1440         test_sequences = expr.generate_sequences(
1441             nb_test_samples,
1442             nb_variables=nb_variables,
1443             length=sequence_length,
1444             operand_max=operand_max,
1445             result_max=result_max,
1446         )
1447
1448         symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1449         symbols.sort()
1450
1451         self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1452         self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1453
1454         self.filler, self.space = self.char2id["#"], self.char2id[" "]
1455
1456         self.train_input = self.tensorize(train_sequences)
1457         self.test_input = self.tensorize(test_sequences)
1458
1459         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1460
1461     def batches(self, split="train", nb_to_use=-1, desc=None):
1462         assert split in {"train", "test"}
1463         input = self.train_input if split == "train" else self.test_input
1464         if nb_to_use > 0:
1465             input = input[:nb_to_use]
1466         if desc is None:
1467             desc = f"epoch-{split}"
1468         for batch in tqdm.tqdm(
1469             input.split(self.batch_size), dynamic_ncols=True, desc=desc
1470         ):
1471             last = (batch != self.filler).max(0).values.nonzero().max() + 3
1472             batch = batch[:, :last]
1473             yield batch
1474
1475     def vocabulary_size(self):
1476         return self.nb_codes
1477
1478     def seq2str(self, s):
1479         return "".join([self.id2char[k.item()] for k in s])
1480
1481     def produce_results(
1482         self,
1483         n_epoch,
1484         model,
1485         result_dir,
1486         logger,
1487         deterministic_synthesis,
1488         input_file=None,
1489     ):
1490         def compute_nb_correct(input):
1491             result = input.clone()
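            # Erase everything strictly after the first space character and let
            # the model regenerate it.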
1492             s = (result == self.space).long()
1493             ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1494             result = (1 - ar_mask) * result + ar_mask * self.filler
1495             masked_inplace_autoregression(
1496                 model,
1497                 self.batch_size,
1498                 result,
1499                 ar_mask,
1500                 deterministic_synthesis,
1501                 device=self.device,
1502             )
1503
1504             nb_total = input.size(0)
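            # A sequence counts as correct only if every one of its tokens
            # matches the input, hence the min over the token dimension.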
1505             nb_correct = (input == result).long().min(1).values.sum()
1506
1507             #######################################################################
1508             # Compute predicted vs. true variable values
1509
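            # nb_delta[d] counts predictions whose absolute error is exactly d;
            # larger errors, as well as missing or negative values, go into
            # nb_missed.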
1510             nb_delta = torch.zeros(5, dtype=torch.int64)
1511             nb_missed = 0
1512
1513             values_input = expr.extract_results([self.seq2str(s) for s in input])
1514             values_result = expr.extract_results([self.seq2str(s) for s in result])
1515
1516             filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1517
1518             with open(filename, "w") as f:
1519                 for i, r in zip(values_input, values_result):
1520                     for n, vi in i.items():
1521                         vr = r.get(n)
1522                         f.write(f"{vi} {-1 if vr is None else vr}\n")
1523
1524                         if vr is None or vr < 0:
1525                             nb_missed += 1
1526                         else:
1527                             d = abs(vr - vi)
1528                             if d >= nb_delta.size(0):
1529                                 nb_missed += 1
1530                             else:
1531                                 nb_delta[d] += 1
1532
1533             ######################################################################
1534
1535             return nb_total, nb_correct, nb_delta, nb_missed
1536
1537         (
1538             test_nb_total,
1539             test_nb_correct,
1540             test_nb_delta,
1541             test_nb_missed,
1542         ) = compute_nb_correct(self.test_input[:10000])
1543
1544         logger(
1545             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1546         )
1547
1548         logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1549
1550         nb_total = test_nb_delta.sum() + test_nb_missed
1551         for d in range(test_nb_delta.size(0)):
1552             logger(
1553                 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1554             )
1555         logger(
1556             f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1557         )
1558
1559         ##############################################################
1560         # Log a few generated sequences
1561         if input_file is None:
1562             input = self.test_input[:10]
1563         else:
1564             with open(input_file, "r") as f:
1565                 sequences = [e.strip() for e in f.readlines()]
1566                 sequences = [s + " " + "#" * 50 for s in sequences]
1567                 input = self.tensorize(sequences)
1568
1569         result = input.clone()
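        # Same masking as in compute_nb_correct: everything after the first
        # space is blanked and regenerated by the model.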
1570         s = (result == self.space).long()
1571         ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1572         result = (1 - ar_mask) * result + ar_mask * self.filler
1573
1574         for n in range(result.size(0)):
1575             logger(f"test_before {self.seq2str(result[n])}")
1576
1577         masked_inplace_autoregression(
1578             model,
1579             self.batch_size,
1580             result,
1581             ar_mask,
1582             deterministic_synthesis,
1583             device=self.device,
1584         )
1585
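        # Reference line: spaces on the prompt positions, ground truth on the
        # generated positions.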
1586         correct = (1 - ar_mask) * self.space + ar_mask * input
1587         for n in range(result.size(0)):
1588             comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1589             logger(f"test_after  {self.seq2str(result[n])} {comment}")
1590             logger(f"truth       {self.seq2str(correct[n])}")
1591         ##############################################################
1592
1593
1594 ######################################################################
1595
1596 import grid
1597
1598
1599 class Grid(Task):
1600     # Make a tensor from a list of strings
1601     def str2tensor(self, descr):
1602         token_descr = [s.strip().split(" ") for s in descr]
1603         len_max = max([len(s) for s in token_descr])
1604         token_descr = [s + ["#"] * (len_max - len(s)) for s in token_descr]
1605         id_descr = [[self.token2id[u] for u in s] for s in token_descr]
1606         return torch.tensor(id_descr, device=self.device)
1607
1608     # Make a list of strings from a tensor
1609     def tensor2str(self, x):
1610         return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
1611
1612     # Trim the leading and trailing all-filler columns of z (of z[0] if z
1613     # is a tuple). When z is a tuple, every element is trimmed according to
1614     # the trimming computed for the first one.
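    # For instance, assuming self.token2id["#"] == 0, a batch
    # torch.tensor([[0, 3, 5, 0, 0]]) is trimmed to tensor([[3, 5]]); only the
    # leading and trailing columns made entirely of filler tokens are removed.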
1615     def trim(self, z, token="#"):
1616         n = self.token2id[token]
1617         if type(z) == tuple:
1618             x = z[0]
1619             i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1620             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1621             return tuple([t[:, a:b] for t in z])
1622         else:
1623             i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1624             a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1625             return z[:, a:b]
1626
1627     ######################
1628
1629     def __init__(
1630         self,
1631         nb_train_samples,
1632         nb_test_samples,
1633         batch_size,
1634         size,
1635         fraction_play=0.0,
1636         logger=None,
1637         device=torch.device("cpu"),
1638     ):
1639         super().__init__()
1640
1641         self.device = device
1642         self.batch_size = batch_size
1643         self.grid_factory = grid.GridFactory(size=size)
1644         self.fraction_play = fraction_play
1645
1646         if logger is not None:
1647             logger(
1648                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1649             )
1650
1651         self.train_descr = self.grid_factory.generate_samples(
1652             nb=nb_train_samples,
1653             fraction_play=fraction_play,
1654             progress_bar=lambda r: tqdm.tqdm(r),
1655         )
1656
1657         self.test_descr = self.grid_factory.generate_samples(
1658             nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
1659         )
1660
1661         if fraction_play > 0:
1662             self.play_descr = self.grid_factory.generate_samples(
1663                 nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
1664             )
1665         else:
1666             self.play_descr = []
1667
1668         # Build the tokenizer
1669         tokens = set()
1670         for d in [self.train_descr, self.test_descr, self.play_descr]:
1671             for s in d:
1672                 for t in s.strip().split(" "):
1673                     tokens.add(t)
1674         # turn this set into a sorted list so that the same descriptions
1675         # always yield the same token ids, and hence the same tensors
1676         tokens = list(tokens)
1677         tokens.sort()
1678         tokens = ["#"] + tokens
1679         self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
1680         self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
1681         self.t_nul = self.token2id["#"]
1682         self.t_true = self.token2id["true"]
1683         self.t_false = self.token2id["false"]
1684         self.t_pipe = self.token2id["|"]
1685
1686         # Tokenize the train and test sets
1687         self.train_input = self.str2tensor(self.train_descr)
1688         self.test_input = self.str2tensor(self.test_descr)
1689         self.play_input = (
1690             None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
1691         )
1692
1693     def batches(self, split="train"):
1694         assert split in {"train", "test"}
1695         input = self.train_input if split == "train" else self.test_input
1696         for batch in tqdm.tqdm(
1697             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1698         ):
1699             yield self.trim(batch)
1700
1701     def vocabulary_size(self):
1702         return len(self.token2id)
1703
1704     def produce_results(
1705         self, n_epoch, model, result_dir, logger, deterministic_synthesis
1706     ):
1707         correct = self.test_input[:1000]
1708         result = correct.clone()
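        # Only the answer tokens ("true" / "false") are masked out and have to
        # be regenerated by the model.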
1709         ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
1710         result *= 1 - ar_mask  # paranoia: blank out the tokens to predict
1711
1712         logger(f"----------------------------------------------------------")
1713
1714         for e in self.tensor2str(result[:10]):
1715             logger(f"test_before {e}")
1716
1717         masked_inplace_autoregression(
1718             model,
1719             self.batch_size,
1720             result,
1721             ar_mask,
1722             deterministic_synthesis,
1723             device=self.device,
1724         )
1725
1726         logger(f"----------------------------------------------------------")
1727
1728         for e in self.tensor2str(result[:10]):
1729             logger(f"test_after  {e}")
1730
1731         logger(f"----------------------------------------------------------")
1732
1733         nb_total = ar_mask.sum().item()
1734         nb_correct = ((correct == result).long() * ar_mask).sum().item()
1735
1736         logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
1737         logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
1738
1739         if self.play_input is not None:
1740             result = self.play_input.clone()
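            # Regenerate everything from the first "|" separator onward.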
1741             ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
1742             result *= 1 - ar_mask  # paranoia: blank out the tokens to predict
1743
1744             logger(f"----------------------------------------------------------")
1745
1746             for e in self.tensor2str(result[:10]):
1747                 logger(f"play_before {e}")
1748
1749             masked_inplace_autoregression(
1750                 model,
1751                 self.batch_size,
1752                 result,
1753                 ar_mask,
1754                 deterministic_synthesis,
1755                 device=self.device,
1756             )
1757
1758             logger(f"----------------------------------------------------------")
1759
1760             for e in self.tensor2str(result[:10]):
1761                 logger(f"play_after  {e}")
1762
1763             logger(f"----------------------------------------------------------")
1764
1765
1766 ######################################################################
1767
1768 import qmlp
1769
1770
1771 class QMLP(Task):
1772     ######################
1773
1774     def __init__(
1775         self,
1776         nb_train_samples,
1777         nb_test_samples,
1778         batch_size,
1779         result_dir,
1780         logger=None,
1781         device=torch.device("cpu"),
1782     ):
1783         super().__init__()
1784
1785         self.device = device
1786         self.batch_size = batch_size
1787         self.nb_samples_per_mlp = 256
1788
1789         if logger is not None:
1790             logger(
1791                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1792             )
1793
1794         seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
1795             nb_mlps=nb_train_samples + nb_test_samples,
1796             nb_samples=self.nb_samples_per_mlp,
1797             device=self.device,
1798             batch_size=64,
1799             nb_epochs=250,
1800             nb_mlps_per_batch=1024,
1801         )
1802
1803         self.train_input = seq[:nb_train_samples]
1804         self.train_q_test_set = q_test_set[:nb_train_samples]
1805         self.train_ref_test_errors = test_error[:nb_train_samples]
1806         self.test_input = seq[nb_train_samples:]
1807         self.test_q_test_set = q_test_set[nb_train_samples:]
1808         self.test_ref_test_errors = test_error[nb_train_samples:]
1809
1810         filename = os.path.join(result_dir, f"train_errors_ref.dat")
1811         with open(filename, "w") as f:
1812             for e in self.train_ref_test_errors:
1813                 f.write(f"{e}\n")
1814
1815         filename = os.path.join(result_dir, f"test_errors_ref.dat")
1816         with open(filename, "w") as f:
1817             for e in self.test_ref_test_errors:
1818                 f.write(f"{e}\n")
1819
1820         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1821
1822     def batches(self, split="train"):
1823         assert split in {"train", "test"}
1824         input = self.train_input if split == "train" else self.test_input
1825         for batch in tqdm.tqdm(
1826             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1827         ):
1828             yield batch
1829
1830     def vocabulary_size(self):
1831         return self.nb_codes
1832
1833     def produce_results(
1834         self, n_epoch, model, result_dir, logger, deterministic_synthesis
1835     ):
1836         correct = self.test_input[:1000]
1837         result = correct.clone()
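        # Keep the prefix that encodes the quantized training set and
        # regenerate the tail of the sequence, which encodes the quantized MLP
        # parameters.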
1838         ar_mask = (
1839             torch.arange(result.size(1), device=result.device)
1840             > self.nb_samples_per_mlp * 3 + 1
1841         ).long()[None, :]
1842         ar_mask = ar_mask.expand_as(result)
1843         result *= 1 - ar_mask  # paranoia: blank out the tokens to predict
1844
1845         masked_inplace_autoregression(
1846             model,
1847             self.batch_size,
1848             result,
1849             ar_mask,
1850             deterministic_synthesis,
1851             device=self.device,
1852         )
1853
1854         q_train_set = result[:, : self.nb_samples_per_mlp * 3]
1855         q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
1856         error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
1857
1858         filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
1859         with open(filename, "w") as f:
1860             for e in error_test:
1861                 f.write(f"{e}\n")
1862
1863
1864 ######################################################################