[mygptrnn.git] / main.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

# torch.autograd.set_detect_anomaly(True) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

import ffutils
import mygpt, tasks, problems

######################################################################


def str2bool(x):
    x = x.lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError


parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

parser.add_argument("--force_cpu", type=str2bool, default=False)

########################################

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

########################################

parser.add_argument("--nb_warmup_iter", type=int, default=100)

parser.add_argument("--nb_decay_iter", type=int, default=5000)

parser.add_argument("--learning_rate", type=float, default=6e-4)

parser.add_argument("--min_learning_rate", type=float, default=6e-5)

# legacy

parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

parser.add_argument("--legacy_large_lr", type=float, default=1e-4)

parser.add_argument("--legacy_small_lr", type=float, default=2e-5)

parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--attention", type=str, default=None)

parser.add_argument("--memex_proba", type=float, default=0)

parser.add_argument("--memex_nb_epochs", type=float, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_lines", type=int, default=None)

parser.add_argument("--caterpillar_height", type=int, default=None)

parser.add_argument("--gate_dropout_proba", type=float, default=0.0)

parser.add_argument("--gate_dropout_sync", type=str2bool, default=False)

parser.add_argument("--gate_dropout_replace", type=str2bool, default=False)

parser.add_argument("--rho_inner_loss", type=float, default=0.0)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--continue_training", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# rpl options

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################
# grid options

parser.add_argument("--grid_size", type=int, default=6)

parser.add_argument("--grid_nb_colors", type=int, default=6)

parser.add_argument("--grid_nb_shapes", type=int, default=6)

##############################
# picoclvr options

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")

##############################
# Maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################
# Snake options

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################
# Stack options

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################
# Expr options

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################
# Memory

parser.add_argument("--memory_len_total", type=int, default=32)

##############################
# Mixing

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################

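# Example invocation, for illustration only (all flags are defined above):
#
#   ./main.py --task=byheart --model=4M --nb_epochs=25
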
# args = parser.parse_args()

args, sup_args = parser.parse_known_args()

sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])

if args.result_dir is None:
    args.result_dir = f"results_{args.task}_{args.model}"

######################################################################

if not args.force_cpu and torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

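# Per-task defaults, applied further down only to the arguments that were
# left at None on the command line
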
default_task_args = {
    "addition": {
        "model": "352M",
        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "byheart": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "expr": {
        "model": "352M",
        "physical_batch_size": 25,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "grid": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "qmlp": {
        "model": "37M",
        "physical_batch_size": 10,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "guessop": {
        "model": "352M",
        "physical_batch_size": 25,
        "nb_train_samples": 1000000,
        "nb_test_samples": 10000,
    },
    "learnop": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "maze": {
        "model": "37M",
        "physical_batch_size": 5,
        "nb_train_samples": 100000,
        "nb_test_samples": 10000,
    },
    "picoclvr": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "rpl": {
        "model": "352M",
        "physical_batch_size": 5,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "snake": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "stack": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "twotargets": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "memory": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 25000,
        "nb_test_samples": 10000,
    },
    "mixing": {
        "model": "37M",
        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "mnist": {
        "model": "37M",
        "physical_batch_size": 5,
        "nb_train_samples": 60000,
        "nb_test_samples": 10000,
    },
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################

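# Model size presets. The plain names use multi-head attention ("mha"), the
# "-C" variants use the caterpillar attention layer and therefore also set
# nb_lines (and, for some of them, caterpillar_height)
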
default_model_args = {
    "17K": {
        "attention": "mha",
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "17K-C": {
        "attention": "caterpillar",
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_lines": 16,
        "caterpillar_height": 4,
        "nb_blocks": 2,
    },
    "4M": {
        "attention": "mha",
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "4M-C": {
        "attention": "caterpillar",
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_lines": 32,
        "caterpillar_height": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "attention": "mha",
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "37M-C": {
        "attention": "caterpillar",
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 256,
        "caterpillar_height": 32,
        "nb_blocks": 12,
    },
    "122M": {
        "attention": "mha",
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "122M-C": {
        "attention": "caterpillar",
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 128,
        "nb_blocks": 24,
    },
    "352M": {
        "attention": "mha",
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
    "352M-C": {
        "attention": "caterpillar",
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 128,
        "nb_blocks": 48,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.continue_training:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")
lambda_file = open(os.path.join(args.result_dir, "lambda.dat"), "a")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


with os.popen("sha256sum *.py") as f:
    for l in f:
        log_string(f"sha256sum {l.strip()}")

now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

for k, v in sup_args.items():
    log_string(f'sup_args["{k}"] "{v}"')


######################################################################

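# A rough sketch of the behaviour of get_lr below. With the defaults
# (nb_warmup_iter=100, nb_decay_iter=5000, learning_rate=6e-4,
# min_learning_rate=6e-5) and --legacy_lr_schedule=false, the lr ramps up
# linearly to 6e-4 over the first 100 iterations, follows a cosine decay
# down to 6e-5 until iteration 5000, and stays there. With the legacy
# schedule (the default), it ramps up to legacy_large_lr, stays there for
# legacy_nb_epoch_large_lr epochs, then drops to legacy_small_lr
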
def get_lr(n_epoch, it):
    if args.legacy_lr_schedule:
        # my crude scheduling to compare to previous baseline, added
        # warmup though

        if it < args.nb_warmup_iter:
            return args.legacy_large_lr * it / args.nb_warmup_iter
        elif n_epoch < args.legacy_nb_epoch_large_lr:
            return args.legacy_large_lr
        else:
            return args.legacy_small_lr

    # from nanoGPT

    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
    # 2) if it > nb_decay_iter, return min learning rate
    if it > args.nb_decay_iter:
        return args.min_learning_rate
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - args.nb_warmup_iter) / (
        args.nb_decay_iter - args.nb_warmup_iter
    )
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return args.min_learning_rate + coeff * (
        args.learning_rate - args.min_learning_rate
    )


######################################################################


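# The add_memex_* generators below wrap a batch generator: with probability
# memex_proba they produce a "memex" version of a batch in which part of the
# sequence is replayed (v1: the full sequence after a gap of marker tokens,
# v2: a random quarter-length chunk appended at the end, v3: a
# marker-delimited copy of a random segment inserted at a random position),
# together with a memex_mask set to 1 on the replayed positions so that
# their loss can be weighted separately in the training loop
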
def add_memex_v1(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            t = (
                torch.arange(1 + 2 * input.size(1), device=input.device)[None, :]
                .expand(input.size(0), -1)
                .clone()
            )

            u0 = torch.randint(input.size(1), (input.size(0), 1), device=input.device)
            caterpillar_length = args.nb_lines // args.caterpillar_height
            u1 = (
                u0
                + torch.randint(
                    caterpillar_length, (input.size(0), 1), device=input.device
                )
                + 1
            )

            m0 = (t < u0).long()
            m1 = (t >= u1).long() * (t < u1 + input.size(1)).long()

            t = t * m0 + ((-1) * (1 - m0) * (1 - m1)) + (t - u1) * m1
            m = (t < 0).long()
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )

            new_input = input[n, t.clamp(min=0)]
            new_input = (1 - m) * new_input + m * (marker_token)

            memex_mask = new_input.new_zeros(new_input.size())
            memex_mask[:, input.size(1) :] = 1.0

            yield new_input, memex_mask

        yield input


# The marker token is not used for this one
def add_memex_v2(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            t = torch.arange(input.size(1) // 4, device=input.device)[None, :].expand(
                input.size(0), -1
            )
            t = t + torch.randint(
                input.size(1) - t.size(1), (t.size(0), 1), device=t.device
            )
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )

            flash = input[n, t]
            new_input = torch.cat([input, flash], dim=1)

            memex_mask = new_input.new_zeros(new_input.size())
            memex_mask[:, input.size(1) :] = 1.0

            yield new_input, memex_mask

        else:
            yield input


def add_memex_v3(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            memex_len = input.size(1) // 4

            t = torch.arange(input.size(1) + memex_len, device=input.device)[
                None, :
            ].expand(input.size(0), -1)
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )

            # Call me the tensor-spaghetti master

            trigger = torch.rand(t.size(), device=t.device)
            trigger[:, -memex_len:] = 2.0
            trigger[:, 0] = 2.0
            trigger = (trigger == trigger.min(dim=1, keepdim=True).values).long()
            memex_mask = trigger.clone()
            memex_mask[:, memex_len:] -= trigger[:, :-memex_len]
            memex_mask = memex_mask.cumsum(dim=1)

            u = 1 - memex_mask
            u[:, 0] = 0
            u = u.cumsum(dim=1)
            assert u.min() == 0
            assert u.max() == input.size(1) - 1

            v = (
                (trigger.cumsum(dim=1) - trigger).cumsum(dim=1)
                + torch.randint(
                    input.size(1) - memex_len, (input.size(0), 1), device=t.device
                )
            ) * memex_mask
            assert v.min() >= 0
            assert v.max() < input.size(1)
            u = u * (1 - memex_mask) + v * memex_mask

            new_input = input[n, u]
            assert input.max() < vocabulary_size
            assert new_input.max() < vocabulary_size
            limits = trigger.clone()
            limits[:, memex_len - 1 :] += limits[:, : -(memex_len - 1)]
            assert limits.min() == 0
            assert limits.max() == 1
            new_input = new_input * (1 - limits) + marker_token * limits
            assert marker_token < vocabulary_size
            assert new_input.max() < vocabulary_size

            yield new_input, memex_mask

        else:
            yield input


######################################################################

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

assert args.batch_size % args.physical_batch_size == 0


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################

device_data = device

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )
    args.max_percents_of_test_in_train = -1

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )


elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )


elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(len_total=args.memory_len_total),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device_data,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        device=device_data,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device_data,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device_data,
    )

elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device_data,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.physical_batch_size,
        device=device_data,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device_data,
    )

elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        size=args.grid_size,
        nb_shapes=args.grid_nb_shapes,
        nb_colors=args.grid_nb_colors,
        logger=log_string,
        device=device_data,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device_data,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

if args.memex_proba > 0:
    memex_marker = vocabulary_size
    vocabulary_size += 1
else:
    # defined anyway because it is passed to add_memex_v3 in the training
    # loop below; it is never used when memex_proba is 0
    memex_marker = None

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_lines=args.nb_lines,
    caterpillar_height=args.caterpillar_height,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
    logger=log_string,
    args=args,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string(f"not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

# NOTE: nb_epochs_default is not defined in this file, so args.nb_epochs is
# expected to be > 0 (its default is 25)
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

######################################################################
# A bit of paranoia never hurts

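# The check below counts how many test sequences appear verbatim in the
# train set, and aborts if they amount to more than
# max_percents_of_test_in_train percent of the test set
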
if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################

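# Passing an extra argument of the form --calibrate=... (collected in
# sup_args above) runs a single pass over the training set to gather the
# statistics of the mygpt.Calibrator modules, prints them, and exits
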
987 if "calibrate" in sup_args:
988     for input in task.batches(split="train", desc="calibrate"):
989         input = input.to(device)
990         output = model(mygpt.BracketedSequence(input)).x
991
992     for n, m in model.named_modules():
993         for a in dir(m):
994             x = getattr(m, a)
995             if isinstance(x, mygpt.Calibrator):
996                 print(f"####### ${n} | ${a} ########################")
997                 mean, std = x.moments()
998                 print("mean\n", mean, "\n")
999                 print("std\n", std, "\n")
1000                 print(f"############################################\n\n")
1001
1002     exit(0)

##############################

nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = datetime.datetime.now()

it = 0

n_batch = 0

# Defined here so that the logging of l_memex / norm_regular / norm_memex
# below does not fail when the memex branch never runs (e.g. --memex_proba=0)
l_memex, norm_regular, norm_memex = 0.0, 0.0, 0.0

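# Helpers for gradient diagnostics: the_dot_products returns the squared
# norms of the gradients of two losses and their dot product,
# update_ave_grad maintains an exponential moving average of a loss'
# gradient in a parameter attribute called `name`, and norm returns the
# summed squared norm of those averaged gradients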
def the_dot_products(value1, value2, params):
    g1g1, g1g2, g2g2 = 0, 0, 0
    for p in params:
        g1 = torch.autograd.grad(value1, p, retain_graph=True)[0]
        g2 = torch.autograd.grad(value2, p, retain_graph=True)[0]
        g1g1 += g1.pow(2).sum()[None]
        g2g2 += g2.pow(2).sum()[None]
        g1g2 += (g1 * g2).sum()[None]
    return torch.cat([g1g1, g1g2, g2g2])


def update_ave_grad(value, params, name, eps=1e-3):
    for p in params:
        g = torch.autograd.grad(value, p, retain_graph=True)[0]
        ag = getattr(p, name) if hasattr(p, name) else 0
        setattr(p, name, (1 - eps) * ag + eps * g)


def norm(params, name):
    s = 0
    for p in params:
        s += getattr(p, name).pow(2).sum()
    return s


for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0

    memex_proba = (
        args.memex_proba
        if args.memex_nb_epochs is None or n_epoch < args.memex_nb_epochs
        else 0.0
    )

    log_string(f"memex_proba {memex_proba}")

    warnings.warn("memex v3", RuntimeWarning)
    train_batches = add_memex_v3(
        batches=task.batches(split="train"),
        memex_proba=memex_proba,
        marker_token=memex_marker,
    )

    def add_none(it):
        for x in it:
            yield x
        yield None

    nb_acc_samples = 0

    for input in add_none(train_batches):
        if input is not None:
            if type(input) is tuple:
                input, memex_mask = input
                memex_mask = memex_mask.to(device)
            else:
                memex_mask = None

            model.reset_inner_loss()
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x

            if memex_mask is None:
                loss = F.cross_entropy(output.transpose(1, 2), input)
            else:
                loss = F.cross_entropy(output.transpose(1, 2), input, reduction="none")
                loss_regular = (loss * (1 - memex_mask)).mean()
                loss_memex = (loss * memex_mask).mean()

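                # Occasionally re-estimate the weight of the memex loss from
                # exponential moving averages of the two gradients: l_memex
                # is 0 when the regular gradient dominates and approaches 1
                # when the memex gradient is much larger, so the memex term
                # is down-weighted relative to the regular one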
                if it < 100 or torch.rand(1) < 0.01:
                    update_ave_grad(loss_regular, model.parameters(), "grad_regular")
                    update_ave_grad(loss_memex, model.parameters(), "grad_memex")
                    norm_regular = norm(model.parameters(), "grad_regular")
                    norm_memex = norm(model.parameters(), "grad_memex")
                    l_memex = (
                        max(norm_regular, norm_memex) - norm_regular
                    ) / norm_memex

                loss = loss_regular + l_memex * loss_memex

            inner_loss = model.get_inner_loss()

            acc_train_loss += loss.item() * input.size(0)
            acc_train_inner_loss += inner_loss.item() * input.size(0)

            nb_train_samples += input.size(0)
            nb_samples_seen += input.size(0)

            total_loss = loss + (
                args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0
            )

            it += 1
            lr = get_lr(n_epoch, it)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr

                # log_string(f"learning_rate {lr}")

            total_loss.backward()
            nb_acc_samples += input.size(0)

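        # Gradient accumulation: the backward passes above accumulate over
        # physical batches, and the optimizer takes a step once batch_size
        # samples have been seen (the trailing None produced by add_none
        # flushes the last, possibly partial, accumulation)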
        if (input is None and nb_acc_samples > 0) or nb_acc_samples == args.batch_size:
            assert nb_acc_samples <= args.batch_size
            optimizer.step()
            grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()
            loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")
            lambda_file.write(
                f"{n_epoch} {n_batch} {l_memex} {norm_regular} {norm_memex}\n"
            )
            optimizer.zero_grad()
            nb_acc_samples = 0
            n_batch += 1

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        time_current_result = datetime.datetime.now()
        log_string(
            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
        )
        time_pred_result = time_current_result

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################