Update.
[mygptrnn.git] / main.py
1 #!/usr/bin/env python
2
3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
5
6 # Written by Francois Fleuret <francois@fleuret.org>
7
8 import math, sys, argparse, time, tqdm, os, datetime, warnings
9
10 import torch, torchvision
11 from torch import nn
12 from torch.nn import functional as F
13
14 import ffutils
15 import mygpt, tasks, problems
16
17 ######################################################################
18
19
20 def str2bool(x):
21     x = x.lower()
22     if x in {"1", "true", "yes"}:
23         return True
24     elif x in {"0", "false", "no"}:
25         return False
26     else:
27         raise ValueError
28
29
30 parser = argparse.ArgumentParser(
31     description="An implementation of GPT with cache.",
32     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
33 )
34
35 parser.add_argument(
36     "--task",
37     type=str,
38     default="twotargets",
39     help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
40 )
41
42 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
43
44 parser.add_argument("--result_dir", type=str, default=None)
45
46 parser.add_argument("--seed", type=int, default=0)
47
48 parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
49
50 parser.add_argument("--force_cpu", type=str2bool, default=False)
51
52 ########################################
53
54 parser.add_argument("--nb_epochs", type=int, default=50)
55
56 parser.add_argument("--batch_size", type=int, default=None)
57
58 parser.add_argument("--nb_train_samples", type=int, default=None)
59
60 parser.add_argument("--nb_test_samples", type=int, default=None)
61
62 parser.add_argument("--optim", type=str, default="adam")
63
64 ########################################
65
66 parser.add_argument("--nb_warmup_iter", type=int, default=100)
67
68 parser.add_argument("--nb_decay_iter", type=int, default=5000)
69
70 parser.add_argument("--learning_rate", type=float, default=6e-4)
71
72 parser.add_argument("--min_learning_rate", type=float, default=6e-5)
73
74 # legacy
75
76 parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)
77
78 parser.add_argument("--legacy_large_lr", type=float, default=1e-4)
79
80 parser.add_argument("--legacy_small_lr", type=float, default=2e-5)
81
82 parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)
83
84 ########################################
85
86 parser.add_argument("--model", type=str, default=None)
87
88 parser.add_argument("--attention", type=str, default=None)
89
90 parser.add_argument("--dim_model", type=int, default=None)
91
92 parser.add_argument("--dim_keys", type=int, default=None)
93
94 parser.add_argument("--dim_hidden", type=int, default=None)
95
96 parser.add_argument("--nb_heads", type=int, default=None)
97
98 parser.add_argument("--nb_lines", type=int, default=None)
99
100 parser.add_argument("--caterpillar_height", type=int, default=None)
101
102 parser.add_argument("--gate_dropout_proba", type=float, default=0.0)
103
104 parser.add_argument("--gate_dropout_sync", type=str2bool, default=True)
105
106 parser.add_argument("--gate_dropout_replace", type=str2bool, default=True)
107
108 parser.add_argument("--rho_inner_loss", type=float, default=0.0)
109
110 parser.add_argument("--nb_blocks", type=int, default=None)
111
112 parser.add_argument("--dropout", type=float, default=0.1)
113
114 ########################################
115
116 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
117
118 parser.add_argument("--no_checkpoint", action="store_true", default=False)
119
120 parser.add_argument("--continue_training", action="store_true", default=False)
121
122 parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
123
124 ##############################
125 # rpl options
126
127 parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
128
129 parser.add_argument("--rpl_max_input", type=int, default=9)
130
131 parser.add_argument("--rpl_prog_len", type=int, default=8)
132
133 parser.add_argument("--rpl_nb_runs", type=int, default=5)
134
135 parser.add_argument("--rpl_no_prog", action="store_true", default=False)
136
137 ##############################
138 # grid options
139
140 parser.add_argument("--grid_size", type=int, default=6)
141
142 parser.add_argument("--grid_nb_colors", type=int, default=6)
143
144 parser.add_argument("--grid_nb_shapes", type=int, default=6)
145
146 ##############################
147 # picoclvr options
148
149 parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
150
151 parser.add_argument("--picoclvr_height", type=int, default=12)
152
153 parser.add_argument("--picoclvr_width", type=int, default=16)
154
155 parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
156
157 ##############################
158 # Maze options
159
160 parser.add_argument("--maze_height", type=int, default=13)
161
162 parser.add_argument("--maze_width", type=int, default=21)
163
164 parser.add_argument("--maze_nb_walls", type=int, default=15)
165
166 ##############################
167 # Snake options
168
169 parser.add_argument("--snake_height", type=int, default=9)
170
171 parser.add_argument("--snake_width", type=int, default=12)
172
173 parser.add_argument("--snake_nb_colors", type=int, default=5)
174
175 parser.add_argument("--snake_length", type=int, default=200)
176
177 ##############################
178 # Stack options
179
180 parser.add_argument("--stack_nb_steps", type=int, default=100)
181
182 parser.add_argument("--stack_nb_stacks", type=int, default=3)
183
184 parser.add_argument("--stack_nb_digits", type=int, default=3)
185
186 parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
187
188 ##############################
189 # Expr options
190
191 parser.add_argument("--expr_nb_variables", type=int, default=5)
192
193 parser.add_argument("--expr_sequence_length", type=int, default=40)
194
195 parser.add_argument("--expr_operand_max", type=int, default=9)
196
197 parser.add_argument("--expr_result_max", type=int, default=99)
198
199 parser.add_argument("--expr_input_file", type=str, default=None)
200
201 ##############################
202 # Memory
203
204 parser.add_argument("--memory_len_total", type=int, default=32)
205
206 ##############################
207 # Mixing
208
209 parser.add_argument("--mixing_hard", action="store_true", default=False)
210
211 parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
212
213 ######################################################################
214
215 # args = parser.parse_args()
216
217 args, sup_args = parser.parse_known_args()
218
219 sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])
220
221 if args.result_dir is None:
222     args.result_dir = f"results_{args.task}_{args.model}"
223
224 ######################################################################
225
226 if not args.force_cpu and torch.cuda.is_available():
227     device = torch.device("cuda")
228     torch.backends.cuda.matmul.allow_tf32 = True
229 else:
230     device = torch.device("cpu")
231
232 ######################################################################
233
234 default_task_args = {
235     "addition": {
236         "model": "352M",
237         "batch_size": 25,
238         "nb_train_samples": 250000,
239         "nb_test_samples": 10000,
240     },
241     "byheart": {
242         "model": "37M",
243         "batch_size": 25,
244         "nb_train_samples": 50000,
245         "nb_test_samples": 10000,
246     },
247     "expr": {
248         "model": "352M",
249         "batch_size": 25,
250         "nb_train_samples": 2500000,
251         "nb_test_samples": 10000,
252     },
253     "grid": {
254         "model": "37M",
255         "batch_size": 25,
256         "nb_train_samples": 250000,
257         "nb_test_samples": 10000,
258     },
259     "qmlp": {
260         "model": "37M",
261         "batch_size": 10,
262         "nb_train_samples": 100000,
263         "nb_test_samples": 1000,
264     },
265     "guessop": {
266         "model": "352M",
267         "batch_size": 25,
268         "nb_train_samples": 1000000,
269         "nb_test_samples": 10000,
270     },
271     "learnop": {
272         "model": "37M",
273         "batch_size": 25,
274         "nb_train_samples": 50000,
275         "nb_test_samples": 10000,
276     },
277     "maze": {
278         "model": "37M",
279         "batch_size": 5,
280         "nb_train_samples": 100000,
281         "nb_test_samples": 10000,
282     },
283     "picoclvr": {
284         "model": "37M",
285         "batch_size": 25,
286         "nb_train_samples": 250000,
287         "nb_test_samples": 10000,
288     },
289     "rpl": {
290         "model": "352M",
291         "batch_size": 5,
292         "nb_train_samples": 2500000,
293         "nb_test_samples": 10000,
294     },
295     "snake": {
296         "model": "37M",
297         "batch_size": 25,
298         "nb_train_samples": 250000,
299         "nb_test_samples": 10000,
300     },
301     "stack": {
302         "model": "37M",
303         "batch_size": 25,
304         "nb_train_samples": 100000,
305         "nb_test_samples": 1000,
306     },
307     "twotargets": {
308         "model": "37M",
309         "batch_size": 25,
310         "nb_train_samples": 50000,
311         "nb_test_samples": 10000,
312     },
313     "memory": {
314         "model": "37M",
315         "batch_size": 25,
316         "nb_train_samples": 25000,
317         "nb_test_samples": 10000,
318     },
319     "mixing": {
320         "model": "37M",
321         "batch_size": 25,
322         "nb_train_samples": 250000,
323         "nb_test_samples": 10000,
324     },
325     "mnist": {
326         "model": "37M",
327         "batch_size": 10,
328         "nb_train_samples": 60000,
329         "nb_test_samples": 10000,
330     },
331 }
332
333 if args.task in default_task_args:
334     for k, v in default_task_args[args.task].items():
335         if getattr(args, k) is None:
336             setattr(args, k, v)
337
338 ######################################################################
339
340 default_model_args = {
341     "17K": {
342         "attention": "mha",
343         "dim_model": 32,
344         "dim_keys": 32,
345         "dim_hidden": 32,
346         "nb_heads": 2,
347         "nb_blocks": 2,
348     },
349     "17K-C": {
350         "attention": "caterpillar",
351         "dim_model": 32,
352         "dim_keys": 32,
353         "dim_hidden": 32,
354         "nb_heads": 2,
355         "nb_lines": 16,
356         "caterpillar_height": 4,
357         "nb_blocks": 2,
358     },
359     "4M": {
360         "attention": "mha",
361         "dim_model": 256,
362         "dim_keys": 32,
363         "dim_hidden": 1024,
364         "nb_heads": 4,
365         "nb_blocks": 6,
366     },
367     "4M-C": {
368         "attention": "caterpillar",
369         "dim_model": 256,
370         "dim_keys": 32,
371         "dim_hidden": 1024,
372         "nb_heads": 4,
373         "nb_lines": 32,
374         "caterpillar_height": 4,
375         "nb_blocks": 6,
376     },
377     "37M": {
378         "attention": "mha",
379         "dim_model": 512,
380         "dim_keys": 64,
381         "dim_hidden": 2048,
382         "nb_heads": 8,
383         "nb_blocks": 12,
384     },
385     "37M-C": {
386         "attention": "caterpillar",
387         "dim_model": 512,
388         "dim_keys": 64,
389         "dim_hidden": 2048,
390         "nb_heads": 8,
391         "nb_lines": 256,
392         "caterpillar_height": 32,
393         "nb_blocks": 12,
394     },
395     "122M": {
396         "attention": "mha",
397         "dim_model": 768,
398         "dim_keys": 64,
399         "dim_hidden": 2048,
400         "nb_heads": 8,
401         "nb_blocks": 24,
402     },
403     "122M-C": {
404         "attention": "caterpillar",
405         "dim_model": 768,
406         "dim_keys": 64,
407         "dim_hidden": 2048,
408         "nb_heads": 8,
409         "nb_lines": 128,
410         "nb_blocks": 24,
411     },
412     "352M": {
413         "attention": "mha",
414         "dim_model": 1024,
415         "dim_keys": 64,
416         "dim_hidden": 2048,
417         "nb_heads": 8,
418         "nb_blocks": 48,
419     },
420     "352M-C": {
421         "attention": "caterpillar",
422         "dim_model": 1024,
423         "dim_keys": 64,
424         "dim_hidden": 2048,
425         "nb_heads": 8,
426         "nb_lines": 128,
427         "nb_blocks": 48,
428     },
429 }
430
431 if args.model in default_model_args:
432     for k, v in default_model_args[args.model].items():
433         if getattr(args, k) is None:
434             setattr(args, k, v)
435 else:
436     raise ValueError(f"Unknown model {args.model}")
437
438 ######################################################################
439
440 try:
441     os.mkdir(args.result_dir)
442 except FileExistsError:
443     if not args.continue_training:
444         print(f"result directory {args.result_dir} already exists")
445         exit(1)
446
447 loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")
448
449 log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
450
451 if args.seed >= 0:
452     # torch.backends.cudnn.deterministic = True
453     # torch.backends.cudnn.benchmark = False
454     # torch.use_deterministic_algorithms(True)
455     torch.manual_seed(args.seed)
456     if torch.cuda.is_available():
457         torch.cuda.manual_seed_all(args.seed)
458
459 ######################################################################
460
461
462 def log_string(s):
463     t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
464
465     if log_file is not None:
466         log_file.write(t + s + "\n")
467         log_file.flush()
468
469     print(t + s)
470     sys.stdout.flush()
471
472
473 with os.popen("sha256sum *.py") as f:
474     for l in f:
475         log_string(f"sha256sum {l.strip()}")
476
477 now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
478 os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")
479
480 log_string(f"argv {' '.join(sys.argv)}")
481
482 for n in vars(args):
483     log_string(f"args.{n} {getattr(args, n)}")
484
485 for k, v in sup_args.items():
486     log_string(f'sup_args["{k}"] "{v}"')
487
488
489 ######################################################################
490
491
492 def get_lr(n_epoch, it):
493     if args.legacy_lr_schedule:
494         # my crude scheduling to compare to previous baseline, added
495         # warmup though
496
497         if it < args.nb_warmup_iter:
498             return args.legacy_large_lr * it / args.nb_warmup_iter
499         elif n_epoch < args.legacy_nb_epoch_large_lr:
500             return args.legacy_large_lr
501         else:
502             return args.legacy_small_lr
503
504     # from nanoGPT
505
506     # 1) linear warmup for warmup_iter steps
507     if it < args.nb_warmup_iter:
508         return args.learning_rate * it / args.nb_warmup_iter
509     # 2) if it > nb_decay_iter, return min learning rate
510     if it > args.nb_decay_iter:
511         return args.min_learning_rate
512     # 3) in between, use cosine decay down to min learning rate
513     decay_ratio = (it - args.nb_warmup_iter) / (
514         args.nb_decay_iter - args.nb_warmup_iter
515     )
516     coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
517     return args.min_learning_rate + coeff * (
518         args.learning_rate - args.min_learning_rate
519     )
520
521
522 ######################################################################
523
524
525 assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
526
527
528 def picoclvr_pruner_horizontal_green(p):
529     return not ("green" in p and ("left" in p or "right" in p))
530
531
532 picoclvr_pruner_train = (
533     picoclvr_pruner_horizontal_green
534     if args.picocvlr_prune_properties in {"train+eval"}
535     else None
536 )
537
538 picoclvr_pruner_eval = (
539     (lambda p: not picoclvr_pruner_horizontal_green(p))
540     if args.picocvlr_prune_properties in {"train+eval", "eval"}
541     else None
542 )
543
544 ######################################################################
545
546 device_data = device
547
548 if args.task == "byheart":
549     task = tasks.SandBox(
550         problem=problems.ProblemByHeart(),
551         nb_train_samples=args.nb_train_samples,
552         nb_test_samples=args.nb_test_samples,
553         batch_size=args.batch_size,
554         logger=log_string,
555         device=device_data,
556     )
557     args.max_percents_of_test_in_train = -1
558
559 elif args.task == "learnop":
560     task = tasks.SandBox(
561         problem=problems.ProblemLearnOperator(),
562         nb_train_samples=args.nb_train_samples,
563         nb_test_samples=args.nb_test_samples,
564         batch_size=args.batch_size,
565         logger=log_string,
566         device=device_data,
567     )
568
569
570 elif args.task == "guessop":
571     task = tasks.SandBox(
572         problem=problems.ProblemGuessOperator(),
573         nb_train_samples=args.nb_train_samples,
574         nb_test_samples=args.nb_test_samples,
575         batch_size=args.batch_size,
576         logger=log_string,
577         device=device_data,
578     )
579
580
581 elif args.task == "twotargets":
582     task = tasks.SandBox(
583         problem=problems.ProblemTwoTargets(),
584         nb_train_samples=args.nb_train_samples,
585         nb_test_samples=args.nb_test_samples,
586         batch_size=args.batch_size,
587         logger=log_string,
588         device=device_data,
589     )
590
591 elif args.task == "memory":
592     task = tasks.SandBox(
593         problem=problems.ProblemMemory(len_total=args.memory_len_total),
594         nb_train_samples=args.nb_train_samples,
595         nb_test_samples=args.nb_test_samples,
596         batch_size=args.batch_size,
597         logger=log_string,
598         device=device_data,
599     )
600
601 elif args.task == "mixing":
602     task = tasks.SandBox(
603         problem=problems.ProblemMixing(
604             hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
605         ),
606         nb_train_samples=args.nb_train_samples,
607         nb_test_samples=args.nb_test_samples,
608         batch_size=args.batch_size,
609         logger=log_string,
610         device=device_data,
611     )
612
613 elif args.task == "addition":
614     task = tasks.SandBox(
615         problem=problems.ProblemAddition(),
616         nb_train_samples=args.nb_train_samples,
617         nb_test_samples=args.nb_test_samples,
618         batch_size=args.batch_size,
619         logger=log_string,
620         device=device_data,
621     )
622
623 elif args.task == "picoclvr":
624     task = tasks.PicoCLVR(
625         nb_train_samples=args.nb_train_samples,
626         nb_test_samples=args.nb_test_samples,
627         batch_size=args.batch_size,
628         height=args.picoclvr_height,
629         width=args.picoclvr_width,
630         nb_colors=args.picoclvr_nb_colors,
631         logger=log_string,
632         device=device_data,
633         pruner_train=picoclvr_pruner_train,
634         pruner_eval=picoclvr_pruner_eval,
635     )
636
637 elif args.task == "mnist":
638     task = tasks.MNIST(
639         nb_train_samples=args.nb_train_samples,
640         nb_test_samples=args.nb_test_samples,
641         batch_size=args.batch_size,
642         device=device_data,
643     )
644
645 elif args.task == "maze":
646     task = tasks.Maze(
647         nb_train_samples=args.nb_train_samples,
648         nb_test_samples=args.nb_test_samples,
649         batch_size=args.batch_size,
650         height=args.maze_height,
651         width=args.maze_width,
652         nb_walls=args.maze_nb_walls,
653         device=device_data,
654     )
655
656 elif args.task == "snake":
657     task = tasks.Snake(
658         nb_train_samples=args.nb_train_samples,
659         nb_test_samples=args.nb_test_samples,
660         batch_size=args.batch_size,
661         height=args.snake_height,
662         width=args.snake_width,
663         nb_colors=args.snake_nb_colors,
664         length=args.snake_length,
665         prompt_length=args.snake_length // 2,
666         device=device_data,
667     )
668
669 elif args.task == "stack":
670     task = tasks.Stack(
671         nb_train_samples=args.nb_train_samples,
672         nb_test_samples=args.nb_test_samples,
673         batch_size=args.batch_size,
674         logger=log_string,
675         nb_steps=args.stack_nb_steps,
676         nb_stacks=args.stack_nb_stacks,
677         nb_digits=args.stack_nb_digits,
678         fraction_values_for_train=args.stack_fraction_values_for_train,
679         device=device_data,
680     )
681
682 elif args.task == "expr":
683     task = tasks.Expr(
684         nb_train_samples=args.nb_train_samples,
685         nb_test_samples=args.nb_test_samples,
686         nb_variables=args.expr_nb_variables,
687         sequence_length=args.expr_sequence_length,
688         operand_max=args.expr_operand_max,
689         result_max=args.expr_result_max,
690         batch_size=args.batch_size,
691         device=device_data,
692     )
693
694 elif args.task == "rpl":
695     task = tasks.RPL(
696         nb_train_samples=args.nb_train_samples,
697         nb_test_samples=args.nb_test_samples,
698         batch_size=args.batch_size,
699         nb_starting_values=args.rpl_nb_starting_values,
700         max_input=args.rpl_max_input,
701         prog_len=args.rpl_prog_len,
702         nb_runs=args.rpl_nb_runs,
703         no_prog=args.rpl_no_prog,
704         logger=log_string,
705         device=device_data,
706     )
707
708 elif args.task == "grid":
709     task = tasks.Grid(
710         nb_train_samples=args.nb_train_samples,
711         nb_test_samples=args.nb_test_samples,
712         batch_size=args.batch_size,
713         size=args.grid_size,
714         nb_shapes=args.grid_nb_shapes,
715         nb_colors=args.grid_nb_colors,
716         logger=log_string,
717         device=device_data,
718     )
719
720 elif args.task == "qmlp":
721     task = tasks.QMLP(
722         nb_train_samples=args.nb_train_samples,
723         nb_test_samples=args.nb_test_samples,
724         batch_size=args.batch_size,
725         result_dir=args.result_dir,
726         logger=log_string,
727         device=device_data,
728     )
729
730 else:
731     raise ValueError(f"Unknown task {args.task}")
732
733 ######################################################################
734
735 log_string(f"device {device}")
736
737 vocabulary_size = task.vocabulary_size()
738
739 log_string(f"vocabulary_size {vocabulary_size}")
740
741 ##############################
742
743 model = mygpt.MyGPT(
744     vocabulary_size=vocabulary_size,
745     dim_model=args.dim_model,
746     dim_keys=args.dim_keys,
747     dim_hidden=args.dim_hidden,
748     nb_heads=args.nb_heads,
749     nb_lines=args.nb_lines,
750     caterpillar_height=args.caterpillar_height,
751     nb_blocks=args.nb_blocks,
752     causal=True,
753     dropout=args.dropout,
754     attention_layer=args.attention,
755     logger=log_string,
756     args=args,
757 )
758
759 model.to(device)
760
761 nb_parameters = sum(p.numel() for p in model.parameters())
762 log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
763
764 ######################################################################
765
766 nb_epochs_finished = 0
767
768 if args.no_checkpoint:
769     log_string(f"not trying to load checkpoint.")
770
771 else:
772     try:
773         checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
774         checkpoint = torch.load(checkpoint_name)
775         nb_epochs_finished = checkpoint["nb_epochs_finished"]
776         model.load_state_dict(checkpoint["model_state"])
777         torch.set_rng_state(checkpoint["rng_state"])
778         if torch.cuda.is_available():
779             torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
780
781         log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
782
783     except FileNotFoundError:
784         log_string("starting from scratch.")
785
786     except:
787         log_string("error when loading the checkpoint.")
788         exit(1)
789
790 ######################################################################
791
792 if args.task == "expr" and args.expr_input_file is not None:
793     task.produce_results(
794         n_epoch=nb_epochs_finished,
795         model=model,
796         result_dir=args.result_dir,
797         logger=log_string,
798         deterministic_synthesis=args.deterministic_synthesis,
799         input_file=args.expr_input_file,
800     )
801
802     exit(0)
803
804 ######################################################################
805
806 nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
807
808 # Compute the entropy of the training tokens
809
810 token_count = 0
811 for input in task.batches(split="train"):
812     token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
813 token_probas = token_count / token_count.sum()
814 entropy = -torch.xlogy(token_probas, token_probas).sum()
815 train_set_perplexity = math.exp(entropy)
816
817 ######################################################################
818 # A bit of paranoia never hurts
819
820 if args.max_percents_of_test_in_train >= 0:
821
822     def subsets_as_tuples(batches, cs):
823         s = set()
824         for batch in batches:
825             for x in batch:
826                 s.add(tuple([v.item() for v in x]))
827                 if len(s) == cs:
828                     yield s
829                     s = set()
830         yield s
831
832     nb_test, nb_in_train = 0, 0
833     for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
834         in_train = set()
835         for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
836             in_train.update(test_subset.intersection(train_subset))
837         nb_in_train += len(in_train)
838         nb_test += len(test_subset)
839
840     log_string(
841         f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
842     )
843
844     assert (
845         nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
846     ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
847
848 ##############################
849
850 if "calibrate" in sup_args:
851     for input in task.batches(split="train", desc="calibrate"):
852         input = input.to(device)
853         output = model(mygpt.BracketedSequence(input)).x
854
855     for n, m in model.named_modules():
856         for a in dir(m):
857             x = getattr(m, a)
858             if isinstance(x, mygpt.Calibrator):
859                 print(f"####### ${n} | ${a} ########################")
860                 mean, std = x.moments()
861                 print("mean\n", mean, "\n")
862                 print("std\n", std, "\n")
863                 print(f"############################################\n\n")
864
865     exit(0)
866
867 ##############################
868
869 nb_samples_seen = 0
870
871 if nb_epochs_finished >= nb_epochs:
872     task.produce_results(
873         n_epoch=nb_epochs_finished,
874         model=model,
875         result_dir=args.result_dir,
876         logger=log_string,
877         deterministic_synthesis=args.deterministic_synthesis,
878     )
879
880 time_pred_result = datetime.datetime.now()
881
882 it = 0
883
884 n_batch = 0
885
886 for n_epoch in range(nb_epochs_finished, nb_epochs):
887     if args.optim == "sgd":
888         optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
889     elif args.optim == "adam":
890         optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
891     elif args.optim == "adamw":
892         optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
893     else:
894         raise ValueError(f"Unknown optimizer {args.optim}.")
895
896     model.train()
897
898     nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0
899
900     for input in task.batches(split="train"):
901         model.reset_inner_loss()
902         input = input.to(device)
903
904         output = model(mygpt.BracketedSequence(input)).x
905         loss = F.cross_entropy(output.transpose(1, 2), input)
906         inner_loss = model.get_inner_loss()
907
908         acc_train_loss += loss.item() * input.size(0)
909         acc_train_inner_loss += inner_loss.item() * input.size(0)
910
911         nb_train_samples += input.size(0)
912         nb_samples_seen += input.size(0)
913
914         total_loss = loss + (
915             args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0
916         )
917
918         it += 1
919         lr = get_lr(n_epoch, it)
920         for param_group in optimizer.param_groups:
921             param_group["lr"] = lr
922
923         # log_string(f"learning_rate {lr}")
924
925         optimizer.zero_grad()
926         total_loss.backward()
927         optimizer.step()
928
929         grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()
930
931         loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")
932
933         n_batch += 1
934
935     with torch.autograd.no_grad():
936         model.eval()
937
938         nb_test_samples, acc_test_loss = 0, 0.0
939
940         for input in task.batches(split="test"):
941             input = input.to(device)
942
943             output = model(mygpt.BracketedSequence(input)).x
944             loss = F.cross_entropy(output.transpose(1, 2), input)
945             acc_test_loss += loss.item() * input.size(0)
946             nb_test_samples += input.size(0)
947
948         log_string(
949             f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
950         )
951
952         task.produce_results(
953             n_epoch=n_epoch,
954             model=model,
955             result_dir=args.result_dir,
956             logger=log_string,
957             deterministic_synthesis=args.deterministic_synthesis,
958         )
959
960         train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
961         test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
962
963         log_string(
964             f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
965         )
966
967         time_current_result = datetime.datetime.now()
968         log_string(
969             f"next_result {time_current_result + (time_current_result - time_pred_result)}"
970         )
971         time_pred_result = time_current_result
972
973     checkpoint = {
974         "nb_epochs_finished": n_epoch + 1,
975         "model_state": model.state_dict(),
976         "rng_state": torch.get_rng_state(),
977     }
978
979     if torch.cuda.is_available():
980         checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
981
982     checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
983     torch.save(checkpoint, checkpoint_name)
984     log_string(f"saved checkpoint {checkpoint_name}")
985
986 ######################################################################