#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import ffutils
import mygpt, tasks, problems

######################################################################


def str2bool(x):
    x = x.lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError(f"cannot interpret {x!r} as a boolean")


parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

parser.add_argument("--force_cpu", type=str2bool, default=False)

########################################

parser.add_argument("--nb_epochs", type=int, default=50)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

########################################

parser.add_argument("--nb_warmup_iter", type=int, default=100)

parser.add_argument("--nb_decay_iter", type=int, default=5000)

parser.add_argument("--learning_rate", type=float, default=6e-4)

parser.add_argument("--min_learning_rate", type=float, default=6e-5)

# legacy

parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

parser.add_argument("--legacy_large_lr", type=float, default=1e-4)

parser.add_argument("--legacy_small_lr", type=float, default=2e-5)

parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--attention", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_lines", type=int, default=None)

parser.add_argument("--caterpillar_height", type=int, default=None)

parser.add_argument("--gate_dropout_proba", type=float, default=0.0)

parser.add_argument("--gate_dropout_sync", type=str2bool, default=False)

parser.add_argument("--rho_inner_loss", type=float, default=0.0)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--continue_training", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# rpl options

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################
# grid options

parser.add_argument("--grid_size", type=int, default=6)

parser.add_argument("--grid_nb_colors", type=int, default=6)

parser.add_argument("--grid_nb_shapes", type=int, default=6)

##############################
# picoclvr options

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picoclvr_prune_properties", type=str, default="none")

##############################
# Maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################
# Snake options

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################
# Stack options

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################
# Expr options

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################
# Memory

parser.add_argument("--memory_len_total", type=int, default=32)

##############################
# Mixing

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################

# args = parser.parse_args()

args, sup_args = parser.parse_known_args()

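# extra arguments not declared above are expected as "--key=value" and are
# collected verbatim in the sup_args dict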
sup_args = dict([x.removeprefix("--").split("=", 1) for x in sup_args])

if args.result_dir is None:
    args.result_dir = f"results_{args.task}_{args.model}"

######################################################################

if not args.force_cpu and torch.cuda.is_available():
    device = torch.device("cuda")
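    # allow TensorFloat-32 matmuls: much faster on recent GPUs, at a small
    # precision cost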
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

default_task_args = {
    "addition": {
        "model": "352M",
        "batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "byheart": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "expr": {
        "model": "352M",
        "batch_size": 25,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "grid": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "qmlp": {
        "model": "37M",
        "batch_size": 10,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "guessop": {
        "model": "352M",
        "batch_size": 25,
        "nb_train_samples": 1000000,
        "nb_test_samples": 10000,
    },
    "learnop": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "maze": {
        "model": "37M",
        "batch_size": 5,
        "nb_train_samples": 100000,
        "nb_test_samples": 10000,
    },
    "picoclvr": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "rpl": {
        "model": "352M",
        "batch_size": 5,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "snake": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "stack": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "twotargets": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "memory": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 25000,
        "nb_test_samples": 10000,
    },
    "mixing": {
        "model": "37M",
        "batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "mnist": {
        "model": "37M",
        "batch_size": 10,
        "nb_train_samples": 60000,
        "nb_test_samples": 10000,
    },
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################

default_model_args = {
    "17K": {
        "attention": "mha",
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_blocks": 2,
    },
    "17K-C": {
        "attention": "caterpillar",
        "dim_model": 32,
        "dim_keys": 32,
        "dim_hidden": 32,
        "nb_heads": 2,
        "nb_lines": 16,
        "caterpillar_height": 4,
        "nb_blocks": 2,
    },
    "4M": {
        "attention": "mha",
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_blocks": 6,
    },
    "4M-C": {
        "attention": "caterpillar",
        "dim_model": 256,
        "dim_keys": 32,
        "dim_hidden": 1024,
        "nb_heads": 4,
        "nb_lines": 32,
        "caterpillar_height": 4,
        "nb_blocks": 6,
    },
    "37M": {
        "attention": "mha",
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
    "37M-C": {
        "attention": "caterpillar",
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 256,
        "caterpillar_height": 32,
        "nb_blocks": 12,
    },
    "122M": {
        "attention": "mha",
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 24,
    },
    "122M-C": {
        "attention": "caterpillar",
        "dim_model": 768,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 128,
        "nb_blocks": 24,
    },
    "352M": {
        "attention": "mha",
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 48,
    },
    "352M-C": {
        "attention": "caterpillar",
        "dim_model": 1024,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_lines": 128,
        "nb_blocks": 48,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.continue_training:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


with os.popen("sha256sum *.py") as f:
    for l in f:
        log_string(f"sha256sum {l.strip()}")

now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

for k, v in sup_args.items():
    log_string(f'sup_args["{k}"] "{v}"')


######################################################################


def get_lr(n_epoch, it):
    if args.legacy_lr_schedule:
        # my crude scheduling to compare to previous baseline, added
        # warmup though

        if it < args.nb_warmup_iter:
            return args.legacy_large_lr * it / args.nb_warmup_iter
        elif n_epoch < args.legacy_nb_epoch_large_lr:
            return args.legacy_large_lr
        else:
            return args.legacy_small_lr

    # from nanoGPT

    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
    # 2) if it > nb_decay_iter, return min learning rate
    if it > args.nb_decay_iter:
        return args.min_learning_rate
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - args.nb_warmup_iter) / (
        args.nb_decay_iter - args.nb_warmup_iter
    )
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return args.min_learning_rate + coeff * (
        args.learning_rate - args.min_learning_rate
    )


######################################################################


assert args.picoclvr_prune_properties in {"none", "train+eval", "eval"}


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picoclvr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picoclvr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################

device_data = device

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )
    args.max_percents_of_test_in_train = -1

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(len_total=args.memory_len_total),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device_data,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device_data,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        device=device_data,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device_data,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device_data,
    )

elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device_data,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.batch_size,
        device=device_data,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device_data,
    )

elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        size=args.grid_size,
        nb_shapes=args.grid_nb_shapes,
        nb_colors=args.grid_nb_colors,
        logger=log_string,
        device=device_data,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device_data,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_lines=args.nb_lines,
    caterpillar_height=args.caterpillar_height,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
    logger=log_string,
    args=args,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
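        # resume: restore the epoch counter, the model weights, and the RNG
        # states saved with the checkpoint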
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception as e:
        log_string(f"error when loading the checkpoint: {e}")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

assert args.nb_epochs > 0, "nb_epochs must be positive"
nb_epochs = args.nb_epochs

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
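# exp(entropy) is the perplexity of the empirical unigram distribution of the
# training tokens, a reference point for the prediction perplexities below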

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

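    # compare test and train sequences as tuples of token values, in chunks
    # of cs sequences at a time, to keep the memory footprint bounded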
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################

if "calibrate" in sup_args:
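    # one forward pass over the training set, without gradient steps, so that
    # any mygpt.Calibrator attached to the model can accumulate its statistics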
    for input in task.batches(split="train", desc="calibrate"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x

    for n, m in model.named_modules():
        for a in dir(m):
            x = getattr(m, a)
            if isinstance(x, mygpt.Calibrator):
                print(f"####### {n} | {a} ########################")
                mean, std = x.moments()
                print("mean\n", mean, "\n")
                print("std\n", std, "\n")
                print("############################################\n\n")

    exit(0)

##############################

nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = datetime.datetime.now()

it = 0

n_batch = 0

for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0

    for input in task.batches(split="train"):
        model.reset_inner_loss()
        input = input.to(device)

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        inner_loss = model.get_inner_loss()

        acc_train_loss += loss.item() * input.size(0)
        acc_train_inner_loss += inner_loss.item() * input.size(0)

        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        total_loss = loss + (
            args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0
        )

        it += 1
        lr = get_lr(n_epoch, it)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

        # log_string(f"learning_rate {lr}")

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

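        # global L2 norm of the gradient, logged to help spot training
        # instabilities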
        grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()

        loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")

        n_batch += 1

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        time_current_result = datetime.datetime.now()
        log_string(
            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
        )
        time_pred_result = time_current_result

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################