#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, itertools, os

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import mygpt, tensorstack

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(description="Solving maze shortest paths with a GPT.")

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=200000)

parser.add_argument("--nb_test_samples", type=int, default=50000)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-3)

parser.add_argument(
    "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
)

parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--one_shot", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

parser.add_argument("--oneshot_mode", type=str, default="head")

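# Example invocations (illustrative; all flags are defined above):
#   python beaver.py --result_dir=results_maze
#   python beaver.py --result_dir=results_maze --one_shot --oneshot_mode=deep
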
######################################################################

args = parser.parse_args()

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################

# ar_mask is a Boolean matrix of the same shape as input, with 1s on
# the tokens that should be generated
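#
# The generation works in place: the prefix of the sequences before the
# first masked position is fed to the model once to fill its cache,
# then the remaining positions are generated one token at a time,
# either greedily (argmax) or by sampling from the softmax of the
# logits, and written back only where ar_mask is 1.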


def masked_inplace_autoregression(model, batch_size, input, ar_mask):
    for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
        i = (ar_mask.sum(0) > 0).nonzero()
        if i.min() > 0:
            # Needed to initialize the model's cache
            model(mygpt.BracketedSequence(input, 0, i.min()))
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


######################################################################

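# The perplexity is the exponential of the average per-token
# cross-entropy on a split, with the exponent clamped at 100 to avoid
# overflow early in training.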
def compute_perplexity(model, split="train"):
    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        nb_samples, acc_loss = 0, 0.0

        for input in task.batches(split=split):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_loss += loss.item() * input.size(0)
            nb_samples += input.size(0)

        model.train(t)

        return math.exp(min(100, acc_loss / nb_samples))


######################################################################

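# One-shot prediction: the GPT is frozen and used as a feature
# extractor, and a small MLP head is trained to map its hidden state at
# each maze cell to the 4-way policy (next-move distribution) of the
# shortest path. With --oneshot_mode=deep, the head reads the
# concatenated activations of all the blocks (hence the nb_blocks * 2
# factor below); otherwise it reads the final activations only. The
# exact meaning of the mode string is defined by mygpt's forward().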
def one_shot(gpt, task):
    t = gpt.training
    gpt.eval()
    dim_in = args.dim_model * (args.nb_blocks * 2 if args.oneshot_mode == "deep" else 1)
    model = nn.Sequential(
        nn.Linear(dim_in, args.dim_model),
        nn.ReLU(),
        nn.Linear(args.dim_model, args.dim_model),
        nn.ReLU(),
        nn.Linear(args.dim_model, 4),
    ).to(device)

    for n_epoch in range(args.nb_epochs):
        learning_rate = learning_rate_schedule[n_epoch]
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        acc_train_loss, nb_train_samples = 0, 0
        for input, policies in task.policy_batches(split="train"):
            mask = input.unsqueeze(-1) == maze.v_empty
            output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
            output = model(output_gpt)
            targets = policies.permute(0, 2, 1) * mask
            output = output * mask
            # loss = (output.softmax(-1) - targets).abs().max(-1).values.mean()
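            # Cross-entropy between the head's action distribution and
            # the target policy, summed over the four actions and
            # normalized by the number of empty cells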
            loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
            acc_train_loss += loss.item() * input.size(0)
            nb_train_samples += input.size(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        acc_test_loss, nb_test_samples = 0, 0
        for input, policies in task.policy_batches(split="test"):
            mask = input.unsqueeze(-1) == maze.v_empty
            output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
            output = model(output_gpt)
            targets = policies.permute(0, 2, 1) * mask
            output = output * mask
            # loss = (output.softmax(-1) - targets).abs().max(-1).values.mean()
            loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"diff_ce {n_epoch} train {acc_train_loss/nb_train_samples} test {acc_test_loss/nb_test_samples}"
        )

        # -------------------
        input = task.test_input[:32, : task.height * task.width]
        targets = task.test_policies[:32].permute(0, 2, 1)
        output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
        output = model(output_gpt)
        # losses = (-output.log_softmax(-1) * targets + targets.xlogy(targets)).sum(-1)
        # losses = losses * mask
        # losses = losses / losses.max()
        # losses = (output.softmax(-1) - targets).abs().max(-1).values
        # losses = (losses >= 0.05).float()
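        # A cell counts as an error when the head's argmax action has
        # zero probability under the target policy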
        losses = (
            (F.one_hot(output.argmax(-1), num_classes=4) * targets).sum(-1) == 0
        ).float()
        losses = losses.reshape(-1, args.maze_height, args.maze_width)
        input = input.reshape(-1, args.maze_height, args.maze_width)
        maze.save_image(
            os.path.join(
                args.result_dir, f"oneshot_{args.oneshot_mode}_{n_epoch:04d}.png"
            ),
            mazes=input,
            score_paths=losses,
        )
        # -------------------

    gpt.train(t)


######################################################################


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass


######################################################################

import maze

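# Each sample is the flattened maze (height * width tokens) followed by
# the flattened path, so a full sequence is 2 * height * width tokens;
# map2seq/seq2map convert between the map and sequence representations.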
class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, train_policies = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
        self.train_policies = train_policies.flatten(-2).to(device)

        test_mazes, test_paths, test_policies = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
        self.test_policies = test_policies.flatten(-2).to(device)

        self.nb_codes = self.train_input.max() + 1

    def batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

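    # Policy batches are restricted to the maze part of the sequences,
    # and the target policies are zeroed out on wall cells.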
    def policy_batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        policies = self.train_policies if split == "train" else self.test_policies
        input = input[:, : self.height * self.width]
        policies = policies * (input != maze.v_wall)[:, None]

        if nb_to_use > 0:
            input = input[:nb_to_use]
            policies = policies[:nb_to_use]

        for batch in tqdm.tqdm(
            zip(input.split(self.batch_size), policies.split(self.batch_size)),
            dynamic_ncols=True,
            desc=f"epoch-{split}",
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

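    # Regenerates the path part of each sequence autoregressively from
    # the maze part, and counts how many of the generated paths
    # maze.path_correctness() accepts.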
    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct

    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            train_nb_total, train_nb_correct = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            input = self.test_input[:32]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)
            maze.save_image(
                os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
                mazes=mazes,
                target_paths=paths,
                predicted_paths=predicted_paths,
                path_correct=maze.path_correctness(mazes, predicted_paths),
            )

            model.train(t)


######################################################################

log_string(f"device {device}")


task = TaskMaze(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.batch_size,
    height=args.maze_height,
    width=args.maze_width,
    nb_walls=args.maze_nb_walls,
    device=device,
)


vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

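# Resume from a checkpoint if one is present in the result directory,
# restoring the model weights, the number of finished epochs, and the
# RNG states so that a resumed run continues the same sample sequence.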
nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

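# Empirical entropy of the token distribution of the training set; its
# exponential is the perplexity of a unigram model and serves as a
# reference value for the prediction perplexities logged below.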
token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

##############################

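# The learning rate schedule is either "cos" (half-cosine decay from
# --learning_rate towards 0 over --nb_epochs) or a comma-separated list
# of epoch:lr pairs, where each given rate is kept until the next
# listed epoch.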
if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")

##############################

if args.one_shot:
    one_shot(model, task)
    exit(0)

##############################

if nb_epochs_finished >= args.nb_epochs:
    n_epoch = nb_epochs_finished
    train_perplexity = compute_perplexity(model, split="train")
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    exit(0)

##############################

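# Main training loop. A fresh optimizer is created at every epoch so
# that the learning rate follows the schedule (which also resets the
# moment estimates of adam/adamw at each epoch).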
for n_epoch in range(nb_epochs_finished, args.nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################