#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, itertools, os

import torch, torchvision
from torch import nn
from torch.nn import functional as F

import mygpt, tensorstack

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    # TF32 matmuls trade a few mantissa bits for a large speed-up on
    # recent GPUs
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(description="Maze shortest-path solving with a GPT.")

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=200000)

parser.add_argument("--nb_test_samples", type=int, default=50000)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-3)

parser.add_argument(
    "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
)
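# Either a comma-separated list of "epoch: learning rate" pairs, each
# applied from the given epoch onward, or "cos" for a half-cosine
# decay; see the schedule construction further down.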

parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--one_shot", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

######################################################################

args = parser.parse_args()

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


# ar_mask is a Boolean matrix of the same shape as input, with 1s on
# the tokens that should be generated


def masked_inplace_autoregression(model, batch_size, input, ar_mask):
    for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
        # Columns where at least one sequence needs a generated token
        i = (ar_mask.sum(0) > 0).nonzero()
        if i.min() > 0:
            # Needed to initialize the model's cache
            model(mygpt.BracketedSequence(input, 0, i.min()))
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            # Write the sampled token only where ar_mask is 1
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]

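# A minimal usage sketch (hypothetical tensors, not executed here): to
# regenerate everything after a 3-token prefix one would do
#
#     seq = torch.zeros(4, 10, dtype=torch.int64, device=device)
#     seq[:, :3] = prefix_tokens  # hypothetical (4, 3) tensor of token indices
#     ar_mask = torch.zeros(4, 10, dtype=torch.int64, device=device)
#     ar_mask[:, 3:] = 1
#     masked_inplace_autoregression(model, args.batch_size, seq, ar_mask)
#
# after which seq[:, 3:] holds generated tokens while seq[:, :3] is
# untouched.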

######################################################################


def compute_perplexity(model, split="train"):
    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        nb_samples, acc_loss = 0, 0.0

        for input in task.batches(split=split):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_loss += loss.item() * input.size(0)
            nb_samples += input.size(0)

        model.train(t)

        return math.exp(min(100, acc_loss / nb_samples))

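# The value returned above is exp of the mean per-token cross-entropy,
# i.e. the per-token perplexity, with the exponent clamped at 100 to
# avoid overflow. For instance a mean loss of log(5) ~ 1.61 nats gives
# a perplexity of 5: on average the model is as uncertain as a uniform
# choice among 5 tokens.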

######################################################################


def one_shot(gpt, task):
    pass


######################################################################


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass


######################################################################

import maze


class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
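
    # Shape sketch (using the defaults height=13, width=21): map2seq
    # flattens a maze batch of shape (N, 13, 21) and a path batch of the
    # same shape, and concatenates them into sequences of shape
    # (N, 2*13*21) = (N, 546); seq2map inverts this, yielding the two
    # (N, 13, 21) maps back.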

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        mazes_train, paths_train = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        mazes_train, paths_train = mazes_train.to(device), paths_train.to(device)
        self.train_input = self.map2seq(mazes_train, paths_train)

        mazes_test, paths_test = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        mazes_test, paths_test = mazes_test.to(device), paths_test.to(device)
        self.test_input = self.map2seq(mazes_test, paths_test)

        # int(...) so the vocabulary size is a plain Python integer
        # rather than a zero-dimensional tensor
        self.nb_codes = int(self.train_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            # The first height*width tokens encode the maze and are kept;
            # the path tokens that follow are erased and regenerated
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct

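    # Given the .long().sum() accumulation above, maze.path_correctness
    # presumably returns one Boolean per sample, flagging whether the
    # generated path actually solves its maze; nb_correct thus counts
    # exactly-solved mazes.
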
    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            train_nb_total, train_nb_correct = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            input = self.test_input[:32]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)
            maze.save_image(
                os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
                mazes,
                paths,
                predicted_paths,
                maze.path_correctness(mazes, predicted_paths),
            )

            model.train(t)


######################################################################

log_string(f"device {device}")


task = TaskMaze(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.batch_size,
    height=args.maze_height,
    width=args.maze_width,
    nb_walls=args.maze_nb_walls,
    device=device,
)


vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

# Fall back to the parser's default of 25 epochs if a non-positive
# count is given
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else 25

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

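# train_set_perplexity is exp of the entropy of the empirical unigram
# token distribution: it would equal the number of distinct token
# values if they were equiprobable, and it is the perplexity of the
# best context-free (unigram) predictor, a natural baseline for the
# model's prediction perplexities logged below.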

##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")

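# For instance the default "10: 2e-4,20: 4e-5,30: 8e-6" keeps the base
# rate of 1e-3 for epochs 0-9, then switches to 2e-4 at epoch 10 and to
# 4e-5 at epoch 20 (the epoch-30 entry is unused with the default 25
# epochs); "cos" instead follows a half-cosine decay from
# args.learning_rate down to 0.
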
##############################

if args.one_shot:
    one_shot(model, task)
    exit(0)

##############################

if nb_epochs_finished >= nb_epochs:
    n_epoch = nb_epochs_finished
    train_perplexity = compute_perplexity(model, split="train")
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    exit(0)

##############################

for n_epoch in range(nb_epochs_finished, nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    # A fresh optimizer is built every epoch so the scheduled learning
    # rate takes effect (this also resets Adam's moment estimates)
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        # F.cross_entropy expects (N, C, T) logits, hence the transpose
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################