Update.
author    Francois Fleuret <francois@fleuret.org>
          Fri, 29 Jul 2022 03:45:46 +0000 (05:45 +0200)
committer Francois Fleuret <francois@fleuret.org>
          Fri, 29 Jul 2022 03:45:46 +0000 (05:45 +0200)
main.py
mygpt.py

diff --git a/main.py b/main.py
index 427a83a..83227bb 100755
--- a/main.py
+++ b/main.py
@@ -165,6 +165,12 @@ class TaskPicoCLVR(Task):
         id_descr = [ [ self.token2id[u] for u in s ] for s in token_descr ]
         return torch.tensor(id_descr, device = self.device)
 
+    def trim(self, x, token = '<nul>'):
+        n = self.token2id[token]
+        i = (1 - (F.pad(x, (1, 1), value = n) == n).min(0).values.long()).cumsum(0)
+        a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
+        return x[:, a:b]
+
     def __init__(self, batch_size,
                  height, width, nb_colors = 5,
                  device = torch.device('cpu')):
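
The new trim method drops the <nul> columns that every sequence in the batch shares at its left and right ends, shortening the tensors fed to the model while leaving interior padding intact. A minimal standalone sketch of the same logic, with a hypothetical trim_columns helper that takes the nul token id explicitly:

    import torch
    import torch.nn.functional as F

    def trim_columns(x, n):
        # 1 for a padded column iff every row holds the nul token n
        all_nul = (F.pad(x, (1, 1), value = n) == n).long().min(0).values
        i = (1 - all_nul).cumsum(0)
        # a: length of the leading all-nul run, b: one past the last
        # informative column; the one-column pad shifts indices by one,
        # which exactly cancels when slicing the unpadded x
        a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
        return x[:, a:b]

    x = torch.tensor([[0, 0, 1, 2, 0],
                      [0, 3, 4, 0, 0]])
    print(trim_columns(x, 0))  # tensor([[0, 1, 2], [3, 4, 0]])
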
@@ -201,13 +207,13 @@ class TaskPicoCLVR(Task):
         assert split in { 'train', 'test' }
         input = self.train_input if split == 'train' else self.test_input
         for batch in tqdm.tqdm(input.split(self.batch_size), desc = f'epoch-{split}'):
-            yield batch
+            yield self.trim(batch)
 
     def vocabulary_size(self):
         return len(self.token2id)
 
     def produce_results(self, n_epoch, model):
-        nb_tokens = self.height * self.width + 3
+        nb_tokens_to_generate = self.height * self.width + 3
         result_descr = [ ]
         nb_per_primer = 8
 
@@ -218,15 +224,26 @@ class TaskPicoCLVR(Task):
                 'green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top <img>',
         ]:
 
-            for k in range(nb_per_primer):
-                results = autoregression(
-                    model, self.batch_size,
-                    nb_samples = 1, nb_tokens_to_generate = nb_tokens,
-                    primer = self.tensorize([ primer_descr ]),
-                    device = self.device
-                )
-                r = ' '.join([ self.id2token[t.item()] for t in results.flatten() ])
-                result_descr.append(r)
+            results = autoregression(
+                model,
+                self.batch_size,
+                nb_samples = nb_per_primer,
+                nb_tokens_to_generate = nb_tokens_to_generate,
+                primer = self.tensorize([ primer_descr ]).expand(nb_per_primer, -1),
+                device = self.device
+            )
+
+            l = [ ' '.join([ self.id2token[t.item()] for t in r ]) for r in results ]
+            result_descr += l
+
+        np = picoclvr.nb_properties(
+            result_descr,
+            height = self.height, width = self.width
+        )
+
+        nb_requested_properties, _, nb_missing_properties = zip(*np)
+
+        log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
 
         img = [
             picoclvr.descr2img(d, height = self.height, width = self.width)
@@ -241,15 +258,6 @@ class TaskPicoCLVR(Task):
         )
         log_string(f'wrote {image_name}')
 
-        np = picoclvr.nb_properties(
-            result_descr,
-            height = self.height, width = self.width
-        )
-
-        nb_requested_properties, _, nb_missing_properties = zip(*np)
-
-        log_string(f'nb_requested_properties {sum(nb_requested_properties) / len(result_descr):.02f} nb_missing_properties {sum(nb_missing_properties) / len(result_descr):.02f}')
-
 ######################################################################
 
 class TaskWiki103(Task):
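
The produce_results rewrite replaces the per-sample loop with a single batched call: the one-row primer is replicated with expand and all nb_per_primer completions are generated at once. Since expand returns a view, the replication costs no memory; this presumes autoregression copies the primer into its own output buffer rather than writing through the view. A small illustration of the expand semantics:

    import torch

    primer = torch.tensor([[5, 7, 9]])  # as tensorize returns: shape (1, T)
    batch = primer.expand(4, -1)        # shape (4, T), a view sharing storage
    print(batch.shape)                  # torch.Size([4, 3])
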
@@ -462,7 +470,6 @@ for input in task.batches(split = 'train'):
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
 train_set_perplexity = math.exp(entropy)
-#log_string(f'train set perplexity {train_set_perplexity}')
 
 for k in range(nb_epochs_finished, nb_epochs):
 
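The surviving context computes the train-set perplexity as exp of the token entropy. As a sanity check on that formula, a uniform distribution over V tokens has entropy log V, so its perplexity is exactly V:

    import math
    import torch

    V = 8
    token_probas = torch.full((V,), 1 / V)
    entropy = -torch.xlogy(token_probas, token_probas).sum()
    print(math.exp(entropy))  # ~8.0, the vocabulary size
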
diff --git a/mygpt.py b/mygpt.py
index 212e1a5..d6879dc 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -104,7 +104,7 @@ class MyGPT(nn.Module):
         for _ in range(nb_blocks):
             trunk_blocks += [
                 Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
                     QKVAttention(
                         dim_in = dim_model,
                         dim_qk = dim_keys,
@@ -114,7 +114,7 @@ class MyGPT(nn.Module):
                     ),
                 ),
                 Residual(
-                    nn.LayerNorm(dim_model),
+                    nn.LayerNorm((dim_model,)),
                     nn.Linear(in_features = dim_model, out_features = dim_hidden),
                     nn.ReLU(),
                     nn.Linear(in_features = dim_hidden, out_features = dim_model),
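
Both LayerNorm hunks are cosmetic: nn.LayerNorm accepts either an int or a shape tuple for normalized_shape and normalizes over the same trailing dimension in both cases, so the two spellings build identical modules:

    import torch.nn as nn

    # both forms store normalized_shape as the tuple (16,)
    assert nn.LayerNorm(16).normalized_shape == (16,)
    assert nn.LayerNorm((16,)).normalized_shape == (16,)
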
@@ -131,7 +131,8 @@ class MyGPT(nn.Module):
         x = self.embedding(x)
         x = self.trunk(x)
         x = self.readout(x)
-        return x[:, :-1]
+        x = F.pad(x, (0, 0, 0, -1))
+        return x
 
 ######################################################################
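
The rewritten forward replaces the slice x[:, :-1] with a negative pad. F.pad pairs run from the last dimension inward, so (0, 0, 0, -1) leaves the feature axis untouched and trims one position from the end of the time axis; the two forms produce equal tensors:

    import torch
    import torch.nn.functional as F

    x = torch.arange(24.).reshape(2, 3, 4)  # (batch, time, features)
    assert torch.equal(F.pad(x, (0, 0, 0, -1)), x[:, :-1])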