Update.
author: Francois Fleuret <francois@fleuret.org>
Mon, 25 Apr 2022 18:29:28 +0000 (20:29 +0200)
committer: Francois Fleuret <francois@fleuret.org>
Mon, 25 Apr 2022 18:30:10 +0000 (20:30 +0200)
mygpt.py

index e6387bd..13fbe8e 100755 (executable)
--- a/mygpt.py
+++ b/mygpt.py
@@ -508,7 +508,10 @@ for k in range(args.nb_epochs):
             acc_test_loss += loss.item() * input.size(0)
             nb_test_samples += input.size(0)
 
-        log_string(f'perplexity {k+1} train {math.exp(min(100, acc_train_loss/nb_train_samples))} test {math.exp(min(100, acc_test_loss/nb_test_samples))}')
+        train_perplexity = math.exp(min(100, acc_train_loss/nb_train_samples))
+        test_perplexity = math.exp(min(100, acc_test_loss/nb_test_samples))
+
+        log_string(f'perplexity {k+1} train {train_perplexity} test {test_perplexity}')
 
         task.produce_results(k, model)