Update.
author: François Fleuret <francois@fleuret.org>
Thu, 27 Jul 2023 00:38:47 +0000 (14:38 -1000)
committer: François Fleuret <francois@fleuret.org>
Thu, 27 Jul 2023 00:38:47 +0000 (14:38 -1000)
do_all.sh
main.py

index 76f1982..c5d16fc 100755 (executable)
--- a/do_all.sh
+++ b/do_all.sh
@@ -13,7 +13,7 @@
 # set -o pipefail
 
 #prefix="--nb_train_samples=1000 --nb_test_samples=100 --batch_size=25 --nb_epochs=2 --max_percents_of_test_in_train=-1 --model=17K"
-prefix="--nb_epochs=2"
+prefix="--nb_epochs=25"
 
 for task in byheart learnop guessop twotargets addition picoclvr maze snake stack expr rpl
 do
diff --git a/main.py b/main.py
index 7b104bf..dbdf89d 100755 (executable)
--- a/main.py
+++ b/main.py
@@ -62,7 +62,7 @@ parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30:
 
 ########################################
 
-parser.add_argument("--model", type=str, default="37M")
+parser.add_argument("--model", type=str, default=None)
 
 parser.add_argument("--dim_model", type=int, default=None)
 
@@ -172,78 +172,91 @@ if args.result_dir is None:
 
 default_task_args = {
     "byheart": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "learnop": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "guessop": {
+        "model": "122M",
         "nb_epochs": 5,
         "batch_size": 25,
-        "nb_train_samples": 50000,
+        "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "twotargets": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "addition": {
+        "model": "122M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "picoclvr": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "mnist": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 10,
         "nb_train_samples": 60000,
         "nb_test_samples": 10000,
     },
     "maze": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 5,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "snake": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "stack": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 100000,
         "nb_test_samples": 1000,
     },
     "expr": {
+        "model": "37M",
         "nb_epochs": 40,
         "batch_size": 25,
         "nb_train_samples": 1000000,
         "nb_test_samples": 10000,
     },
     "rpl": {
+        "model": "37M",
         "nb_epochs": 40,
         "batch_size": 25,
         "nb_train_samples": 100000,
         "nb_test_samples": 10000,
     },
     "world": {
+        "model": "37M",
         "nb_epochs": 10,
         "batch_size": 25,
         "nb_train_samples": 25000,