Minor ETA fix.
diff --git a/cnn-svrt.py b/cnn-svrt.py
index 8b8ec12..592f982 100755
--- a/cnn-svrt.py
+++ b/cnn-svrt.py
@@ -73,16 +73,29 @@ parser.add_argument('--compress_vignettes',
                     action='store_true', default = False,
                     help = 'Use lossless compression to reduce the memory footprint')
 
+parser.add_argument('--test_loaded_models',
+                    action='store_true', default = False,
+                    help = 'Should we compute the test error of models we load')
+
 args = parser.parse_args()
 
 ######################################################################
 
 log_file = open(args.log_file, 'w')
+pred_log_t = None
 
 print(Fore.RED + 'Logging into ' + args.log_file + Style.RESET_ALL)
 
 def log_string(s):
-    s = Fore.GREEN + time.ctime() + Style.RESET_ALL + ' ' + s
+    global pred_log_t
+    t = time.time()
+
+    if pred_log_t is None:
+        elapsed = 'start'
+    else:
+        elapsed = '+{:.02f}s'.format(t - pred_log_t)
+    pred_log_t = t
+    s = Fore.BLUE + time.ctime() + ' ' + Fore.GREEN + elapsed + Style.RESET_ALL + ' ' + s
     log_file.write(s + '\n')
     log_file.flush()
     print(s)
@@ -133,6 +146,8 @@ def train_model(model, train_set):
 
     optimizer = optim.SGD(model.parameters(), lr = 1e-2)
 
+    start_t = time.time()
+
     for e in range(0, args.nb_epochs):
         acc_loss = 0.0
         for b in range(0, train_set.nb_batches):
@@ -144,6 +159,8 @@ def train_model(model, train_set):
             loss.backward()
             optimizer.step()
         log_string('train_loss {:d} {:f}'.format(e + 1, acc_loss))
+        dt = (time.time() - start_t) / (e + 1)
+        print(Fore.CYAN + 'ETA ' + time.ctime(time.time() + dt * (args.nb_epochs - e - 1)) + Style.RESET_ALL)
 
     return model
 
@@ -167,55 +184,84 @@ def nb_errors(model, data_set):
 for arg in vars(args):
     log_string('argument ' + str(arg) + ' ' + str(getattr(args, arg)))
 
+######################################################################
+
 for problem_number in range(1, 24):
-    if args.compress_vignettes:
-        train_set = CompressedVignetteSet(problem_number, args.nb_train_batches, args.batch_size,
-                                          cuda=torch.cuda.is_available())
-        test_set = CompressedVignetteSet(problem_number, args.nb_test_batches, args.batch_size,
-                                         cuda=torch.cuda.is_available())
-    else:
-        train_set = VignetteSet(problem_number, args.nb_train_batches, args.batch_size,
-                                          cuda=torch.cuda.is_available())
-        test_set = VignetteSet(problem_number, args.nb_test_batches, args.batch_size,
-                                          cuda=torch.cuda.is_available())
+
+    log_string('**** problem ' + str(problem_number) + ' ****')
 
     model = AfrozeShallowNet()
 
     if torch.cuda.is_available():
         model.cuda()
 
+    model_filename = model.name + '_' + \
+                     str(problem_number) + '_' + \
+                     str(args.nb_train_batches) + '.param'
+
     nb_parameters = 0
-    for p in model.parameters():
-        nb_parameters += p.numel()
+    for p in model.parameters(): nb_parameters += p.numel()
     log_string('nb_parameters {:d}'.format(nb_parameters))
 
-    model_filename = model.name + '_' + str(problem_number) + '_' + str(train_set.nb_batches) + '.param'
-
+    need_to_train = False
     try:
         model.load_state_dict(torch.load(model_filename))
         log_string('loaded_model ' + model_filename)
     except:
-        log_string('training_model')
+        need_to_train = True
+
+    if need_to_train:
+
+        log_string('training_model ' + model_filename)
+
+        t = time.time()
+
+        if args.compress_vignettes:
+            train_set = CompressedVignetteSet(problem_number,
+                                              args.nb_train_batches, args.batch_size,
+                                              cuda=torch.cuda.is_available())
+        else:
+            train_set = VignetteSet(problem_number,
+                                    args.nb_train_batches, args.batch_size,
+                                    cuda=torch.cuda.is_available())
+
+        log_string('data_generation {:0.2f} samples / s'.format(train_set.nb_samples / (time.time() - t)))
+
         train_model(model, train_set)
         torch.save(model.state_dict(), model_filename)
         log_string('saved_model ' + model_filename)
 
-    nb_train_errors = nb_errors(model, train_set)
+        nb_train_errors = nb_errors(model, train_set)
+
+        log_string('train_error {:d} {:.02f}% {:d} {:d}'.format(
+            problem_number,
+            100 * nb_train_errors / train_set.nb_samples,
+            nb_train_errors,
+            train_set.nb_samples)
+        )
+
+    if need_to_train or args.test_loaded_models:
+
+        t = time.time()
+
+        if args.compress_vignettes:
+            test_set = CompressedVignetteSet(problem_number,
+                                             args.nb_test_batches, args.batch_size,
+                                             cuda=torch.cuda.is_available())
+        else:
+            test_set = VignetteSet(problem_number,
+                                   args.nb_test_batches, args.batch_size,
+                                   cuda=torch.cuda.is_available())
 
-    log_string('train_error {:d} {:.02f}% {:d} {:d}'.format(
-        problem_number,
-        100 * nb_train_errors / train_set.nb_samples,
-        nb_train_errors,
-        train_set.nb_samples)
-    )
+        log_string('data_generation {:0.2f} samples / s'.format(test_set.nb_samples / (time.time() - t)))
 
-    nb_test_errors = nb_errors(model, test_set)
+        nb_test_errors = nb_errors(model, test_set)
 
-    log_string('test_error {:d} {:.02f}% {:d} {:d}'.format(
-        problem_number,
-        100 * nb_test_errors / test_set.nb_samples,
-        nb_test_errors,
-        test_set.nb_samples)
-    )
+        log_string('test_error {:d} {:.02f}% {:d} {:d}'.format(
+            problem_number,
+            100 * nb_test_errors / test_set.nb_samples,
+            nb_test_errors,
+            test_set.nb_samples)
+        )
 
 ######################################################################
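
For reference, below is a minimal standalone sketch of the running-average ETA estimate used in train_model above: after each completed epoch, the mean time per epoch so far is multiplied by the number of epochs still to run. The helper name, the toy epoch loop, and the sleep stand-in are hypothetical illustrations, not part of cnn-svrt.py.

import time

def eta_after_epoch(start_t, epoch, nb_epochs):
    """Estimate the wall-clock finish time after `epoch` epochs have completed
    (epoch is 0-based), assuming every epoch takes roughly the same time."""
    dt = (time.time() - start_t) / (epoch + 1)   # mean seconds per completed epoch
    remaining = nb_epochs - (epoch + 1)          # epochs still to run
    return time.time() + dt * remaining          # projected completion timestamp

# Hypothetical usage: replace the sleep with one epoch of training.
if __name__ == '__main__':
    nb_epochs = 5
    start_t = time.time()
    for e in range(nb_epochs):
        time.sleep(0.1)                          # stand-in for one training epoch
        print('ETA', time.ctime(eta_after_epoch(start_t, e, nb_epochs)))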