Added --nb_validation_samples and --validation_error_threshold to terminate learning...
[pysvrt.git] / cnn-svrt.py
diff --git a/cnn-svrt.py b/cnn-svrt.py
index 7fde85d..f3d350e 100755
--- a/cnn-svrt.py
+++ b/cnn-svrt.py
@@ -56,6 +56,13 @@ parser.add_argument('--nb_train_samples',
 parser.add_argument('--nb_test_samples',
                     type = int, default = 10000)
 
+parser.add_argument('--nb_validation_samples',
+                    type = int, default = 10000)
+
+parser.add_argument('--validation_error_threshold',
+                    type = float, default = 0.0,
+                    help = 'Early training termination criterion')
+
 parser.add_argument('--nb_epochs',
                     type = int, default = 50)
 
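A quick standalone check of the two new options, mirroring the definitions above (pure argparse; the parsed command line is a made-up example):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--nb_validation_samples',
                        type = int, default = 10000)
    parser.add_argument('--validation_error_threshold',
                        type = float, default = 0.0,
                        help = 'Early training termination criterion')

    args = parser.parse_args(['--validation_error_threshold', '0.01'])
    assert args.nb_validation_samples == 10000      # default kept
    assert args.validation_error_threshold == 0.01  # i.e. a 1% error rate

Note that with the default threshold of 0.0, the main loop (see the last-but-one hunk below) never builds a validation set, so training still runs for the full --nb_epochs.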
@@ -79,13 +86,13 @@ parser.add_argument('--test_loaded_models',
 parser.add_argument('--problems',
                     type = str, default = '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23',
-                    help = 'What problem to process')
+                    help = 'What problems to process')
 
 args = parser.parse_args()
 
 ######################################################################
 
-log_file = open(args.log_file, 'w')
+log_file = open(args.log_file, 'a')
 pred_log_t = None
 
 print(Fore.RED + 'Logging into ' + args.log_file + Style.RESET_ALL)
@@ -194,7 +201,22 @@ class AfrozeDeepNet(nn.Module):
 
 ######################################################################
 
-def train_model(model, train_set):
+def nb_errors(model, data_set):
+    ne = 0
+    for b in range(0, data_set.nb_batches):
+        input, target = data_set.get_batch(b)
+        output = model.forward(Variable(input))
+        wta_prediction = output.data.max(1)[1].view(-1)
+
+        for i in range(0, data_set.batch_size):
+            if wta_prediction[i] != target[i]:
+                ne = ne + 1
+
+    return ne
+
+######################################################################
+
+def train_model(model, train_set, validation_set):
     batch_size = args.batch_size
     criterion = nn.CrossEntropyLoss()
 
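Aside: the wta_prediction line takes the per-sample argmax over the class scores, i.e. the winner-take-all class index. A tiny standalone illustration with made-up scores:

    import torch

    output = torch.Tensor([[0.1, 0.9],   # sample 0: class 1 scores higher
                           [0.8, 0.2]])  # sample 1: class 0 scores higher
    wta_prediction = output.max(1)[1].view(-1)
    print(wta_prediction)                # class indices: [1, 0]

The .view(-1) flattens the index tensor so it can be compared element-wise against the 1-D target vector.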
@@ -216,25 +238,24 @@ def train_model(model, train_set):
             loss.backward()
             optimizer.step()
 
         dt = (time.time() - start_t) / (e + 1)
+
         log_string('train_loss {:d} {:f}'.format(e + 1, acc_loss),
                    ' [ETA ' + time.ctime(time.time() + dt * (args.nb_epochs - e)) + ']')
 
-    return model
+        if validation_set is not None:
+            nb_validation_errors = nb_errors(model, validation_set)
 
-######################################################################
+            log_string('validation_error {:.02f}% {:d} {:d}'.format(
+                100 * nb_validation_errors / validation_set.nb_samples,
+                nb_validation_errors,
+                validation_set.nb_samples)
+            )
 
-def nb_errors(model, data_set):
-    ne = 0
-    for b in range(0, data_set.nb_batches):
-        input, target = data_set.get_batch(b)
-        output = model.forward(Variable(input))
-        wta_prediction = output.data.max(1)[1].view(-1)
+            if nb_validation_errors / validation_set.nb_samples <= args.validation_error_threshold:
+                log_string('below validation_error_threshold')
+                break
 
-        for i in range(0, data_set.batch_size):
-            if wta_prediction[i] != target[i]:
-                ne = ne + 1
-
-    return ne
+    return model
 
 ######################################################################
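The net effect of this hunk: nb_errors moves above train_model (its new caller), the validation error is logged after every epoch, and training stops as soon as the error rate reaches the threshold. A condensed sketch of the resulting control flow (names as in cnn-svrt.py; the per-epoch SGD body is elided):

    for e in range(0, args.nb_epochs):
        ...  # one epoch of training on train_set, then the train_loss log line
        if validation_set is not None:
            error_rate = nb_errors(model, validation_set) / validation_set.nb_samples
            if error_rate <= args.validation_error_threshold:
                break  # early termination criterion met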
@@ -273,6 +294,8 @@ if args.nb_train_samples%args.batch_size > 0 or args.nb_test_samples%args.batch_size > 0:
     print('The number of samples must be a multiple of the batch size.')
     raise
 
+log_string('############### start ###############')
+
 if args.compress_vignettes:
     log_string('using_compressed_vignettes')
     VignetteSet = svrtset.CompressedVignetteSet
@@ -327,7 +350,15 @@ for problem_number in map(int, args.problems.split(',')):
             train_set.nb_samples / (time.time() - t))
         )
 
-        train_model(model, train_set)
+        if args.validation_error_threshold > 0.0:
+            validation_set = VignetteSet(problem_number,
+                                         args.nb_validation_samples, args.batch_size,
+                                         cuda = torch.cuda.is_available(),
+                                         logger = vignette_logger())
+        else:
+            validation_set = None
+
+        train_model(model, train_set, validation_set)
 
         torch.save(model.state_dict(), model_filename)
         log_string('saved_model ' + model_filename)
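For illustration, a run exercising the new path could look like this (hypothetical command line; the 1% threshold is an arbitrary example, but all three flags are defined in this file):

    ./cnn-svrt.py --problems 1 \
                  --nb_validation_samples 10000 \
                  --validation_error_threshold 0.01

Building the validation set only when the threshold is strictly positive keeps the default behaviour, and its generation cost, unchanged.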
@@ -351,10 +382,6 @@ for problem_number in map(int, args.problems.split(',')):
                                args.nb_test_samples, args.batch_size,
                                cuda = torch.cuda.is_available())
 
-        log_string('data_generation {:0.2f} samples / s'.format(
-            test_set.nb_samples / (time.time() - t))
-        )
-
         nb_test_errors = nb_errors(model, test_set)
 
         log_string('test_error {:d} {:.02f}% {:d} {:d}'.format(