X-Git-Url: https://www.fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=cnn-svrt.py;h=a2ab1a31ce7dfc903b1b2a342b5d888f6c188045;hb=ea951479345890206211764657ce4d9556af9e76;hp=8840c4bacfa5389809a2c4d3bbe7b002577ca275;hpb=24c605e252da6bd8a74fe363192bdbfc2f6b688d;p=pysvrt.git

diff --git a/cnn-svrt.py b/cnn-svrt.py
index 8840c4b..a2ab1a3 100755
--- a/cnn-svrt.py
+++ b/cnn-svrt.py
@@ -36,7 +36,7 @@ from torch import nn
 from torch.nn import functional as fn
 from torchvision import datasets, transforms, utils
 
-import svrt
+from vignette_set import VignetteSet, CompressedVignetteSet
 
 ######################################################################
 
@@ -85,75 +85,6 @@ def log_string(s):
 
 ######################################################################
 
-class VignetteSet:
-    def __init__(self, problem_number, nb_batches):
-        self.batch_size = args.batch_size
-        self.problem_number = problem_number
-        self.nb_batches = nb_batches
-        self.nb_samples = self.nb_batches * self.batch_size
-        self.targets = []
-        self.inputs = []
-
-        acc = 0.0
-        acc_sq = 0.0
-
-        for b in range(0, self.nb_batches):
-            target = torch.LongTensor(self.batch_size).bernoulli_(0.5)
-            input = svrt.generate_vignettes(problem_number, target)
-            input = input.float().view(input.size(0), 1, input.size(1), input.size(2))
-            if torch.cuda.is_available():
-                input = input.cuda()
-                target = target.cuda()
-            acc += input.sum() / input.numel()
-            acc_sq += input.pow(2).sum() / input.numel()
-            self.targets.append(target)
-            self.inputs.append(input)
-
-        mean = acc / self.nb_batches
-        std = math.sqrt(acc_sq / self.nb_batches - mean * mean)
-        for b in range(0, self.nb_batches):
-            self.inputs[b].sub_(mean).div_(std)
-
-    def get_batch(self, b):
-        return self.inputs[b], self.targets[b]
-
-######################################################################
-
-class CompressedVignetteSet:
-    def __init__(self, problem_number, nb_batches):
-        self.batch_size = args.batch_size
-        self.problem_number = problem_number
-
-        self.nb_batches = nb_batches
-        self.nb_samples = self.nb_batches * self.batch_size
-        self.targets = []
-        self.input_storages = []
-
-        acc = 0.0
-        acc_sq = 0.0
-        for b in range(0, self.nb_batches):
-            target = torch.LongTensor(self.batch_size).bernoulli_(0.5)
-            input = svrt.generate_vignettes(problem_number, target)
-            acc += input.float().sum() / input.numel()
-            acc_sq += input.float().pow(2).sum() / input.numel()
-            self.targets.append(target)
-            self.input_storages.append(svrt.compress(input.storage()))
-
-        self.mean = acc / self.nb_batches
-        self.std = math.sqrt(acc_sq / self.nb_batches - self.mean * self.mean)
-
-    def get_batch(self, b):
-        input = torch.ByteTensor(svrt.uncompress(self.input_storages[b])).float()
-        input = input.view(self.batch_size, 1, 128, 128).sub_(self.mean).div_(self.std)
-        target = self.targets[b]
-
-        if torch.cuda.is_available():
-            input = input.cuda()
-            target = target.cuda()
-
-        return input, target
-
-######################################################################
-
 # Afroze's ShallowNet
 
 #                       map size   nb. maps
@@ -176,6 +107,7 @@ class AfrozeShallowNet(nn.Module):
         self.conv3 = nn.Conv2d(16, 120, kernel_size=18)
         self.fc1 = nn.Linear(120, 84)
         self.fc2 = nn.Linear(84, 2)
+        self.name = 'shallownet'
 
     def forward(self, x):
         x = fn.relu(fn.max_pool2d(self.conv1(x), kernel_size=2))
@@ -186,6 +118,8 @@ class AfrozeShallowNet(nn.Module):
         x = self.fc2(x)
         return x
 
+######################################################################
+
 def train_model(model, train_set):
     batch_size = args.batch_size
     criterion = nn.CrossEntropyLoss()
@@ -231,11 +165,11 @@ for arg in vars(args):
 
 for problem_number in range(1, 24):
     if args.compress_vignettes:
-        train_set = CompressedVignetteSet(problem_number, args.nb_train_batches)
-        test_set = CompressedVignetteSet(problem_number, args.nb_test_batches)
+        train_set = CompressedVignetteSet(problem_number, args.nb_train_batches, args.batch_size)
+        test_set = CompressedVignetteSet(problem_number, args.nb_test_batches, args.batch_size)
     else:
-        train_set = VignetteSet(problem_number, args.nb_train_batches)
-        test_set = VignetteSet(problem_number, args.nb_test_batches)
+        train_set = VignetteSet(problem_number, args.nb_train_batches, args.batch_size)
+        test_set = VignetteSet(problem_number, args.nb_test_batches, args.batch_size)
 
     model = AfrozeShallowNet()
 
@@ -247,7 +181,7 @@ for problem_number in range(1, 24):
             nb_parameters += p.numel()
     log_string('nb_parameters {:d}'.format(nb_parameters))
 
-    model_filename = 'model_' + str(problem_number) + '.param'
+    model_filename = model.name + '_' + str(problem_number) + '_' + str(train_set.nb_batches) + '.param'
 
     try:
         model.load_state_dict(torch.load(model_filename))