projects
/
pysvrt.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Made the model filenames more explicit.
[pysvrt.git]
/
vignette_set.py
diff --git a/vignette_set.py b/vignette_set.py
index 19a6f33..5062f3e 100755 (executable)
--- a/vignette_set.py
+++ b/vignette_set.py
@@ -22,7 +22,7 @@
import torch
from math import sqrt
import torch
from math import sqrt
-from torch.multiprocessing import Pool, cpu_count
+from torch import multiprocessing
from torch import Tensor
from torch.autograd import Variable
from torch import Tensor
from torch.autograd import Variable
@@ -41,11 +41,16 @@ def generate_one_batch(s):
class VignetteSet:
class VignetteSet:
- def __init__(self, problem_number, nb_batches, batch_size, cuda = False):
+ def __init__(self, problem_number, nb_samples, batch_size, cuda = False):
+
+ if nb_samples%batch_size > 0:
+ print('nb_samples must be a mutiple of batch_size')
+ raise
+
self.cuda = cuda
self.batch_size = batch_size
self.problem_number = problem_number
self.cuda = cuda
self.batch_size = batch_size
self.problem_number = problem_number
-        self.nb_batches = nb_batches
+        self.nb_batches = nb_samples // batch_size
self.nb_samples = self.nb_batches * self.batch_size
seeds = torch.LongTensor(self.nb_batches).random_()
self.nb_samples = self.nb_batches * self.batch_size
seeds = torch.LongTensor(self.nb_batches).random_()
@@ -53,11 +58,14 @@ class VignetteSet:
for b in range(0, self.nb_batches):
mp_args.append( [ problem_number, batch_size, seeds[b] ])
for b in range(0, self.nb_batches):
mp_args.append( [ problem_number, batch_size, seeds[b] ])
- # self.data = []
- # for b in range(0, self.nb_batches):
- # self.data.append(generate_one_batch(mp_args[b]))
+ self.data = []
+ for b in range(0, self.nb_batches):
+ self.data.append(generate_one_batch(mp_args[b]))
+
+ # Weird thing going on with the multi-processing, waiting for more info
- self.data = Pool(cpu_count()).map(generate_one_batch, mp_args)
+ # pool = multiprocessing.Pool(multiprocessing.cpu_count())
+ # self.data = pool.map(generate_one_batch, mp_args)
acc = 0.0
acc_sq = 0.0
acc = 0.0
acc_sq = 0.0
@@ -80,11 +88,16 @@ class VignetteSet:
######################################################################
class CompressedVignetteSet:
######################################################################
class CompressedVignetteSet:
- def __init__(self, problem_number, nb_batches, batch_size, cuda = False):
+ def __init__(self, problem_number, nb_samples, batch_size, cuda = False):
+
+ if nb_samples%batch_size > 0:
+ print('nb_samples must be a mutiple of batch_size')
+ raise
+
self.cuda = cuda
self.batch_size = batch_size
self.problem_number = problem_number
self.cuda = cuda
self.batch_size = batch_size
self.problem_number = problem_number
-        self.nb_batches = nb_batches
+        self.nb_batches = nb_samples // batch_size
self.nb_samples = self.nb_batches * self.batch_size
self.targets = []
self.input_storages = []
self.nb_samples = self.nb_batches * self.batch_size
self.targets = []
self.input_storages = []