Made the example more complicated to check that DAGs can be combined with other modules.
[dagnn.git] / test-dagnn.lua

diff --git a/test-dagnn.lua b/test-dagnn.lua
index a41d880..f7de819 100755
--- a/test-dagnn.lua
+++ b/test-dagnn.lua
@@ -23,9 +23,8 @@
 require 'torch'
 require 'nn'
 require 'dagnn'
--- torch.setnumthreads(params.nbThreads)
 torch.setdefaulttensortype('torch.DoubleTensor')
-torch.manualSeed(2)
+torch.manualSeed(1)
 
 function checkGrad(model, criterion, input, target)
    local params, gradParams = model:getParameters()
@@ -39,6 +38,8 @@ function checkGrad(model, criterion, input, target)
    model:backward(input, gradOutput)
    local analyticalGradParam = gradParams:clone()
 
+   local err = 0
+
    for i = 1, params:size(1) do
       local x = params[i]
 
@@ -54,23 +55,13 @@ function checkGrad(model, criterion, input, target)
       local ana = analyticalGradParam[i]
       local num = (loss1 - loss0) / (2 * epsilon)
-      local err
-      if num == ana then
-         err = 0
-      else
-         err = torch.abs(num - ana) / torch.abs(num)
+      if num ~= ana then
+         err = math.max(err, torch.abs(num - ana) / torch.abs(num))
       end
-
-      print(
-         'CHECK '
-         .. err
-         .. ' checkGrad ' .. i
-         .. ' analytical ' .. ana
-         .. ' numerical ' .. num
-      )
    end
+
+   return err
 end
 
 function printTensorTable(t)
@@ -84,35 +75,41 @@ function printTensorTable(t)
    end
 end
 
---                 +- Linear(10, 10) -> ReLU ---> d --+
---                /                          /         \
---               /                          /           \
---  --> a --> b -----------> c --------------           + e -->
---                \                                     /
---                 \                                   /
---                  +-- Mul(-1) ----------------------+
+--                 +-- Linear(10, 10) --> ReLU --> d -->
+--                /                               /
+--               /                               /
+--  --> a --> b -----------> c ------------------+
+--                            \
+--                             \
+--                              +---------------- e -->
 
-model = nn.DAG()
+dag = nn.DAG()
 
 a = nn.Linear(50, 10)
 b = nn.ReLU()
 c = nn.Linear(10, 15)
 d = nn.CMulTable()
-e = nn.CAddTable()
+e = nn.Mul(-1)
 
-model:addEdge(a, b)
-model:addEdge(b, nn.Linear(10, 15), nn.ReLU(), d)
-model:addEdge(d, e)
-model:addEdge(b, c)
-model:addEdge(c, d)
-model:addEdge(c, nn.Mul(-1), e)
+dag:connect(a, b, c)
+dag:connect(b, nn.Linear(10, 15), nn.ReLU(), d)
+dag:connect(c, d)
+dag:connect(c, e)
 
-model:setInput(a)
-model:setOutput(e)
+dag:setInput(a)
+dag:setOutput({ d, e })
+
+-- We check it works when we put it into a nn.Sequential
+model = nn.Sequential()
+   :add(nn.Linear(50, 50))
+   :add(dag)
+   :add(nn.CAddTable())
 
 local input = torch.Tensor(30, 50):uniform()
 local output = model:updateOutput(input):clone()
-
 output:uniform()
 
-checkGrad(model, nn.MSECriterion(), input, output)
+print('Gradient estimate error ' .. checkGrad(model, nn.MSECriterion(), input, output))
+
+print('Writing /tmp/graph.dot')
+dag:saveDot('/tmp/graph.dot')
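
For readers who want to try the reworked API outside the test suite, below is a minimal standalone sketch. It assumes only the calls visible in the diff above (nn.DAG(), dag:connect(), dag:setInput(), dag:setOutput()) plus stock torch/nn modules, and mirrors how the modified test composes a DAG with an nn.Sequential; it is an illustration, not code from the repository.

-- Minimal usage sketch (illustration only, not from the repository).
-- Assumes the nn.DAG API exactly as exercised by the diff above.
require 'torch'
require 'nn'
require 'dagnn'

local dag = nn.DAG()

-- One input module feeding two parallel branches, as in the test.
local a = nn.Linear(20, 10)
local b = nn.ReLU()
local c = nn.Mul(-1)       -- mirrors the test's use of nn.Mul(-1)

dag:connect(a, b)          -- a --> b
dag:connect(a, c)          -- a --> c
dag:setInput(a)
dag:setOutput({ b, c })    -- the DAG outputs a table of two tensors

-- The DAG composes with standard containers: nn.CAddTable() sums
-- the two branch outputs, exactly as the modified test does.
local model = nn.Sequential()
   :add(dag)
   :add(nn.CAddTable())

local x = torch.Tensor(5, 20):uniform()
print(model:forward(x):size())   -- expected: 5x10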