5 Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
6 Written by Francois Fleuret <francois.fleuret@idiap.ch>
8 This file is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 3 as
10 published by the Free Software Foundation.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this file. If not, see <http://www.gnu.org/licenses/>.
-- Use double precision everywhere so the central finite-difference
-- gradient check below has enough numerical headroom.
torch.setdefaulttensortype('torch.DoubleTensor')
-- Compare the gradient computed by back-propagation against a central
-- finite-difference estimate, one parameter at a time.
--
-- model      an nn module (or container) with parameters
-- criterion  an nn criterion providing the loss to differentiate
-- input      a batch of inputs accepted by `model`
-- target     the target expected by `criterion`
-- epsilon    half-width of the finite-difference step (default 1e-5)
--
-- Returns the largest relative discrepancy found over all parameters.
--
-- NOTE(review): this chunk of the file is missing several original
-- lines; the `err` initialization, the save/restore of each parameter,
-- the gradient zeroing, the `return`, and the closing `end`s were
-- reconstructed from the visible code -- confirm against the full file.
function checkGrad(model, criterion, input, target, epsilon)
   local params, gradParams = model:getParameters()

   local epsilon = epsilon or 1e-5

   -- Analytical gradient via a single forward/backward pass. Zero the
   -- accumulator first: backward() accumulates into gradParams.
   local output = model:forward(input)
   local loss = criterion:forward(output, target)
   local gradOutput = criterion:backward(output, target)
   gradParams:zero()
   model:backward(input, gradOutput)
   local analyticalGradParam = gradParams:clone()

   local err = 0

   for i = 1, params:size(1) do
      local x = params[i]

      -- Loss at params[i] = x - epsilon.
      params[i] = x - epsilon
      local output0 = model:forward(input)
      local loss0 = criterion:forward(output0, target)

      -- Loss at params[i] = x + epsilon.
      params[i] = x + epsilon
      local output1 = model:forward(input)
      local loss1 = criterion:forward(output1, target)

      -- Restore the parameter before moving on.
      params[i] = x

      local ana = analyticalGradParam[i]
      local num = (loss1 - loss0) / (2 * epsilon)

      -- Relative error, with epsilon in the denominator to avoid
      -- dividing by a near-zero numerical gradient.
      err = math.max(err, math.abs(num - ana) / math.max(epsilon, math.abs(num)))
   end

   return err
end
-- Recursively print a tensor or a (possibly nested) table of tensors,
-- labeling each table entry with its key.
--
-- NOTE(review): the recursive call, the non-table branch, and the
-- closing `end`s are missing from this chunk and were reconstructed --
-- confirm against the full file.
function printTensorTable(t)
   if torch.type(t) == 'table' then
      for i, t in pairs(t) do
         print('-- ELEMENT [' .. i .. '] --')
         printTensorTable(t)
      end
   else
      print(tostring(t))
   end
end
-- +-- Linear(10, 10) --> ReLU --> d -->
-- --> a --> b -----------> c ---------------+
-- +--------------- e -->
-- NOTE(review): this chunk is missing the original lines that build the
-- DAG (`dag`, and the modules `a` .. `e`), the rest of the
-- nn.Sequential chain, and the definition of `epsilon` -- reconstruct
-- from the full file before running.
dag:connect(b, nn.Linear(10, 15), nn.ReLU(), d)

dag:setOutput({ d, e })

-- Check the output of the dot file
print('Writing /tmp/graph.dot')
dag:saveDot('/tmp/graph.dot')

-- Let's make a model where the dag is inside another nn.Container.
model = nn.Sequential()
   :add(nn.Linear(50, 50))

criterion = nn.MSECriterion()

-- NOTE(review): switching the default tensor type to CUDA here would
-- conflict with the double-precision run above and fail without cutorch;
-- in the full file this line was presumably guarded or commented out --
-- confirm.
torch.setdefaulttensortype('torch.CudaTensor')

local input = torch.Tensor(30, 50):uniform()
local output = model:updateOutput(input):clone()

-- Check that DAG:accGradParameters and friends work okay
-- (if `epsilon` is nil here, checkGrad falls back to its 1e-5 default).
print('Gradient estimate error ' .. checkGrad(model, criterion, input, output, epsilon))

-- Check that we can save and reload the model
torch.save('/tmp/test.t7', model)
local otherModel = torch.load('/tmp/test.t7')
print('Gradient estimate error ' .. checkGrad(otherModel, criterion, input, output, epsilon))