require 'torch'; 
require 'nn'; 
require 'nnx'; 
mnist = require 'mnist'; 

fullset = mnist.traindataset() 
testset = mnist.testdataset() 
trainset = { 
    size = 50000, 
    data = fullset.data[{{1,50000}}]:double(), 
    label = fullset.label[{{1,50000}}] 
} 
validationset = { 
    size = 10000, 
    data = fullset.data[{{50001, 60000}}]:double(), 
    label = fullset.label[{{50001,60000}}] 
} 
-- MNIST Dataset has 28x28 images 
model = nn.Sequential() 

model:add(nn.SpatialConvolutionMM(1, 32, 5, 5))   -- 32x24x24 
model:add(nn.ReLU()) 
model:add(nn.SpatialMaxPooling(3, 3, 3, 3))    -- 32x8x8 

model:add(nn.SpatialConvolutionMM(32, 64, 5, 5))  -- 64x4x4 
model:add(nn.Tanh()) 
model:add(nn.SpatialMaxPooling(2, 2, 2, 2))    -- 64x2x2 
model:add(nn.Reshape(64*2*2)) 
model:add(nn.Linear(64*2*2, 200)) 
model:add(nn.Tanh()) 
model:add(nn.Linear(200, 10)) 

model:add(nn.LogSoftMax()) 

criterion = nn.ClassNLLCriterion() 

x, dldx = model:getParameters()   -- x now holds the model's trainable parameters; dldx holds the gradients of the loss w.r.t. them 

sgd_params = { 
    learningRate = 1e-2, 
    learningRateDecay = 1e-4, 
    weightDecay = 1e-3, 
    momentum = 1e-4 
} 

step = function (batchsize) 

    -- set up accumulators and a random shuffle of the training indices 
    local count = 0 
    local current_loss = 0 
    local shuffle = torch.randperm(trainset.size) 

    -- default batch size of 200 
    batchsize = batchsize or 200 

    -- build inputs and targets for each minibatch 
    for minibatch_number = 1, trainset.size, batchsize do 

        local size = math.min(trainset.size - minibatch_number + 1, batchsize) 
        local inputs = torch.Tensor(size, 28, 28) 
        local targets = torch.Tensor(size) 

        for index = 1, size do 
            inputs[index] = trainset.data[ shuffle[ index + minibatch_number ] ] 
            targets[index] = trainset.label[ shuffle[ index + minibatch_number ] ] 
        end 

        -- feval returns the loss and the gradients of the loss w.r.t. the params 
        feval = function(x_new) 
            --print ("---------------------------------safe--------------------") 

            if x ~= x_new then x:copy(x_new) end 

            -- zero the parameter gradients 
            dldx:zero() 

            -- forward/backward pass to compute loss and param gradients 
            local loss = criterion:forward(model.forward(inputs), targets) 
            model:backward(inputs, criterion:backward(model.output, targets)) 

            return loss, dldx 
        end 

        -- optim.sgd returns x*, {fx}, where x* is the new set of params and 
        -- {fx} is { loss }, so fs[1] carries the loss from feval 
        print(feval ~= nil and x ~= nil and sgd_params ~= nil) 
        _, fs = optim.sgd(feval, x, sgd_params) 

        count = count + 1 
        current_loss = current_loss + fs[1] 
    end 

    -- return the average loss over the minibatches 
    return current_loss / count 

end 

max_iters = 30 

for i = 1 ,max_iters do 
    local loss = step() 
    print(string.format('Epoch: %d Current loss: %4f', i, loss)) 
end 

I am new to Torch and Lua, and I cannot find the error in the code above. Can anyone suggest a way to debug it? The script fails with: attempt to index global 'optim' (a nil value)

Error:

/home/afroz/torch/install/bin/luajit: /home/afroz/test.lua:88: attempt to index global 'optim' (a nil value) 
stack traceback: 
    /home/afroz/test.lua:88: in function 'step' 
    /home/afroz/test.lua:102: in main chunk 
    [C]: in function 'dofile' 
    ...froz/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:145: in main chunk 
    [C]: at 0x00406670 

What about "attempt to index global 'optim' (a nil value)" is unclear? It means that optim has no value (it is nil) rather than being a table/userdata, so you cannot index it as in optim.sgd. – moteus
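
To see what the message means in isolation, here is a minimal standalone Lua sketch (no Torch required) that reproduces the same class of error; the exact chunk name in the error string will differ per file:

-- the global 'optim' was never assigned (e.g. never required), so it is nil; 
-- indexing it raises the error from the question 
local ok, err = pcall(function() return optim.sgd end) 
print(ok, err) 
-- false   ...: attempt to index global 'optim' (a nil value) 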

Answers


optim is not assigned anywhere in the script, so when the script references optim.sgd its value is nil, and you get the error you showed. You need to recheck the script to make sure optim is assigned the correct value.
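
A quick way to confirm this diagnosis from inside the script (a debugging sketch, not the fix itself) is to print the type of the global just before the call:

-- just before the optim.sgd call: 
print(type(optim))  -- prints "nil" here; it becomes "table" once the package is loaded 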


optim is not defined in the scope of the script. You try to call optim.sgd, which of course causes the error you see.

Like nn, optim is an extension package for Torch.

require 'torch'; 
require 'nn'; 
require 'nnx'; 

Remember these lines at the beginning of the script? They essentially load the definitions provided by those packages. Make sure optim is installed, and then try requiring it.
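
A minimal sketch of the fix, assuming the optim rock is installed (if it is not, it can typically be installed with luarocks, e.g. luarocks install optim):

require 'torch'; 
require 'nn'; 
require 'nnx'; 
require 'optim';   -- this line was missing: it loads the package and binds the global 'optim' 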

https://github.com/torch/optim
