module = nn.Add(inputDimension, scalar)

Applies a bias term to the incoming data, i.e. y_i = x_i + b_i. If scalar = true, a single bias term is shared across all elements: y_i = x_i + b.
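
The effect of both modes can be checked directly by setting the bias by hand. A minimal sketch (the bias values below are chosen arbitrarily for illustration; a freshly constructed module starts with a random bias):

require 'nn'

m = nn.Add(5)                          -- one bias term per input element
m.bias:copy(torch.Tensor{1,2,3,4,5})   -- set the five bias terms explicitly
print(m:forward(torch.zeros(5)))       -- prints 1 2 3 4 5

s = nn.Add(5, true)                    -- scalar = true: a single shared bias
s.bias:fill(0.5)
print(s:forward(torch.zeros(5)))       -- prints 0.5 in every position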

Example:

require 'nn'

y = torch.Tensor(5)
mlp = nn.Sequential()
mlp:add(nn.Add(5))

-- one training step: forward pass, loss, backward pass, parameter update
function gradUpdate(mlp, x, y, criterion, learningRate)
  local pred = mlp:forward(x)
  local err = criterion:forward(pred, y)
  local gradCriterion = criterion:backward(pred, y)
  mlp:zeroGradParameters()
  mlp:backward(x, gradCriterion)
  mlp:updateParameters(learningRate)
  return err
end

for i = 1, 10000 do
  x = torch.rand(5)
  y:copy(x)
  -- the target shifts each element of x by its index: y[j] = x[j] + j
  for j = 1, 5 do y[j] = y[j] + j end
  err = gradUpdate(mlp, x, y, nn.MSECriterion(), 0.01)
end
print(mlp:get(1).bias)
gives the output:
 1.0000
 2.0000
 3.0000
 4.0000
 5.0000
[torch.Tensor of dimension 5]
i.e. the network successfully learns by how much each element of the input x was shifted to produce the target y.
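
The scalar variant learns a single shared shift in the same way. A minimal sketch, reusing the gradUpdate function above (the constant 3 is an arbitrary choice for illustration):

mlp2 = nn.Sequential()
mlp2:add(nn.Add(5, true))     -- scalar = true: one bias shared by all elements
for i = 1, 10000 do
  x = torch.rand(5)
  y:copy(x):add(3)            -- every element shifted by the same constant
  err = gradUpdate(mlp2, x, y, nn.MSECriterion(), 0.01)
end
print(mlp2:get(1).bias)       -- converges to a single value close to 3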