module = nn.DotProduct() creates a module that takes a table of two input vectors and outputs the dot product (a scalar) between them.
Example:
-- Build a DotProduct module and apply it to two 3-element vectors.
mlp=nn.DotProduct()
x=lab.new(1,2,3) 
y=lab.new(4,5,6)
-- forward() takes a table holding the two vectors; 1*4 + 2*5 + 3*6 = 32.
print(mlp:forward({x,y}))
gives the output:
32 [torch.Tensor of dimension 1]
A more complicated example:
-- Train a ranking function so that mlp:forward({x,y},{x,z}) returns a number
-- which indicates whether x is better matched with y or z (larger score = better match), or vice versa.
-- Two Linear layers whose 'weight' and 'bias' tensors are shared via clone
-- (a siamese pair: updating one updates the other).
mlp1=nn.Linear(5,10)
mlp2=mlp1:clone('weight','bias')
-- Run the two shared linear maps side by side on a pair of inputs.
prl=nn.ParallelTable();
prl:add(mlp1); prl:add(mlp2)
-- Reassign mlp1 to the full scorer: {a,b} -> dot(Linear(a), Linear(b)).
mlp1=nn.Sequential()
mlp1:add(prl)
mlp1:add(nn.DotProduct())
-- Second scorer, again parameter-tied to the first via clone.
mlp2=mlp1:clone('weight','bias')
-- Top-level net maps {{x,y},{x,z}} to the pair {score(x,y), score(x,z)}.
mlp=nn.Sequential()
prla=nn.ParallelTable()
prla:add(mlp1)
prla:add(mlp2)
mlp:add(prla)
x=lab.rand(5); 
y=lab.rand(5)
z=lab.rand(5)
-- Untrained scores for three pairings, for comparison with the runs below.
print(mlp1:forward{x,x})
print(mlp1:forward{x,y})
print(mlp1:forward{y,y})
-- Margin ranking loss with margin 1: for target 1 it wants
-- score1 - score2 >= 1 (and the reverse for target -1).
crit=nn.MarginRankingCriterion(1); 
-- Use a typical generic gradient update function
-- Performs one SGD step on `mlp` for input `x` and target `y` under
-- `criterion`, scaling the update by `learningRate`.
-- Returns the loss value so callers can monitor training progress without
-- re-running criterion:forward themselves (existing callers that ignore
-- the return value are unaffected).
function gradUpdate(mlp, x, y, criterion, learningRate)
   local pred = mlp:forward(x)
   local err = criterion:forward(pred, y)
   local gradCriterion = criterion:backward(pred, y)
   mlp:zeroGradParameters()
   mlp:backward(x, gradCriterion)
   mlp:updateParameters(learningRate)
   return err
end
-- The fixed training input: both scorers see x, paired with y and z.
inp={{x,y},{x,z}}
-- NOTE(review): math.randomseed seeds math.random only; lab.rand was already
-- called above, so this line likely has no effect here — confirm.
math.randomseed(1)
-- make the pair x and y have a larger dot product than x and z
for i=1,100 do
   -- target 1 => push score(x,y) above score(x,z) by the margin
   gradUpdate(mlp,inp,1,crit,0.05)
   o1=mlp1:forward{x,y}[1]; 
   o2=mlp2:forward{x,z}[1]; 
   o=crit:forward(mlp:forward{{x,y},{x,z}},1)
   print(o1,o2,o)
end
print "******************"
-- make the pair x and z have a larger dot product than x and y
for i=1,100 do
   -- target -1 => now push score(x,z) above score(x,y) instead
   gradUpdate(mlp,inp,-1,crit,0.05)
   o1=mlp1:forward{x,y}[1]; 
   o2=mlp2:forward{x,z}[1]; 
   o=crit:forward(mlp:forward{{x,y},{x,z}},-1)
   print(o1,o2,o)
end