criterion = nn.MarginRankingCriterion(margin)

Creates a criterion that measures the loss given an input x = {x1, x2}, a table of two Tensors of size 1 (each containing a single scalar), and a label y (1 or -1):

If y = 1 then it is assumed that the first input should be ranked higher (have a larger value) than the second input; the reverse holds for y = -1.

The loss function is:

loss(x,y) = forward(x,y) = max(0,-y*(x[1]-x[2])+margin)
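For example, with margin = 0.1 the criterion can be applied directly to a table of two 1-element Tensors (a minimal sketch; the input values here are made up for illustration):

require 'nn'

crit = nn.MarginRankingCriterion(0.1)
x1 = torch.Tensor{0.8}
x2 = torch.Tensor{0.3}

-- y = 1: x1 already outranks x2 by more than the margin,
-- so loss = max(0, -(0.8 - 0.3) + 0.1) = 0
print(crit:forward({x1, x2}, 1))

-- y = -1: x2 should outrank x1 but does not,
-- so loss = max(0, (0.8 - 0.3) + 0.1) = 0.6
print(crit:forward({x1, x2}, -1))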

Example:


require 'nn'

-- two identical linear layers sharing the same weights and biases
p1_mlp = nn.Linear(5, 2)
p2_mlp = p1_mlp:clone('weight', 'bias')

-- process the two members of a pair in parallel
prl = nn.ParallelTable()
prl:add(p1_mlp)
prl:add(p2_mlp)
  
-- score a pair as the dot product of the two embeddings
mlp1 = nn.Sequential()
mlp1:add(prl)
mlp1:add(nn.DotProduct())

-- a second scorer sharing parameters with the first
mlp2 = mlp1:clone('weight', 'bias')

-- rank two pairs against each other
mlpa = nn.Sequential()
prla = nn.ParallelTable()
prla:add(mlp1)
prla:add(mlp2)
mlpa:add(prla)

crit = nn.MarginRankingCriterion(0.1)

x = torch.randn(5)
y = torch.randn(5)
z = torch.randn(5)


-- a typical generic gradient update function
function gradUpdate(mlp, x, y, criterion, learningRate)
   local pred = mlp:forward(x)
   local err = criterion:forward(pred, y)
   local gradCriterion = criterion:backward(pred, y)
   mlp:zeroGradParameters()
   mlp:backward(x, gradCriterion)
   mlp:updateParameters(learningRate)
end

-- push the score of pair {x, y} above the score of pair {x, z}
for i = 1, 100 do
   gradUpdate(mlpa, {{x, y}, {x, z}}, 1, crit, 0.01)
   o1 = mlp1:forward{x, y}[1]
   o2 = mlp2:forward{x, z}[1]
   o = crit:forward(mlpa:forward{{x, y}, {x, z}}, 1)
   print(o1, o2, o)
end

print "--"

-- now push the score of pair {x, y} below the score of pair {x, z}
for i = 1, 100 do
   gradUpdate(mlpa, {{x, y}, {x, z}}, -1, crit, 0.01)
   o1 = mlp1:forward{x, y}[1]
   o2 = mlp2:forward{x, z}[1]
   o = crit:forward(mlpa:forward{{x, y}, {x, z}}, -1)
   print(o1, o2, o)
end
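As training proceeds, the printed loss o should drop to 0 once o1 exceeds o2 by at least the margin (first loop), and again once o2 exceeds o1 by at least the margin (second loop), since the hinge in the loss is inactive beyond that point.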