1995
@article{Low1995,
vgclass = {refpap},
author = {David G. Lowe},
title = {Similarity metric learning for a variable-kernel
classifier},
journal = {Neural Computation},
volume = {7},
number = {1},
pages = {72--85},
month = {January},
year = {1995},
url = {http://www.cs.ubc.ca/spider/lowe/papers/neural95/neural.html},
url1 = {http://www.cs.ubc.ca/spider/lowe/papers/neural95.ps},
abstract = {Nearest-neighbour interpolation algorithms have many
useful properties for applications to learning, but they often exhibit
poor generalization. In this paper, it is shown that much better
generalization can be obtained by using a variable interpolation kernel
in combination with conjugate gradient optimization of the similarity
metric and kernel size. The resulting method is called variable-kernel
similarity metric (VSM) learning. It has been tested on several
standard classification data sets, and on these problems it shows
better generalization than back-propagation and most other learning
methods. An important advantage is that the system can operate as a
black box in which no model minimization parameters need to be
experimentally set by the user. The number of parameters that must be
determined through optimization is orders of magnitude smaller than for
back-propagation or RBF networks, which may indicate that the method
better captures the essential degrees of variation in learning. Other
features of VSM learning are discussed that make it relevant to models
for biological learning in the brain.},
}
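
The abstract is concrete enough to sketch the method it names: a k-nearest-neighbour
classifier that interpolates class outputs with a Gaussian kernel, where the
per-feature distance weights (the similarity metric) and the kernel size are fitted
by conjugate-gradient minimization of a leave-one-out error. The Python sketch below
is an illustrative reconstruction under those assumptions, not Lowe's published
formulation; the diagonal metric, the squared-error loss, and all function names are
choices made here for clarity.

import numpy as np
from scipy.optimize import minimize

def _dist2(params, A, B):
    # Squared distances under a diagonal metric; params hold log feature
    # weights followed by the log kernel width (kept positive via exp).
    w = np.exp(params[:-1])
    diff = A[:, None, :] - B[None, :, :]
    return (diff ** 2 * w).sum(axis=2)

def _kernel_weights(dist2, sigma, k):
    # Gaussian kernel weights over each row's k smallest squared distances,
    # normalized so the neighbour weights of each query sum to one.
    idx = np.argsort(dist2, axis=1)[:, :k]
    nd2 = np.take_along_axis(dist2, idx, axis=1)
    kern = np.exp(-nd2 / (2.0 * sigma ** 2))
    kern /= kern.sum(axis=1, keepdims=True)
    return idx, kern

def loo_loss(params, X, Y, k=10):
    # Leave-one-out squared error of the kernel-interpolated class outputs;
    # Y is one-hot, shape (n, classes).
    sigma = np.exp(params[-1])
    d2 = _dist2(params, X, X)
    np.fill_diagonal(d2, np.inf)  # exclude each point from its own neighbours
    idx, kern = _kernel_weights(d2, sigma, k)
    pred = np.einsum('qk,qkc->qc', kern, Y[idx])
    return ((pred - Y) ** 2).sum()

def fit_vsm(X, Y, k=10):
    # Tune log feature weights and log kernel width by conjugate gradient
    # (scipy's 'CG' with numerical gradients; the paper derives analytic ones).
    x0 = np.zeros(X.shape[1] + 1)  # unit weights, sigma = 1
    return minimize(loo_loss, x0, args=(X, Y, k), method='CG').x

def predict(params, X_train, Y_train, X_query, k=10):
    # Interpolate the one-hot labels of the k nearest training points.
    sigma = np.exp(params[-1])
    d2 = _dist2(params, X_query, X_train)
    idx, kern = _kernel_weights(d2, sigma, k)
    return np.einsum('qk,qkc->qc', kern, Y_train[idx])

if __name__ == "__main__":
    # Toy two-class problem with one-hot labels.
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 4))
    labels = (X[:, 0] + 2 * X[:, 1] > 0).astype(int)
    Y = np.eye(2)[labels]
    params = fit_vsm(X, Y)
    print(predict(params, X, Y, X[:5]))

Note the small parameter count the abstract emphasizes: only one weight per input
feature plus one kernel width are optimized, versus a full weight matrix per layer
in a back-propagation network.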