Search results for key=ZSSa2010: 1 match found.
Refereed full papers (journals, book chapters, international conferences), 2010:
@inproceedings{ZSSa2010,
vgclass = {refpap},
author = {Nayyar Abbas Zaidi and David McG.\ Squire and David Suter},
title = {A Gradient-based Metric Learning Algorithm for {k-NN}
Classifiers},
booktitle = {Proceedings of the 23rd Australasian Joint Conference on
Artificial Intelligence},
address = {Adelaide, Australia},
volume = {6464},
series = {Lecture Notes in Computer Science},
pages = {194--203},
publisher = {Springer-Verlag},
month = {December~7--10},
year = {2010},
doi = {10.1007/978-3-642-17432-2_20},
abstract = {Nearest Neighbor (NN) classification/regression techniques
are, besides being simple, amongst the most widely applied and
well-studied techniques for pattern recognition in machine learning. A
drawback, however, is the assumption that a suitable metric for
measuring distances to the $k$ nearest neighbors is available. It has
been shown that $k$-NN classifiers with a suitable distance metric can
perform better than other, more sophisticated, alternatives such as
Support Vector Machines and Gaussian Process classifiers. For this
reason, much recent research on $k$-NN methods has focused on metric
learning, i.e.\ finding an optimized metric. In this paper we propose a
simple gradient-based algorithm for metric learning. We discuss in
detail the motivations behind metric learning, i.e.\ error minimization
and margin maximization. Our formulation differs from the prevalent
metric learning techniques, whose goal is to maximize the classifier's
margin; instead, our proposed technique (MEGM) finds an optimal metric
by directly minimizing the mean square error. Our technique not only
greatly improves $k$-NN performance, but also outperforms competing
metric learning techniques. Promising results are reported on major
UCIML databases.},
}
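
The abstract describes the idea at a high level only: learn a metric for
k-NN by gradient descent on the mean square error, rather than on a margin
objective. As a rough illustration (not the authors' MEGM implementation),
the sketch below learns a linear transform A, i.e. the metric M = A^T A, by
differentiating the MSE of a soft (softmax-weighted) nearest-neighbour
prediction; the soft weighting, the use of JAX, and the names soft_nn_mse
and learn_metric are all assumptions made for the example.

import jax
import jax.numpy as jnp

def soft_nn_mse(A, X, Y):
    # Leave-one-out soft nearest-neighbour prediction under the metric A^T A.
    # X: (n, d) training points; Y: (n, c) one-hot class labels.
    Z = X @ A.T                          # map points into the learned space
    d2 = jnp.sum((Z[:, None, :] - Z[None, :, :]) ** 2, axis=-1)
    d2 = d2 + 1e9 * jnp.eye(X.shape[0])  # bar each point from being its own neighbour
    W = jax.nn.softmax(-d2, axis=1)      # nearer points receive larger weights
    P = W @ Y                            # weighted vote -> class probabilities
    return jnp.mean((P - Y) ** 2)        # mean square error, as in the abstract

def learn_metric(X, Y, steps=200, lr=0.1):
    # Plain gradient descent starting from the Euclidean metric (A = I).
    A = jnp.eye(X.shape[1])
    grad_fn = jax.grad(soft_nn_mse)
    for _ in range(steps):
        A = A - lr * grad_fn(A, X, Y)
    return A

Once A is learned, an ordinary k-NN classifier can be run on the
transformed data X @ A.T; only the training objective changes, not the
final classifier.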