@article{SVL1992,
vgclass = {refpap},
vgproject = {nn,invariance},
author = {Patrice Simard and Bernard Victorri and Yann Le Cun and
John Denker},
title = {{T}angent {P}rop - A formalism for specifying selected
invariances in an adaptive network},
journal = {Advances in Neural Information Processing Systems},
volume = {4},
pages = {895--903},
year = {1992},
abstract = {In many machine learning applications, one has access not
only to training data, but also to some high-level \emph{a priori}
knowledge about the desired behaviour of the system. For example, it is
known in advance that the output of a character recognizer should be
invariant with respect to small spatial distortions of the input images
(translations, rotations, scale changes, etcetera).
We have implemented a scheme that allows a network to learn the
derivative of its outputs with respect to distortion operators of our
choosing. This not only reduces the learning time and the amount of
training data, but also provides a powerful \emph{language} for
specifying what generalizations we wish the network to perform.},
}
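The abstract describes a scheme in which the network learns the derivative of its outputs along chosen distortion operators, i.e. the directional derivative of the output along a "tangent" direction (such as a small image translation) is driven toward zero. A minimal sketch of such a tangent penalty, assuming a JAX setup; the names net, tangent_prop_penalty, loss and the weight mu are illustrative assumptions, not taken from the paper:

# Hedged sketch of a Tangent Prop-style penalty: jax.jvp gives the
# directional derivative of the network output along a distortion
# tangent vector, and its squared norm is added to the task loss.
import jax
import jax.numpy as jnp

def net(params, x):
    # toy two-layer network; stands in for any differentiable model
    w1, b1, w2, b2 = params
    h = jnp.tanh(x @ w1 + b1)
    return h @ w2 + b2

def tangent_prop_penalty(params, x, tangent):
    # derivative of the output w.r.t. the input, taken along the
    # distortion tangent (e.g. the pixel-shift direction of an image)
    _, jvp_out = jax.jvp(lambda inp: net(params, inp), (x,), (tangent,))
    return jnp.sum(jvp_out ** 2)

def loss(params, x, y, tangent, mu):
    # usual task loss plus the weighted invariance penalty
    pred = net(params, x)
    task = jnp.sum((pred - y) ** 2)
    return task + mu * tangent_prop_penalty(params, x, tangent)

# illustrative usage with random weights and placeholder data
key = jax.random.PRNGKey(0)
k1, k2 = jax.random.split(key)
params = (jax.random.normal(k1, (256, 32)), jnp.zeros(32),
          jax.random.normal(k2, (32, 10)), jnp.zeros(10))
x = jnp.ones((4, 256))          # batch of flattened images
y = jnp.zeros((4, 10))          # target outputs
tangent = jnp.ones((4, 256))    # stand-in for a distortion direction
grads = jax.grad(loss)(params, x, y, tangent, mu=0.1)

In practice the tangent vectors would be obtained from the chosen distortion operators (for images, by finite differences between an image and a slightly translated, rotated, or scaled copy), which is what lets the penalty express the invariances mentioned in the abstract.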