Search results for key=LPS1997 : 1 match found.

Refereed full papers (journals, book chapters, international conferences)

1997

@article{LPS1997,
  vgclass  = {refpap},
  author   = {Langley, Pat and Provan, Gregory M. and Smyth, Padhraic},
  title    = {Learning with Probabilistic Representations},
  journal  = {Machine Learning},
  volume   = {29},
  number   = {2--3},
  pages    = {91--101},
  year     = {1997},
  abstract = {Machine learning cannot occur without some means to
    represent the learned knowledge. Researchers have long recognized the
    influence of representational choices, and the major paradigms in
    machine learning are organized not around induction algorithms or
    performance elements as much as around representational classes. Major
    examples include logical representations, which encode knowledge as
    rule sets or as univariate decision trees, neural networks, which
    instead use nodes connected by weighted links, and instance-based
    approaches, which store specific training cases in memory. In the late
    1980s, work on probabilistic representations also started to appear in
    the machine learning literature. This representational framework had a
    number of attractions, including a clean probabilistic semantics and
    the ability to explicitly describe degrees of certainty. This general
    approach attracted only a moderate amount of attention until recent
    years, when progress on Bayesian belief networks led to enough activity
    in the area to justify this special issue on the topic of probabilistic
    learning \ldots},
}