Data noise is present in many machine learning problem domains; some of these are well studied but others have received less attention. In this paper we propose an algorithm for constructing a kernel Fisher discriminant (KFD) from training examples with noisy labels. The approach allows us to associate with each example a probability of the label being flipped. We utilise an expectation maximization (EM) algorithm for updating the probabilities. The E-step uses class conditional probabilities estimated as a by-product of the KFD algorithm. The M-step updates the flip probabilities and determines the parameters of the discriminant. We have applied the approach to two real-world data-sets. The results show the feasibility of the approach.

@inproceedings{lawrence-noisy01,
  title        = {Estimating a Kernel {Fisher} Discriminant in the Presence of Label Noise},
  author       = {Lawrence, Neil D. and Sch{\"o}lkopf, Bernhard},
  booktitle    = {Proceedings of the International Conference on Machine Learning},
  year         = {2001},
  month        = jun,
  editor       = {Brodley, Carla and Danyluk, Andrea P.},
  volume       = {18},
  address      = {San Francisco, CA},
  publisher    = {Morgan Kaufmann},
  edit         = {https://github.com/lawrennd//publications/edit/gh-pages/_posts/2001-01-01-lawrence-noisy01.md},
  url          = {http://inverseprobability.com/publications/lawrence-noisy01.html},
  abstract     = {Data noise is present in many machine learning problem domains; some of these are well studied but others have received less attention. In this paper we propose an algorithm for constructing a kernel Fisher discriminant (KFD) from training examples with *noisy labels*. The approach allows us to associate with each example a probability of the label being flipped. We utilise an expectation maximization (EM) algorithm for updating the probabilities. The E-step uses class conditional probabilities estimated as a by-product of the KFD algorithm. The M-step updates the flip probabilities and determines the parameters of the discriminant. We have applied the approach to two real-world data-sets. The results show the feasibility of the approach.},
  crossref     = {Brodley:icml01},
  key          = {Lawrence:noisy01},
  linkpsgz     = {ftp://ftp.dcs.shef.ac.uk/home/neil/noisyfisher.ps.gz},
  linksoftware = {http://inverseprobability.com/nkfd/},
  group        = {shefml},
}

%T Estimating a Kernel Fisher Discriminant in the Presence of Label Noise
%A Neil D. Lawrence and Bernhard Schölkopf
%B
%C Proceedings of the International Conference on Machine Learning
%D
%E Carla Brodley and Andrea P. Danyluk
%F lawrence-noisy01
%I Morgan Kaufmann
%P --
%R
%U http://inverseprobability.com/publications/lawrence-noisy01.html
%V 18
%X Data noise is present in many machine learning problem domains; some of these are well studied but others have received less attention. In this paper we propose an algorithm for constructing a kernel Fisher discriminant (KFD) from training examples with *noisy labels*. The approach allows us to associate with each example a probability of the label being flipped. We utilise an expectation maximization (EM) algorithm for updating the probabilities. The E-step uses class conditional probabilities estimated as a by-product of the KFD algorithm. The M-step updates the flip probabilities and determines the parameters of the discriminant. We have applied the approach to two real-world data-sets. The results show the feasibility of the approach.

TY - CPAPER
TI - Estimating a Kernel Fisher Discriminant in the Presence of Label Noise
AU - Neil D. Lawrence
AU - Bernhard Schölkopf
BT - Proceedings of the International Conference on Machine Learning
PY - 2001/01/01
DA - 2001/01/01
ED - Carla Brodley
ED - Andrea P. Danyluk
ID - lawrence-noisy01
PB - Morgan Kaufmann
SP -
EP -
UR - http://inverseprobability.com/publications/lawrence-noisy01.html
AB - Data noise is present in many machine learning problem domains; some of these are well studied but others have received less attention. In this paper we propose an algorithm for constructing a kernel Fisher discriminant (KFD) from training examples with *noisy labels*. The approach allows us to associate with each example a probability of the label being flipped. We utilise an expectation maximization (EM) algorithm for updating the probabilities. The E-step uses class conditional probabilities estimated as a by-product of the KFD algorithm. The M-step updates the flip probabilities and determines the parameters of the discriminant. We have applied the approach to two real-world data-sets. The results show the feasibility of the approach.
ER -

Lawrence, N.D. & Schölkopf, B. (2001). Estimating a Kernel Fisher Discriminant in the Presence of Label Noise. Proceedings of the International Conference on Machine Learning 18:-