@inproceedings{SinzEtAl2007,
  author    = {Sinz, F. H. and Chapelle, O. and Agarwal, A. and Schölkopf, B.},
  title     = {An Analysis of Inference with the Universum},
  booktitle = {Proceedings of the Twenty-First Annual Conference on Neural Information Processing Systems (NIPS 2007)},
  year      = {2008},
  month     = {01},
  pages     = {1--8},
  publisher = {MIT Press},
  address   = {Cambridge, Mass., USA},
  location  = {Vancouver, BC, Canada},
  abstract  = {We study a pattern classification algorithm which has recently been proposed by Vapnik and coworkers. It builds on a new inductive principle which assumes that in addition to positive and negative data, a third class of data is available, termed the Universum. We assay the behavior of the algorithm by establishing links with Fisher discriminant analysis and oriented PCA, as well as with an SVM in a projected subspace (or, equivalently, with a data-dependent reduced kernel). We also provide experimental results.},
  URL       = {http://books.nips.cc/papers/files/nips20/NIPS2007_0780.pdf}
}

@inproceedings{WestonEtAl2006,
  author    = {Weston, J. and Collobert, R. and Sinz, F. and Bottou, L. and Vapnik, V.},
  title     = {Inference with the Universum},
  booktitle = {Proceedings of the 23rd International Conference on Machine Learning (ICML 2006)},
  year      = {2006},
  month     = {06},
  pages     = {127},
  abstract  = {We study classification tasks where one is given a set of labeled examples, and a set of "non-examples" of meaningful concepts in the same domain that do not belong to either class (referred to as the universum). We describe an algorithmic approach to leverage universum points and show experimentally that inference based on the labeled data and the universum can improve over using the labeled data alone, at least in the small sample case. Finally, we list some conjectures describing how and why the Universum helps, and experimentally attempt to test each hypothesis.},
  URL       = {http://www.kyb.tuebingen.mpg.de/bs/people/fabee/universvm.html}
}

@inproceedings{CollobertEtAl2006,
  author       = {Collobert, R. and Sinz, F. and Weston, J. and Bottou, L.},
  title        = {Trading Convexity for Scalability},
  booktitle    = {Proceedings of the 23rd International Conference on Machine Learning (ICML 2006)},
  year         = {2006},
  month        = {06},
  pages        = {201--208},
  publisher    = {ACM Press},
  organization = {Association for Computing Machinery},
  address      = {New York, NY, USA},
  location     = {Pittsburgh, Penn., USA},
  abstract     = {Convex learning algorithms, such as Support Vector Machines (SVMs), are often seen as highly desirable because they offer strong practical properties and are amenable to theoretical analysis. However, in this work we show how non-convexity can provide scalability advantages over convexity. We show how concave-convex programming can be applied to produce (i) faster SVMs where training errors are no longer support vectors, and (ii) much faster Transductive SVMs.}
}

@article{CollobertEtAl2006a,
  author   = {Collobert, R. and Sinz, F. and Weston, J. and Bottou, L.},
  title    = {Large Scale Transductive SVMs},
  journal  = {Journal of Machine Learning Research},
  year     = {2006},
  volume   = {7},
  pages    = {1687--1712},
  month    = {08},
  abstract = {We show how the Concave-Convex Procedure can be applied to the optimization of Transductive SVMs, which traditionally requires solving a combinatorial search problem.
This provides for the first time a highly scalable algorithm in the nonlinear case. Detailed experiments verify the utility of our approach.},
  URL      = {http://jmlr.csail.mit.edu/papers/volume7/collobert06a/collobert06a.pdf}
}