@inproceedings{Supelec556,
  author    = {Galtier, Virginie and Genaud, St{\'e}phane and Vialle, St{\'e}phane},
  title     = {Implementation of the {AdaBoost} Algorithm for Large Scale Distributed Environments: Comparing {JavaSpace} and {MPJ}},
  booktitle = {Fifteenth International Conference on Parallel and Distributed Systems ({ICPADS}'09)},
  year      = {2009},
  month     = dec,
  address   = {Shenzhen, China},
  abstract  = {This paper presents the parallelization of a machine learning
method, called the adaboost algorithm. The parallel algorithm
follows a dynamically load-balanced master-worker strategy, which
is parameterized by the granularity of the tasks distributed to
workers. We first show the benefits of this version with
heterogeneous processors. Then, we study the application in a
real, geographically distributed environment, hence adding
network latencies to the execution. Performances of the
application using more than a hundred processes are analyzed in
both JavaSpace and {\pmpi}. We therefore present an head-to-head
comparison of two parallel programming models. We study for each
case the granularities yielding the best performance. We show
that current network technologies enable to obtain interesting
speedups in many situations for such an application, even when
using a virtual shared memory paradigm in a large-scale
distributed environment.},
}