By using this site, you agree to our updated Privacy Policy and our Terms of Use. Manage your Cookies Settings.
445,897 Members | 1,969 Online
Bytes IT Community
+ Ask a Question
Need help? Post your question and get tips & solutions from a community of 445,897 IT Pros & Developers. It's quick & easy.

Parallel quicksort (MPI)

P: n/a
Hello to all... I am trying to write an algorithm parallel in order to
realize the quicksort. They are to the first crews with C and MPI. The
algorithm that I am trying to realize is PARALLEL QUICKSORT with
REGULAR SAMPLING. The idea on the planning is right. the code that I
have written syntactically does not only have problems that it does
not want any to know to work... Are two days that I analyze it but I do
not succeed to find the errors... Someone of you can give a hand to me
is deprived of hope. Ringrazio to you in advance payment... The code is
following:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "MyMPI.h"

int main (int argc, char *argv[])
{
int i;
int id; /* Identificativo del processo */
int p; /* Numero di processi */
int n; /* Numero di elementi che appartengono al vettore*/
int high_value; /* Indice massimo del vettore per questo processo */
int low_value; /* Indice minimo del vettore per questo processo */
int size; /* Numero di elementi del vettore memorizzati da questo
processo */
int *vettore;
void quicksort (int *a, int l, int r);

MPI_Init(&argc,&argv);
MPI_Comm_rank (MPI_COMM_WORLD, &id);
MPI_Comm_size (MPI_COMM_WORLD, &p);

/* Controllo che sia specificata la dimensione del vettore
da ordinare */
if (argc != 2) {
if (!id) printf ("Command line: %s <m>\n", argv[0]);
MPI_Finalize();
exit (1);
}

/* leggo la grandezza del vettore*/
n = strtol(argv[1],NULL,10);

/* effettuo la decomposizione a blocchi del vettore */
low_value = BLOCK_LOW(id,p,n);
high_value = BLOCK_HIGH(id,p,n);
size = BLOCK_SIZE (id,p,n);

/* alloco la memoria per la parte di vettore gestita da questo
processo */
vettore = (int *) malloc (size * sizeof(int) );

/* Inserisco i valori nel vettore utilizzando la funzione random */
srand (id+n);
for (i=0; i<size; i++) vettore[i]= rand() %(n);
/* ordino utilizzando il quick sort sequenziale */
quicksort (vettore, 0, size-1);
for (i=0; i<size;i++)
printf (" proc %d ordinati %d\n ",id, vettore[i]);

/* ogni processo invia al processo root i "local regular sample"*/
int local [p];
int c=0;
for (i=0; i<size;i +=p){
local[c]= vettore [i];
c++;
}
int sample [p*p];
/* dopo la gather il processo zero ha in sample tutti i regular
sample*/
MPI_Gather (local, p, MPI_INT, sample, p, MPI_INT, 0, MPI_COMM_WORLD);

if (!id){
printf (" ecco gli elementi: ");
for (i=0;i<p*p;i++)
printf ("%d ",sample[i]);
fflush(stdout);
}

int pivot [p-1];
/* Il processo 0 ordina i regular sample e seleziona p-1 pivot */
MPI_Barrier(MPI_COMM_WORLD);
if (!id){

quicksort (sample, 0,p*p-1 );
for (i=0; i<p*p; i++);
printf (" regular sample ordinati: %d \n", sample[i]);
for (i=1; i<p; i++)
pivot[i-1]= sample [i*p+(p/2)-1];
}
/* Il processo root broadcast i pivot agli altri processi*/
MPI_Bcast (pivot, p-1, MPI_INT, 0, MPI_COMM_WORLD);

/* Ogni processo divide il suo array in p-1 sottoarray*/
int inizio [p]; /*segna memorizza il punto di divisione dell'array */
int fine [p];
inizio[0]=0;
fine [p-1]=size;
int j;
i=0;
for (j=0; j<p-1; j++){
for ( i; i<size; i++){
if ( vettore[i]>pivot [j]) {
fine [j] = i;
inizio[j+1]=i;
//printf(" \nvettore [i] %d pivot [j] %d ",vettore[i],pivot[j]);
//printf( " \nprocesso %d, fine %d, inizio %d",id,fine[j],inizio[j]);
//fflush(stdout);
i++;
break;
}}
}
//printf(" \nprocesso %d, fine %d, inizio %d",id,fine[j+1],inizio[j
+1]);
//fflush(stdout);
/* Utilizzando una comunicazione all-to-all i processi si scambiano
le parti dell'array.*/

int start_rec[p];
int fine_rec[p];
int cnt [p];
int k=0;
int rec_disp [p];

MPI_Alltoall ( inizio, 1, MPI_INT, start_rec, 1, MPI_INT,
MPI_COMM_WORLD);
MPI_Alltoall ( fine, 1, MPI_INT, fine_rec, 1, MPI_INT,
MPI_COMM_WORLD);

for (i=0; i<p;i++) {cnt[i]= (fine_rec[i] - start_rec[i]+1);
// printf ("\n fine_rec: %d, start_rec: %d",fine_rec[i],start_rec[i]);
// printf ("\nprocesso: %d cnt %d, %d ",id,i,cnt[i]);
// fflush(stdout);
}
for (i=0;i<p;i++) k += cnt[i]; // k rappresenta il numero di valori
totali che si riceveranno
int ordinato [k];
rec_disp [0]=cnt[0];
for (i=1; i<p;i++) rec_disp[i] = rec_disp[i-1]+cnt[i];
MPI_Alltoallv( vettore, inizio, fine, MPI_INT, ordinato, cnt,
rec_disp, MPI_INT, MPI_COMM_WORLD);

/* ogni processo esegue il quicksort su "ordinato" */
quicksort (ordinato, 0,k-1);

/* Salvo nel processo root il vettore con tutti i valori in ordine */
int finale [n];
MPI_Gather (&k, 1, MPI_INT, rec_disp, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Gatherv (ordinato, k, MPI_INT, finale,&k, rec_disp, MPI_INT, 0,
MPI_COMM_WORLD);
if (!id){
printf( "Vettore ordinato: ");
for (i=0; i<n; i++)
printf( "%d, ",finale[i]);
}
MPI_Finalize();
return 0;
}

void quicksort(int *a, int l, int r)
{
int q;
if (r>l) {
q=partition(a,l,r);
quicksort(a,l,q-1);
quicksort(a,q+1,r);
}
}
int partition(int a[], int l, int r)
{
int v, i, j, t;
v=a[r];
i=l-1;
j=r;
for (;;)
{
while (a[++i]<v && i < r);
while ((a[--j]>v)&&(j>=l));
if (i>=j) break;
t=a[i];
a[i]=a[j];
a[j]=t;
}
t=a[i];
a[i]=a[r];
a[r]=t;
return i;
}

Mar 7 '07 #1
Share this Question
Share on Google+
2 Replies


P: n/a
simo wrote:
Hello to all... I am trying to write an algorithm parallel in order to
realize the quicksort. They are to the first crews with C and MPI. The
algorithm that I am trying to realize is PARALLEL QUICKSORT with
REGULAR SAMPLING. The idea on the planning is right. the code that I
have written syntactically does not only have problems that it does
not want any to know to work... Are two days that I analyze it but I do
not succeed to find the errors... Someone of you can give a hand to me
is deprived of hope. Ringrazio to you in advance payment... The code is
following:

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "MyMPI.h"
<snip>

This group deals only with ISO C, which has no concept of multiple
processes, so if your problem is in the code responsible for
parallelism, then you might get a better response from posting to a
group like comp.programming.threads or a group or list for MPI.

Also try to reduce your code to the smallest possible compilable
subset that still exhibits the problem. Most importantly, try to
format your code better. Currently it's close to unreadable.

Mar 7 '07 #2

P: n/a
He found news:comp.parallel.mpi and so his MPI queries are quite
topical there and I am sure he will find good assistance.

Mar 7 '07 #3

This discussion thread is closed

Replies have been disabled for this discussion.