diff --git a/gridding.c b/gridding.c
index f4769f9776245397a75ecf3e24ce24991cb9fd4b..f59bc44d43f1eb5a758f4cbe05a8357d063a9834 100644
--- a/gridding.c
+++ b/gridding.c
@@ -130,31 +130,23 @@ void gridding_data(){
    double resolution_asec = (3600.0*180.0)/MAX(abs(metaData.uvmin),abs(metaData.uvmax))/PI;
    printf("RESOLUTION = %f rad, %f arcsec\n", resolution, resolution_asec);
-   // Declare temporary arrays for the masking
-   double * uus;
-   double * vvs;
-   double * wws;
-   float * visreals;
-   float * visimgs;
-   float * weightss;
-   long isector;
-   for (long isector_count=0; isector_count<[...]
[...]
             if(visreals[inu]>1e10 || visimgs[inu]<-1e10)printf("%f %f %ld %ld %d %ld %ld\n",visreals[inu],visimgs[inu],inu,Nvissec,rank,ilocal*metaData.polarisations*metaData.freq_per_chan+ifreq,metaData.Nvis);
             inu++;
-         }
+         }
          icount++;
          current = current->next;
       }
@@ -195,22 +187,22 @@ void gridding_data(){
    timing.compose_time1 += (finishk.tv_sec - begink.tv_sec);
    timing.compose_time1 += (finishk.tv_nsec - begink.tv_nsec) / 1000000000.0;
-   #ifndef USE_MPI
-   double vvmin = 1e20;
-   double uumax = -1e20;
-   double vvmax = -1e20;
-
-   for (long ipart=0; ipart<[...]
[...]
    [...] %f\n",gridss[iii]);
    #ifndef USE_MPI
-   long stride = isector*2*xaxis*yaxis*num_w_planes;
-   for (long iii=0; iii<2*xaxis*yaxis*num_w_planes; iii++)gridtot[stride+iii] = gridss[iii];
+   long stride = isector*2*xaxis*yaxis*num_w_planes;
+   for (long iii=0; iii<2*xaxis*yaxis*num_w_planes; iii++)
+      gridtot[stride+iii] = gridss[iii];
    #endif
    // Write grid in the corresponding remote slab
-   #ifdef USE_MPI
-   int target_rank = (int)isector;
-   //int target_rank = (int)(size-isector-1);
-   #ifdef ONE_SIDE
-   printf("One Side communication active\n");
-   MPI_Win_lock(MPI_LOCK_SHARED,target_rank,0,slabwin);
-   MPI_Accumulate(gridss,size_of_grid,MPI_DOUBLE,target_rank,0,size_of_grid,MPI_DOUBLE,MPI_SUM,slabwin);
-   MPI_Win_unlock(target_rank,slabwin);
-   //MPI_Put(gridss,size_of_grid,MPI_DOUBLE,target_rank,0,size_of_grid,MPI_DOUBLE,slabwin);
-   #else
-   MPI_Reduce(gridss,grid,size_of_grid,MPI_DOUBLE,MPI_SUM,target_rank,MPI_COMM_WORLD);
-   #endif //ONE_SIDE
-   #endif //USE_MPI
+   #ifdef USE_MPI
+   // int target_rank = (int)isector; it implied that size >= nsectors
+   int target_rank = (int)(isector % size);
+   #ifdef ONE_SIDE
+   printf("One Side communication active\n");
+   MPI_Win_lock(MPI_LOCK_SHARED,target_rank,0,slabwin);
+   MPI_Accumulate(gridss,size_of_grid,MPI_DOUBLE,target_rank,0,size_of_grid,MPI_DOUBLE,MPI_SUM,slabwin);
+   MPI_Win_unlock(target_rank,slabwin);
+   //MPI_Put(gridss,size_of_grid,MPI_DOUBLE,target_rank,0,size_of_grid,MPI_DOUBLE,slabwin);
+   #else
+   MPI_Reduce(gridss,grid,size_of_grid,MPI_DOUBLE,MPI_SUM,target_rank,MPI_COMM_WORLD);
+   #endif //ONE_SIDE
+   #endif //USE_MPI
+   clock_gettime(CLOCK_MONOTONIC, &finishk);
    endk = clock();
    timing.reduce_time += ((double) (endk - startk)) / CLOCKS_PER_SEC;
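
The substantive fix in the second hunk is the sector-to-rank mapping: the old target_rank = (int)isector is only valid when every sector has its own MPI rank, i.e. size >= nsectors, while the modulo wraps the sector index onto the available ranks. Below is a minimal standalone sketch of that mapping, not part of the patch; size and nsectors here are hypothetical stand-ins for the communicator size and sector count used in gridding.c.

#include <stdio.h>

int main(void)
{
    int size = 4;       /* hypothetical MPI communicator size */
    int nsectors = 6;   /* hypothetical number of grid sectors */

    for (long isector = 0; isector < nsectors; isector++) {
        /* old mapping was (int)isector: out of range once isector >= size */
        int target_rank = (int)(isector % size);   /* always in [0, size) */
        printf("sector %ld -> rank %d\n", isector, target_rank);
    }
    return 0;
}

Note that with the wrapped mapping more than one sector can target the same rank (sectors 0 and 4 above both map to rank 0), so the receiving slab must be prepared for contributions from several sectors; the patch only changes the rank selection, not the slab layout.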