[SCM] Gerris Flow Solver branch, upstream, updated. b3aa46814a06c9cb2912790b23916ffb44f1f203

Stephane Popinet popinet at users.sf.net
Fri May 15 02:54:51 UTC 2009


The following commit has been merged into the upstream branch:
commit 7a41af4fe52512a9ddf4615a08924b77b0aeaf03
Author: Stephane Popinet <popinet at users.sf.net>
Date:   Mon Nov 12 07:20:19 2007 +1100

    Bug fix for refinement of coarse VOF cells in parallel
    
    Thanks to Daniel Fuster for reporting the problem.
    
    darcs-hash:20071111202019-d4795-6ac361a03b787a2b7859f6b943a8003d4bca1c31.gz

diff --git a/src/init.c b/src/init.c
index 1386d6e..180b984 100644
--- a/src/init.c
+++ b/src/init.c
@@ -248,7 +248,7 @@ void gfs_init (int * argc, char *** argv)
     }
     else
       MPI_Init (argc, argv);
-    atexit ((void (*)(void)) MPI_Finalize);
+    MPI_Errhandler_set (MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
   }
 #endif /* HAVE_MPI */
   initialized = TRUE;
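
The init.c hunk replaces the atexit()-registered MPI_Finalize with an explicit
error handler on MPI_COMM_WORLD, so a failing MPI call now aborts every rank
immediately rather than relying on cleanup at process exit. A minimal
standalone sketch of the resulting initialisation pattern (not the Gerris code
itself; note that MPI_Errhandler_set is the MPI-1 name, later renamed
MPI_Comm_set_errhandler):

#include <mpi.h>

/* Sketch: start MPI only if nobody has yet, then make communication
 * errors on the world communicator fatal. MPI_ERRORS_ARE_FATAL is
 * already the standard's default handler, so setting it explicitly
 * mainly documents intent and guards against a changed default. */
static void init_mpi_fatal (int * argc, char *** argv)
{
  int flag;

  MPI_Initialized (&flag);
  if (!flag)
    MPI_Init (argc, argv);
  MPI_Errhandler_set (MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
}
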
diff --git a/src/mpi_boundary.c b/src/mpi_boundary.c
index 9f8e552..f4c7e8f 100644
--- a/src/mpi_boundary.c
+++ b/src/mpi_boundary.c
@@ -40,7 +40,7 @@ static void send (GfsBoundary * bb)
 #ifdef DEBUG
 fprintf (stderr, "%d send %d tag: %d\n",
 	 domain->pid, 
-	 boundary->process,
+	 mpi->process,
 	 TAG (GFS_BOUNDARY (boundary)));
 #endif
     MPI_Isend (&boundary->sndcount, 1, MPI_UNSIGNED,
@@ -53,7 +53,7 @@ fprintf (stderr, "%d send %d tag: %d\n",
 #ifdef DEBUG
 fprintf (stderr, "%d send %d tag: %d size: %d\n",
 	 domain->pid, 
-	 boundary->process,
+	 mpi->process,
 	 TAG (GFS_BOUNDARY (boundary)),
 	 boundary->sndcount);
 #endif
@@ -85,7 +85,7 @@ static void receive (GfsBoundary * bb,
 #ifdef DEBUG
 fprintf (stderr, "%d wait %d %d match variable\n",
 	 gfs_box_domain (bb->box)->pid,
-	 boundary->process,
+	 mpi->process,
 	 MATCHING_TAG (GFS_BOUNDARY (boundary)));
 #endif
     MPI_Recv (&boundary->rcvcount, 1, MPI_UNSIGNED,
@@ -106,7 +106,7 @@ fprintf (stderr, "%d wait %d %d match variable\n",
 #ifdef DEBUG
 fprintf (stderr, "%d wait %d %d\n",
 	 gfs_box_domain (bb->box)->pid,
-	 boundary->process,
+	 mpi->process,
 	 MATCHING_TAG (GFS_BOUNDARY (boundary)));
 #endif
   g_assert (boundary->rcvcount <= boundary->rcvbuf->len);
@@ -118,7 +118,24 @@ fprintf (stderr, "%d wait %d %d\n",
 	    mpi->comm,
 	    &status);
   MPI_Get_count (&status, MPI_DOUBLE, &count);
+#ifdef DEBUG
+  fprintf (stderr, "%d %d %d\n", status.MPI_SOURCE, status.MPI_TAG, status.MPI_ERROR);
+  if (count == MPI_UNDEFINED) {
+    fprintf (stderr, "%d %d count is undefined!\n",
+	     gfs_box_domain (bb->box)->pid,
+	     MATCHING_TAG (GFS_BOUNDARY (boundary)));
+    g_assert_not_reached ();
+  }
+  else if (count != boundary->rcvcount) {
+    fprintf (stderr, "%d %d count = %d boundary->rcvcount = %d\n",
+	     gfs_box_domain (bb->box)->pid,
+	     MATCHING_TAG (GFS_BOUNDARY (boundary)),
+	     count, boundary->rcvcount);
+    g_assert_not_reached ();
+  }
+#else
   g_assert (count == boundary->rcvcount);
+#endif
 
 #ifdef PROFILE_MPI
   end = MPI_Wtime ();
@@ -148,7 +165,10 @@ static void synchronize (GfsBoundary * bb)
   gts_range_add_value (&domain->mpi_wait, end - start);
 #endif /* PROFILE_MPI */
   boundary->nrequest = 0;
-
+#ifdef DEBUG
+  fprintf (stderr, "==== %d synchronised ====\n",
+	   gfs_box_domain (bb->box)->pid);
+#endif
   (* gfs_boundary_periodic_class ()->synchronize) (bb);
 }
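
The mpi_boundary.c hunks point the DEBUG tracing at mpi->process rather than
boundary->process, and replace the bare assertion on the received element
count with diagnostics: after MPI_Recv completes, MPI_Get_count reports how
many MPI_DOUBLEs actually arrived, and any mismatch with the announced
boundary->rcvcount is printed before aborting. A self-contained sketch of
that receive-validation pattern, with hypothetical names:

#include <mpi.h>
#include <stdio.h>

/* Sketch (hypothetical names): receive up to `capacity` doubles and
 * check that the message length matches the size the sender announced
 * beforehand. MPI_Get_count returns MPI_UNDEFINED when the received
 * data is not a whole number of elements of the given datatype. */
static void recv_checked (double * buf, int capacity, int expected,
                          int source, int tag, MPI_Comm comm)
{
  MPI_Status status;
  int count, rank;

  MPI_Comm_rank (comm, &rank);
  MPI_Recv (buf, capacity, MPI_DOUBLE, source, tag, comm, &status);
  MPI_Get_count (&status, MPI_DOUBLE, &count);
  if (count == MPI_UNDEFINED || count != expected) {
    fprintf (stderr, "%d: expected %d doubles, got %d (tag %d)\n",
             rank, expected, count, status.MPI_TAG);
    MPI_Abort (comm, 1);
  }
}
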
 
diff --git a/src/vof.c b/src/vof.c
index 15b6398..05ce588 100644
--- a/src/vof.c
+++ b/src/vof.c
@@ -20,6 +20,11 @@
 #include <math.h>
 #include <stdlib.h>
 #include "vof.h"
+
+#include "config.h"
+#ifdef HAVE_MPI
+#  include <mpi.h>
+#endif
 #include "variable.h"
 #include "adaptive.h"
 #include "graphic.h"
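
This first vof.c hunk pulls in config.h so that HAVE_MPI is actually defined
when the build has MPI, and includes <mpi.h> only in that case; the same
guard wraps the MPI_Allreduce added further down, leaving serial builds
unchanged. A small sketch of the guard pattern (hypothetical helper; the
pid >= 0 test mirrors the parallel-run check used below):

#include "config.h"
#ifdef HAVE_MPI
#  include <mpi.h>
#endif

/* Hypothetical helper: return this process' rank in a parallel run
 * (signalled, as in the code below, by pid >= 0), and 0 for a serial
 * build or serial run, so callers need no #ifdefs of their own. */
static int rank_or_zero (int pid)
{
#ifdef HAVE_MPI
  if (pid >= 0) {
    int rank;
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    return rank;
  }
#endif
  return 0;
}
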
@@ -1336,10 +1341,40 @@ static void f_times_dV (FttCell * cell, VofParms * p)
 
 static void f_over_dV (FttCell * cell, VofParms * p)
 {
+  g_assert (GFS_VARIABLE (cell, p->dV->i) > 0.);
   gdouble f = GFS_VARIABLE (cell, p->par->v->i)/GFS_VARIABLE (cell, p->dV->i);
   GFS_VARIABLE (cell, p->par->v->i) = f < 1e-10 ? 0. : f > 1. - 1e-10 ? 1. : f;
 }
 
+/* refine cells which would lead to a loss of resolution at the interface */
+static void fix_too_coarse (GfsDomain * domain, VofParms * p)
+{
+  p->depth = 0;
+  p->domain = domain;
+  p->too_coarse = 0;
+  gfs_domain_face_traverse (domain, p->c,
+			    FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
+			    (FttFaceTraverseFunc) face_too_coarse, p);
+  domain->cell_init = (FttCellInitFunc) vof_cell_fine_init;
+  domain->cell_init_data = p;
+  if (p->too_coarse > 0)
+    gfs_domain_cell_traverse (domain,
+			      FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
+			      (FttCellTraverseFunc) refine_too_coarse, p);
+#ifdef HAVE_MPI
+  if (domain->pid >= 0) {
+    guint sum_too_coarse;
+      
+    MPI_Allreduce (&p->too_coarse, &sum_too_coarse, 1, MPI_UNSIGNED, MPI_SUM, MPI_COMM_WORLD);
+    p->too_coarse = sum_too_coarse;
+  }
+#endif /* HAVE_MPI */
+  if (p->too_coarse > 0)
+    gfs_domain_reshape (domain, p->depth);
+  domain->cell_init = (FttCellInitFunc) gfs_cell_fine_init;
+  domain->cell_init_data = domain;
+}
+
 /**
  * gfs_tracer_vof_advection:
  * @domain: a #GfsDomain.
@@ -1371,18 +1406,7 @@ void gfs_tracer_vof_advection (GfsDomain * domain,
     p.c = (cstart + c) % FTT_DIMENSION;
     gfs_domain_cell_traverse (domain, FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
 			      (FttCellTraverseFunc) gfs_cell_reset, par->fv);
-    p.too_coarse = 0;
-    gfs_domain_face_traverse (domain, p.c,
-			      FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
-			      (FttFaceTraverseFunc) face_too_coarse, &p);
-    if (p.too_coarse > 0) {
-      p.depth = 0;
-      p.domain = domain;
-      gfs_domain_cell_traverse (domain,
-				FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
-				(FttCellTraverseFunc) refine_too_coarse, &p);
-      gfs_domain_reshape (domain, p.depth, (FttCellInitFunc) vof_cell_fine_init, &p);
-    }
+    fix_too_coarse (domain, &p);
     gfs_domain_face_traverse (domain, p.c,
 			      FTT_PRE_ORDER, FTT_TRAVERSE_LEAFS, -1,
 			      (FttFaceTraverseFunc) vof_face_value, &p);
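
The heart of the fix is the new fix_too_coarse() above: each process first
counts its own too-coarse interface cells, and in a parallel run
(domain->pid >= 0) those counts are summed over all processes with
MPI_Allreduce before anyone decides whether to call gfs_domain_reshape().
Since the reshape involves every process, a rank whose local count is zero
must still take part when another rank needs refinement; judging from the
removed code, the old per-rank test on p.too_coarse could leave the processes
disagreeing on that. A reduced sketch of the pattern (hypothetical names):

#include <mpi.h>

/* Sketch (hypothetical names) of the collective decision made in
 * fix_too_coarse(): sum a per-rank count across all ranks so that
 * every rank takes the same branch before any collective operation. */
static void refine_if_needed (unsigned local_too_coarse, int parallel)
{
  unsigned total = local_too_coarse;

  if (parallel)
    MPI_Allreduce (&local_too_coarse, &total, 1,
                   MPI_UNSIGNED, MPI_SUM, MPI_COMM_WORLD);
  if (total > 0) {
    /* All ranks reach this point together, so it is safe to invoke
     * a collective operation such as the domain reshape here. */
  }
}

Note also that the per-cell initialiser is now passed via domain->cell_init
and domain->cell_init_data instead of as arguments to gfs_domain_reshape(),
with the default gfs_cell_fine_init restored afterwards.
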

-- 
Gerris Flow Solver


