TransmitCoarse.F90
! DOUG - Domain decomposition On Unstructured Grids
! Copyright (C) 1998-2006 Faculty of Computer Science, University of Tartu and
! Department of Mathematics, University of Bath
!
! This library is free software; you can redistribute it and/or
! modify it under the terms of the GNU Lesser General Public
! License as published by the Free Software Foundation; either
! version 2.1 of the License, or (at your option) any later version.
!
! This library is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public
! License along with this library; if not, write to the Free Software
! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
! or contact the authors (University of Tartu, Faculty of Computer Science, Chair
! of Distributed Systems, Liivi 2, 50409 Tartu, Estonia, http://dougdevel.org,
! mailto:info(at)dougdevel.org)

module TransmitCoarse
! This file is concerned with distributing and reassembling the grid structure
! for the multiple-processor case.

! General idea:
! The master side first builds some global auxiliary data and then proceeds to
! chop up the coarse grid and the fine element geometric data, sending the
! information required for rebuilding the parts of the mesh to the other
! processes. Once it is done with sending, it proceeds to build its own
! local part.
! The slave side simply waits for the information from the master and, once it
! arrives, pieces together the whole mesh from the (limited) information and
! its knowledge of the structure of the datatypes.
! It is paramount that the structure of the datatypes remain roughly the same,
! especially in the refinement tree, since it is relied upon quite extensively.

! Details:
! The auxiliary structures created at the beginning of the send map
! fine nodes to coarse elements (so we know which to send and which not)
! and fine elements to processes (so we know who gets what).
! This ensures that for each process we only need to look through its
! <local> data without having to iterate over all the global values each
! time, so as the process count increases the efficiency of this function
! does not decrease by much (it decreases a little due to overlaps, which
! are sure to occur, but there seems to be no way to avoid that).
! Elmap, coordinates, lg_maps and freemaps are sent in a naive way.
! gl_maps are constructed from lg_maps, as that is easy to do.
! The refined element structure is rebuilt from 2 arrays, one holding
! the levels of the refined elements and the second holding the ending
! indices of each element's fine nodes in "elmap". Since the order of the
! refined elements is fixed, that is enough to deduce all other needed values.
! For each hanging node two indices are sent which describe between
! which refined elements it lies. Again, that is enough to reconstruct "hnds".

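! To illustrate the two-array encoding of the refinement tree (made-up
! numbers, for illustration only): suppose a coarse element is sent with
!   levels = (/ 1, 2, 2, 1 /)  and  lends = (/ 3, 5, 6, 9 /).
! The refined elements arrive in their original depth-first order, so the
! receiver can rebuild the tree with a stack holding the last element seen
! on each level: an element of level l gets the stack entry for level l-1
! as its parent (the entry for level 0 being the enclosing coarse element).
! Here the two level-2 elements become children of the first level-1
! element, and the final level-1 element signals an ascent in the tree.
! lends(k) is the running count of the coarse element's fine nodes after
! element k, from which lbeg/lend/lstop are recovered on the receive side.
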
    implicit none

#include<doug_config.h>

#ifdef D_COMPLEX
#define float complex
#else
#define float real
#endif

contains

    !! Map out and send the coarse grid around.
    !! It sends relatively little data but has quite a large memory and
    !! processor overhead.
    subroutine SendCoarse(C, M, LC)
        ! This code depends on:
        !  coarse refined node coordinates being grouped by elements
        !  the ordering of elmap (as it is done currently)
        use RealKind
        use CoarseGrid_class
        use Mesh_class
        use globals, only: myrank,stream

        implicit none

        !! Coarse Grid whose structure to use
        type(CoarseGrid), intent(inout) :: C

        !! Fine Mesh for which to build
        type(Mesh), intent(inout) :: M

        !! Local Coarse Grid to create
        type(CoarseGrid), intent(out) :: LC

        integer :: ndtoel(M%nnode) ! Map fine nodes to coarse elements
        logical :: ispresf(M%ngf) ! Mark down freedoms present in partition

        integer :: fels(M%nell) ! List of fine elements for each process
        integer :: finds(M%nparts+1) ! Indices into it

        integer :: cnfmap(C%ngfc) ! List of global freedoms for each node
        integer :: cnfinds(C%nct+1) ! Indices into it

        integer :: elcnt ! Count of elements to be sent
        logical :: elmask(C%elnum) ! Mark down elements needed for sending
        logical :: refmask(C%refnum) ! Mark down refined els needed for sending
        integer :: ellist(C%ncti) ! The list of element inds to be sent

        integer :: lnnode ! local number of fine nodes
        integer :: gl_nodemap(M%nnode) ! map of global node inds to local
        integer :: lg_nodemap(M%nnode) ! and vice-versa

        integer :: ndcnt ! local number of coarse grid nodes
        integer :: gl_cnodemap(C%nct) ! map of global coarse mesh nds to local
        integer :: lg_cnodemap(C%nct) ! and vice-versa ! TODO: USE IT!

        integer :: nlf ! local number of fine freedoms
        integer :: lfreemap(M%ngf) ! local freemap
        integer :: fremap(M%ngf) ! which global freedom each freemap entry is for

        integer :: nlfc ! Number of coarse local freedoms

        integer :: refcnt ! number of refined elements to send
        integer :: lends(C%refnum) ! adjusted lend of the refined elements sent
        integer :: levels(C%refnum) ! levels of the refined elements sent
        ! The other values can be calculated from these two with relative ease

        integer :: lnfss(C%elnum) ! local nfs-s of coarse grid elements
        integer :: lnrfs(C%elnum) ! local number of refinements

        ! For local coarse grid creation
        integer :: pstack(0:C%mlvl) ! Stack of refinement indices
        integer :: nd, ndp, lvl, rel

        type(RefinedElem),pointer :: ref, old

        ! Hanging node stuff
        integer :: hlist(C%nhn) ! local-to-global list of hanging nodes
        integer :: ha(C%nhn), hb(C%nhn) ! refined elements it lies between
        integer :: hd(C%nhn) ! direction relative to ha
        integer :: hcnt, hdisp ! count of local hanging nodes; their global displacement

        integer :: i, j, k, f, p, el, pt, d, h, nct

        ! Coordinate data for sending
        real(kind=xyzk), pointer :: fcoords(:,:) ! fine mesh coordinates
        real(kind=xyzk), pointer :: ccoords(:,:) ! coarse grid coordinates
        real(kind=xyzk), pointer :: hcoords(:,:)

        ! For MPI
        integer :: req, ierr, stat(MPI_STATUS_SIZE), sz, szs
        character, pointer :: buffer(:)

        LC = CoarseGrid_new()

        buffer=>NULL()
        hdisp=C%ncti+C%refnum-1 ! the displacement of the beginning of hanging nodes

        !************************************************************
        ! Create a map of fine nodes to coarse elements
        !************************************************************

        ndtoel=0
        do el=1,C%elnum
        if (C%els(el)%rbeg<=0) then
            ! Initial grid elements are <0
            ndtoel(C%elmap(C%els(el)%lbeg:C%els(el)%lbeg+C%els(el)%nfs-1))=-el
        else
            j=C%els(el)%rbeg
            do while (j>0)
                ! Refined elements are positive
                ndtoel(C%elmap(C%refels(j)%lbeg:C%refels(j)%lend))=j
                j=C%refels(j)%next
            enddo
        endif
        enddo

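        ! Note the sign convention of ndtoel (illustration): a fine node
        ! lying in unrefined coarse element 7 gets ndtoel = -7, while a node
        ! in the area of refined element 12 (an index into C%refels) gets
        ! ndtoel = 12. Following C%refels(k)%parent from any positive k
        ! eventually turns negative, yielding the owning coarse element.
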
        !************************************************************
        ! Create the list of fine elements for each part
        !************************************************************

        ! Count the number of elements each part has
        finds=0
        do i=1,M%nell
            finds(M%eptnmap(i))=finds(M%eptnmap(i))+1
        enddo

        ! Add them up to get indices shifted one to the left
        do i=2,M%nparts+1
           finds(i)=finds(i-1)+finds(i)
        enddo

        ! Create the element list and slide the indices into place
        do i=1,M%nell
            p=M%eptnmap(i)
            fels(finds(p))=i
            finds(p)=finds(p)-1
        enddo
        finds=finds+1

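        ! A worked illustration of the counting sort above (made-up input):
        ! with M%nell=5, M%nparts=2 and eptnmap = (/ 1, 2, 1, 2, 2 /), the
        ! counting loop gives finds = (/ 2, 3, 0 /), and the prefix sum turns
        ! it into (/ 2, 5, 5 /). The placement loop then fills
        ! fels = (/ 3, 1, 5, 4, 2 /) while sliding finds down to (/ 0, 2, 5 /),
        ! and the final shift leaves finds = (/ 1, 3, 6 /), so part p owns
        ! fels(finds(p):finds(p+1)-1). The same pattern builds the per-node
        ! coarse freedom list cnfmap/cnfinds just below.
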
        !************************************************************
        ! Create the list of coarse freedoms per node
        !************************************************************

        ! Count the number of freedoms each node has
        cnfinds=0
        do i=1,C%ngfc
            cnfinds(C%cfreemap(i))=cnfinds(C%cfreemap(i))+1
        enddo

        ! Add them up to get indices shifted one to the left
        do i=2,C%nct+1
           cnfinds(i)=cnfinds(i-1)+cnfinds(i)
        enddo

        ! Create the freedoms list and slide the indices into place
        do i=1,C%ngfc
            p=C%cfreemap(i)
            cnfmap(cnfinds(p))=i
            cnfinds(p)=cnfinds(p)-1
        enddo
        cnfinds=cnfinds+1

        !********************************************************
        ! Clear the arrays for the first time
        !********************************************************
        ispresf=.false.; elmask=.false.; refmask=.false.
        gl_nodemap=0; gl_cnodemap=0

        !************************************************************
        ! Divide up the data
        !************************************************************

        allocate(fcoords(M%nsd,M%nnode),&
                 ccoords(M%nsd,C%ncti),&
                 hcoords(M%nsd,C%nhn))

        do p=0, M%nparts-1
            ! Skip the master itself; its local part is built below
            if (p==myrank) cycle

            !********************************************************
            ! Reset the variables
            !********************************************************
            lnnode=0; nlf=0; elcnt=0; hcnt=0; refcnt=0

            !********************************************************
            ! Find what to send
            !********************************************************
            do i=finds(p+1),finds(p+2)-1
                el=fels(i)
                do j=1,M%nfrelt(el)
                    f=M%mhead(j, el)

                    ! Mark this node as present
                    if (gl_nodemap(M%freemap(f))==0) then
                        lnnode=lnnode+1
                        fcoords(:,lnnode)=M%coords(:,M%freemap(f))
                        gl_nodemap(M%freemap(f))=lnnode
                        lg_nodemap(lnnode)=M%freemap(f)
                    endif

                    ! Mark this freedom as present
                    if (.not.ispresf(f)) then
                        nlf=nlf+1
                        lfreemap(nlf)=gl_nodemap(M%freemap(f))
                        fremap(nlf)=f
                        ispresf(f)=.true.
                    endif

                    ! Mark the coarse refinements for sending
                    k=ndtoel(M%freemap(f))
                    do while (k>0)
                        ! If it is marked, so are its parents
                        if (refmask(k)) exit

                        refmask(k)=.true.
                        refcnt=refcnt+1

                        k=C%refels(k)%parent ! Move up in the tree
                    enddo

                    ! Mark this coarse grid element for sending if needed
                    if (k<0) then
                    if (.not.elmask(-k)) then
                        elcnt=elcnt+1
                        ellist(elcnt)=-k
                        elmask(-k)=.true.
                    endif; endif
                enddo
            enddo

            !********************************************************
            ! Determine the size of data being sent
            !********************************************************

            ! Determine how many coarse grid nodes and refined elements to send
            ndcnt=0

            do i=1,elcnt
                el=ellist(i)

                ! Add the nodes to the sending list
                do j=1,2**M%nsd
                   if (gl_cnodemap(C%els(el)%n(j))==0) then
                       ndcnt=ndcnt+1
                       gl_cnodemap(C%els(el)%n(j))=ndcnt
                       lg_cnodemap(ndcnt)=C%els(el)%n(j)
                       ccoords(:,ndcnt)=C%coords(:,C%els(el)%n(j))
                   endif
                enddo
            enddo

            ! Update cnodemaps with refined elements
            k=ndcnt+1
            do i=1,elcnt
                el=ellist(i)
                j=C%els(el)%rbeg
                do while (j/=-1)
                    ref=>C%refels(j)
                    if (refmask(j)) then
                      gl_cnodemap(ref%node)=k
                      lg_cnodemap(k)=ref%node

                      ! Worry about hanging nodes
                      if (associated(ref%hnds)) then
                        do d=1,2*M%nsd
                        if (ref%hnds(d)/=0) then
                            f=ref%hnds(d)
                            if (gl_cnodemap(f)>0) then
                                hb(gl_cnodemap(f)-refcnt-ndcnt)=k-ndcnt
                            else ! First occurrence of the node
                                hcnt=hcnt+1
                                gl_cnodemap(f)=ndcnt+refcnt+hcnt
                                lg_cnodemap(hcnt+refcnt+ndcnt)=f
                                hlist(hcnt)=f
                                ha(hcnt)=k-ndcnt; hb(hcnt)=0
                                hd(hcnt)=d; hcoords(:,hcnt)=C%coords(:,f)
                            endif
                        endif
                        enddo
                      endif

                      k=k+1
                    endif
                    j=C%refels(j)%next
                enddo
            enddo

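            ! How a hanging node travels (illustration): when node f is first
            ! met from refined element a = k-ndcnt in direction d, we record
            ! ha=a, hd=d and hb=0; if it is later met again from some element
            ! b, hb is set to b. Each hanging node is thus encoded as the pair
            ! of (local) refined element indices it lies between plus the
            ! direction relative to ha, which is all the receiver needs to
            ! rebuild the hnds arrays of both elements.
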
            ! Set the total number of coarse nodes sent
            nct=ndcnt+refcnt+hcnt

            ! Determine how much of the coarse freemap needs sending
            nlfc=0
            do i=1,nct
                nlfc=nlfc+cnfinds(i+1)-cnfinds(i)
            enddo

            !********************************************************
            ! Determine the size of and allocate the buffer
            !********************************************************

            szs=0

            ! Initial data
            call MPI_PACK_SIZE(9,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Coarse grid elements
            call MPI_PACK_SIZE((2+2**M%nsd)*elcnt,MPI_INTEGER,&
                                                MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Coarse refined elements
            call MPI_PACK_SIZE(2*refcnt,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Coordinates (both fine and coarse)
            call MPI_PACK_SIZE(M%nsd*(lnnode+nct),MPI_xyzkind,&
                                                MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Elmap, lfreemap and fremap
            call MPI_PACK_SIZE(3*lnnode,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! lg_cfreemap
            call MPI_PACK_SIZE(nct,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Cfreemap
            call MPI_PACK_SIZE(nlfc,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

            ! Hanging node data: hd, ha, hb
            call MPI_PACK_SIZE(3*hcnt,MPI_INTEGER,MPI_COMM_WORLD,sz,ierr)
            szs=szs+sz

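            ! Note: MPI_PACK_SIZE returns an upper bound on the space the
            ! corresponding MPI_PACK calls can consume, so summing the
            ! results into szs always yields a sufficiently large buffer,
            ! even if the actual packed size (pt) ends up smaller.
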
            ! If we used the buffer before, empty it
            if (associated(buffer)) then
                call MPI_WAIT(req,stat,ierr)
                deallocate(buffer)
            endif

            allocate(buffer(szs))

            !********************************************************
            ! Pack the buffer and send it (non-blocking)
            !********************************************************

            ! Send the buffer size ahead
            call MPI_ISEND(szs,1, MPI_INTEGER, p, 0, MPI_COMM_WORLD, req, ierr)

            pt=0

            ! Fine mesh sizes
            call MPI_PACK(lnnode,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(nlf,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

            ! Coarse mesh sizes
            call MPI_PACK(ndcnt,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(elcnt,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(refcnt,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(hcnt,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(C%ngfc,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(nlfc,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(C%mlvl,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

            ! Fine mesh data itself
            call MPI_PACK(fcoords(1,1),M%nsd*lnnode, MPI_xyzkind, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(lfreemap(1),nlf, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            call MPI_PACK(fremap(1),nlf, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

            ! Coarse grid coordinates
            call MPI_PACK(ccoords,M%nsd*ndcnt, MPI_xyzkind, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

            ! Coarse freemap
            do i=1,nct
                do k=cnfinds(lg_cnodemap(i)),cnfinds(lg_cnodemap(i)+1)-1
                   call MPI_PACK(i,1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                enddo
            enddo

            ! Coarse freedom local-to-global mapping
            do i=1,nct
                do k=cnfinds(lg_cnodemap(i)),cnfinds(lg_cnodemap(i)+1)-1
                   call MPI_PACK(cnfmap(k),1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                enddo
            enddo

            ! Coarse elmap (and refined element info)
            k=1
            do f=1,elcnt
              el=ellist(f)

              i=C%els(el)%rbeg; lnfss(el)=0; lnrfs(el)=0
              if (i==-1) then ! Not subdivided
                do j=C%els(el)%lbeg,C%els(el)%lbeg+C%els(el)%nfs-1
                    if (gl_nodemap(C%elmap(j))/=0) then
                        lnfss(el)=lnfss(el)+1

                        call MPI_PACK(gl_nodemap(C%elmap(j)), 1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                    endif
                enddo
              else ! subdivided
                do while (i/=-1)
                  if (refmask(i)) then
                    do j=C%refels(i)%lbeg,C%refels(i)%lend
                    if (gl_nodemap(C%elmap(j))/=0) then
                        lnfss(el)=lnfss(el)+1

                        call MPI_PACK(gl_nodemap(C%elmap(j)), 1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                    endif
                    enddo
                    lends(k)=lnfss(el)
                    levels(k)=C%refels(i)%level
                    lnrfs(el)=lnrfs(el)+1
                    k=k+1
                  endif
                  i=C%refels(i)%next
                enddo
              endif
            enddo

            ! Send refined element info gathered before
            if (refcnt/=0) then
                call MPI_PACK(lends,refcnt, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                call MPI_PACK(levels,refcnt, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            endif

            ! Coarse grid elements
            do i=1,elcnt
                el=ellist(i)
                call MPI_PACK(lnrfs(el),1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                call MPI_PACK(lnfss(el),1, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

                do j=1, 2**M%nsd
                    call MPI_PACK(gl_cnodemap(C%els(el)%n(j)),1, MPI_INTEGER,&
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                enddo

                ! Send coarse refined coordinates as needed
                if (C%els(el)%rbeg/=-1) then
                    j=C%els(el)%rbeg
                    do while (j>0)
                        ! NSAME might speed things up, if needed
                        if (refmask(j)) then
                            call MPI_PACK(&
                                C%coords(1,C%refels(j)%node),&
                                M%nsd, MPI_xyzkind, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)

                           refmask(j)=.false. ! cleanup
                        endif
                        j=C%refels(j)%next
                    enddo
                endif
            enddo

            ! Hanging node info
            if (hcnt/=0) then
                call MPI_PACK(hd,hcnt, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                call MPI_PACK(ha,hcnt, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                call MPI_PACK(hb,hcnt, MPI_INTEGER, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
                call MPI_PACK(hcoords(1,1),M%nsd*hcnt, MPI_xyzkind, &
                                buffer(1), szs, pt, MPI_COMM_WORLD, ierr)
            endif

            ! Send the data
            call MPI_ISEND(buffer(1),pt,MPI_PACKED,p,0,MPI_COMM_WORLD,req,ierr)

            !********************************************************
            ! Clear the arrays
            !********************************************************

            ! Resetting only the entries we touched is asymptotically less
            ! expensive than zeroing the full global arrays on each pass
            ispresf(fremap(1:nlf))=.false.
            elmask(ellist(1:elcnt))=.false.
            gl_nodemap(lg_nodemap(1:lnnode))=0
            gl_cnodemap(lg_cnodemap(1:nct))=0
        enddo

        !********************************************************
        !  Do sending cleanup
        !********************************************************

        deallocate (fcoords,ccoords,hcoords)

        if (associated(buffer)) then
            call MPI_WAIT(req,stat,ierr)
            deallocate(buffer)
        endif

        !********************************************************
        !********************************************************
        !**  Create the local grid for the master
        !********************************************************
        !********************************************************

        !********************************************************
        ! Init local values for the mesh
        !********************************************************
        M%lnnode=M%nnode
        M%lcoords=>M%coords ! To avoid allocating the memory twice
        allocate(M%lfreemap(M%nlf))

        !********************************************************
        ! Clear the values
        !********************************************************
        lnnode=0; LC%elnum=0; LC%refnum=0; LC%nhn=0

        !********************************************************
        ! Find how much room things take
        !********************************************************
        do i=1,M%nlf
            f=M%lg_fmap(i)

            if (ndtoel(M%freemap(f))/=0) then

                    ! Build local freemap
                    M%lfreemap(i)=M%freemap(f)

                    ! Mark this node as present
                    if (gl_nodemap(M%freemap(f))==0) then
                        lnnode=lnnode+1
                        gl_nodemap(M%freemap(f))=1 ! we only need true/false now
                    endif

                    ! Mark the coarse refinements for keeping
                    k=ndtoel(M%freemap(f))
                    do while (k>0)
                        ! If it is marked, so are its parents
                        if (refmask(k)) exit

                        refmask(k)=.true.
                        LC%refnum=LC%refnum+1

                        k=C%refels(k)%parent ! Move up in the tree
                    enddo

                    ! Mark this coarse grid element for keeping if needed
                    if (k<0) then
                    if (.not.elmask(-k)) then
                        LC%elnum=LC%elnum+1
                        ellist(LC%elnum)=-k
                        elmask(-k)=.true.
                    endif; endif
            else
                    M%lfreemap(i)=0
            endif
        enddo

        !********************************************************
        ! Determine the size of data being relocated
        !********************************************************

        ! Determine how many coarse grid nodes and refined elements to take
        LC%ncti=0
        do i=1,LC%elnum
            el=ellist(i)

            ! Mark the nodes as being in use
            do j=1,2**M%nsd
               if (gl_cnodemap(C%els(el)%n(j))==0) then
                   LC%ncti=LC%ncti+1
                   gl_cnodemap(C%els(el)%n(j))=LC%ncti
                   lg_cnodemap(LC%ncti)=C%els(el)%n(j)
               endif
            enddo
        enddo

        ! Update cnodemaps with refined elements
        k=LC%ncti+1
        do i=1,LC%elnum
            el=ellist(i)
            j=C%els(el)%rbeg
            do while (j/=-1)
              if (refmask(j)) then
                old=>C%refels(j)
                gl_cnodemap(old%node)=k
                lg_cnodemap(k)=old%node

                ! Worry about hanging nodes
                if (associated(old%hnds)) then
                    do d=1,2*M%nsd
                        if (old%hnds(d)/=0) then
                            f=old%hnds(d)
                            if (gl_cnodemap(f)==0) then
                                LC%nhn=LC%nhn+1
                                gl_cnodemap(f)=LC%ncti+LC%refnum+LC%nhn
                                lg_cnodemap(LC%ncti+LC%refnum+LC%nhn)=f
                            endif
                        endif
                    enddo
                endif

                k=k+1
              endif
              j=C%refels(j)%next
            enddo
        enddo

        LC%nct=LC%ncti+LC%refnum+LC%nhn
        LC%ngfc=C%ngfc
        LC%mlvl=C%mlvl

        ! Determine how much room the coarse freemap needs
        LC%nlfc=0
        do i=1,LC%nct
            LC%nlfc=LC%nlfc+cnfinds(i+1)-cnfinds(i)
        enddo

        !********************************************************
        ! Create the actual structure
        !********************************************************

        call CoarseGrid_allocate(LC,M%nsd,lnnode,coords=.true.,&
                     els=.true.,refels=.true.,cfreemap=.true.,local=.true.)

        ! Coordinates
        LC%coords(:,1:LC%nct)=C%coords(:,lg_cnodemap(1:LC%nct))

        ! Coarse freemap, lg and gl maps
        LC%gl_fmap=0; j=1
        do i=1,LC%nct
            do k=cnfinds(lg_cnodemap(i)),cnfinds(lg_cnodemap(i)+1)-1
                LC%cfreemap(j)=i; LC%lg_fmap(j)=k; LC%gl_fmap(k)=j; j=j+1
            enddo
        enddo

        ! Coarse grid and refined elements + elmap
        k=1; nd=1; rel=1
        do f=1,LC%elnum
            el=ellist(f)

            ! First the initial grid element
            LC%els(f)%nref=C%els(el)%nref
            allocate(LC%els(f)%n(2**M%nsd))
            LC%els(f)%n=gl_cnodemap(C%els(el)%n)
            LC%els(f)%lbeg=nd

            ! Deal with refinements and elmap
            i=C%els(el)%rbeg; LC%els(f)%nfs=0
            if (i==-1) then ! not subdivided
                LC%els(f)%rbeg=-1
                do j=C%els(el)%lbeg,C%els(el)%lbeg+C%els(el)%nfs-1
                    if (gl_nodemap(C%elmap(j))/=0) then
                        LC%els(f)%nfs=LC%els(f)%nfs+1
                        LC%elmap(nd)=C%elmap(j)
                        nd=nd+1
                    endif
                enddo
            else ! subdivided
                LC%els(f)%rbeg=rel
                pstack(0)=-f; lvl=0; ndp=nd-1
                do while (i/=-1)
                    old=>C%refels(i)

                    if (refmask(i)) then

                        ref=>LC%refels(rel)

                        ref%level=old%level
                        ref%node=gl_cnodemap(old%node)
                        ref%parent=pstack(ref%level-1)
                        ref%next=rel+1

                        if (associated(old%hnds)) then
                            allocate(ref%hnds(2*M%nsd))
                            do d=1,2*M%nsd
                                if (old%hnds(d)==0) then
                                    ref%hnds(d)=0
                                else
                                    ref%hnds(d)=gl_cnodemap(old%hnds(d))
                                endif
                            enddo
                        else
                            nullify(ref%hnds)
                        endif

                        ref%lbeg=nd
                        do j=old%lbeg,old%lend
                            if (gl_nodemap(C%elmap(j))/=0) then
                                LC%els(f)%nfs=LC%els(f)%nfs+1
                                LC%elmap(nd)=C%elmap(j)
                                nd=nd+1
                            endif
                        enddo
                        ref%lend=nd-1
                        ref%lstop=ref%lend

                        ! If we ascend in the tree, mod the lstops
                        if (ref%level<lvl) then
                            do j=ref%level,lvl-1
                                LC%refels(pstack(j))%lstop=ndp
                            enddo
                        endif

                        ! Move forward
                        ndp=ref%lstop; lvl=ref%level
                        pstack(lvl)=rel; rel=rel+1
                    endif
                    i=old%next
                enddo
                ! ref should point to the last refined element made
                ref%next=-1
            endif
        enddo

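        ! A note on the parent stack used above (the receive side uses the
        ! same scheme): pstack(l) always holds the index of the last refined
        ! element seen on level l, with pstack(0) = -(coarse element index),
        ! so a new element of level l finds its parent in pstack(l-1). When
        ! the walk ascends (the new level is below the previous one), the
        ! subtrees left on the stack are complete, and their lstop fields
        ! are patched to ndp, the last elmap index belonging to them.
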
        ! Print a small report
        if (sctls%verbose>3) then
            write (stream,*) "Local coarse mesh has: "
            write (stream,*) "Grid nodes:   ",LC%ncti
            write (stream,*) "Refinements:  ",LC%refnum
            write (stream,*) "Hanging nodes:",LC%nhn
        endif

    end subroutine SendCoarse

    !! Receive the coarse mesh
    subroutine ReceiveCoarse(C, M)
        use RealKind
        use CoarseGrid_class
        use Mesh_class
        use globals, only: stream

        implicit none

        !! Coarse Grid to piece together
        type(CoarseGrid), intent(out) :: C
        !! Fine Mesh with which to build
        type(Mesh), intent(inout) :: M

        integer :: i, j, pt
        integer :: ref, cnd, nd, ndp, lvl

        integer :: nlf
        integer, pointer :: lfreemap(:),fremap(:)
        integer, pointer :: lends(:), levels(:), pstack(:)

        ! For hanging nodes
        integer, pointer :: hd(:), ha(:)

        type(RefinedElem), pointer :: rel

        ! For MPI
        integer :: stat(MPI_STATUS_SIZE), ierr, szs
        character, pointer :: buffer(:)

        C = CoarseGrid_new()

        !*****************************************
        ! Receive the messages
        !*****************************************

        ! Get the buffer size from the master
        call MPI_RECV(szs,1,MPI_INTEGER,D_MASTER,0,MPI_COMM_WORLD,stat,ierr)

        ! Allocate memory for the buffer
        allocate(buffer(szs))

        ! Get the full package of info
        call MPI_RECV(buffer(1),szs,MPI_PACKED,D_MASTER,0,MPI_COMM_WORLD,stat,ierr)

        !*****************************************
        ! Unpack the sizes
        !*****************************************

        pt=0

        ! The first two for the fine mesh
        call MPI_UNPACK(buffer(1),szs, pt,&
                        M%lnnode,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        nlf,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        ! The others for the coarse mesh
        call MPI_UNPACK(buffer(1),szs,pt,&
                        C%ncti,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%elnum,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%refnum,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%nhn,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        C%nct=C%ncti+C%refnum+C%nhn

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%ngfc,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%nlfc,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%mlvl,1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        !*****************************************
        ! Unpack the fine mesh info
        !*****************************************

        ! Allocate memory
        allocate (M%lcoords(M%nsd,M%lnnode),M%lfreemap(M%nlf))
        allocate (lfreemap(nlf),fremap(nlf))

        ! Unpack the coordinates directly
        call MPI_UNPACK(buffer(1),szs, pt,&
                        M%lcoords(1,1), M%nsd*M%lnnode, MPI_xyzkind,&
                        MPI_COMM_WORLD, ierr)

        ! Unpack lfreemap and fremap into temporary arrays
        call MPI_UNPACK(buffer(1),szs, pt,&
                        lfreemap(1), nlf, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        call MPI_UNPACK(buffer(1),szs, pt,&
                        fremap(1), nlf, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        ! Create lfreemap based on the two temporary arrays
        M%lfreemap=0
        do i=1,nlf
            j=M%gl_fmap(fremap(i))
            if (j/=0) M%lfreemap(j)=lfreemap(i)
        enddo

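        ! fremap carries the global numbers of the freedoms whose data was
        ! sent, so M%gl_fmap(fremap(i)) recovers the local slot each entry
        ! belongs in; entries for freedoms this process does not own come
        ! back as 0 from gl_fmap and are simply skipped.
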
        ! Deallocate the auxiliary arrays
        deallocate(lfreemap, fremap)

        !*****************************************
        ! Unpack the already remapped data
        !*****************************************

        ! First, allocate the coarse mesh
        call CoarseGrid_allocate(C,M%nsd,nnode=M%lnnode,coords=.true.,&
                                   els=.true.,refels=.true.,&
                                   cfreemap=.true.,local=.true.)

        ! Coarse grid coordinates
        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%coords(1,1),M%nsd*C%ncti, MPI_xyzkind,&
                        MPI_COMM_WORLD, ierr)

        ! Coarse freemap
        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%cfreemap(1),C%nlfc, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        ! Coarse freedom local-to-global mapping
        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%lg_fmap(1),C%nlfc, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        ! Create the opposite map
        C%gl_fmap=0
        do i=1,C%nlfc
            C%gl_fmap(C%lg_fmap(i))=i
        enddo

        ! Coarse elmap
        call MPI_UNPACK(buffer(1),szs, pt,&
                        C%elmap(1),M%lnnode, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

        !*****************************************
        ! Unpack and rebuild elements
        !  This is the tricky part
        !*****************************************

        if (C%refnum/=0) then
            ! Allocate the memory
            allocate(lends(C%refnum),levels(C%refnum),pstack(0:C%mlvl+1))

            ! Unpack the refined element info
            call MPI_UNPACK(buffer(1),szs, pt,&
                        lends(1),C%refnum, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            call MPI_UNPACK(buffer(1),szs, pt,&
                        levels(1),C%refnum, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)
        endif

        ! One coarse element at a time
        ref=1; cnd=C%ncti+1; nd=1
        do i=1,C%elnum
            call MPI_UNPACK(buffer(1),szs, pt,&
                        C%els(i)%nref, 1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)
            call MPI_UNPACK(buffer(1),szs, pt,&
                        C%els(i)%nfs, 1, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            allocate(C%els(i)%n(2**M%nsd))

            call MPI_UNPACK(buffer(1),szs, pt,&
                        C%els(i)%n(1), 2**M%nsd, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            C%els(i)%lbeg=nd

            if (C%els(i)%nref==0) then ! No refinement
                C%els(i)%rbeg=-1
            else
                ! Unpack the coarse nodes
                call MPI_UNPACK(buffer(1),szs, pt,&
                        C%coords(1,cnd),&
                        M%nsd*C%els(i)%nref, MPI_xyzkind,&
                        MPI_COMM_WORLD, ierr)

                C%els(i)%rbeg=ref
                pstack(0)=-i; lvl=0; ndp=nd-1
                do ref=ref,ref+C%els(i)%nref-1
                    rel=>C%refels(ref)
                    ! Set the values
                    rel%level=levels(ref)
                    rel%node=cnd
                    rel%next=ref+1
                    rel%parent=pstack(levels(ref)-1)
                    rel%lbeg=ndp+1
                    rel%lend=nd+lends(ref)-1
                    rel%lstop=rel%lend
                    nullify(rel%hnds)

                    ! If we ascend in the tree, mod the lstops
                    if (rel%level<lvl) then
                        do j=rel%level,lvl-1
                            C%refels(pstack(j))%lstop=ndp
                        enddo
                    endif

                    ! Move forward
                    ndp=rel%lstop; lvl=rel%level
                    pstack(lvl)=ref
                    cnd=cnd+1
                enddo

                ! Finish up the list (the loop terminates with ref=max+1)
                C%refels(ref-1)%next=-1
            endif

            ! Move forward
            nd=nd+C%els(i)%nfs
        enddo

        ! Deal with hanging nodes
        if (C%nhn>0) then
            allocate(hd(C%nhn),ha(C%nhn))

            ! Directions
            call MPI_UNPACK(buffer(1),szs, pt,&
                        hd,C%nhn, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            ! Indices (first batch)
            call MPI_UNPACK(buffer(1),szs, pt,&
                        ha,C%nhn, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            ! Add the hanging nodes of the first batch
            do i=1,C%nhn
                rel=>C%refels(ha(i))
                if (.not.associated(rel%hnds)) then
                    allocate(rel%hnds(2*M%nsd)); rel%hnds=0
                endif

                rel%hnds(hd(i))=C%ncti+C%refnum+i
            enddo

            ! Indices (second batch)
            call MPI_UNPACK(buffer(1),szs, pt,&
                        ha,C%nhn, MPI_INTEGER,&
                        MPI_COMM_WORLD, ierr)

            ! Add the hanging nodes of the second batch as well.
            ! This needs more caution than the previous one, since the
            ! directions are reversed and some entries might be missing
            ! (see the note after this loop).
            do i=1,C%nhn
            if (ha(i)/=0) then
                rel=>C%refels(ha(i))
                if (.not.associated(rel%hnds)) then
                    allocate(rel%hnds(2*M%nsd)); rel%hnds=0
                endif

                rel%hnds(mod( (hd(i)-1) + M%nsd, 2*M%nsd)+1)=C%ncti+C%refnum+i
            endif
            enddo
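
            ! Worked note on the reversed direction above: opposite
            ! directions differ by nsd (cyclically) in the 1..2*nsd
            ! encoding, so for nsd=2 the formula mod((d-1)+nsd,2*nsd)+1
            ! maps 1->3, 2->4, 3->1, 4->2. A node recorded in direction d
            ! of its first element (ha) thus lies in the opposite
            ! direction of its second element (hb), as the sender intended.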

            deallocate(hd,ha)

            ! Coordinates
            call MPI_UNPACK(buffer(1),szs, pt,&
                        C%coords(1,C%nct-C%nhn+1),M%nsd*C%nhn, MPI_xyzkind,&
                        MPI_COMM_WORLD, ierr)

        endif

        ! Print a small report
        if (sctls%verbose>3) then
            write (stream,*) "Local coarse mesh has: "
            write (stream,*) "Grid nodes:   ",C%ncti
            write (stream,*) "Refinements:  ",C%refnum
            write (stream,*) "Hanging nodes:",C%nhn
        endif

        deallocate(buffer)
        if (C%refnum/=0) deallocate(lends,levels,pstack)

    end subroutine ReceiveCoarse

end module TransmitCoarse