DOUG 0.2

ElemMtxs_distribute.F90

! DOUG - Domain decomposition On Unstructured Grids
! Copyright (C) 1998-2006 Faculty of Computer Science, University of Tartu and
! Department of Mathematics, University of Bath
!
! This library is free software; you can redistribute it and/or
! modify it under the terms of the GNU Lesser General Public
! License as published by the Free Software Foundation; either
! version 2.1 of the License, or (at your option) any later version.
!
! This library is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public
! License along with this library; if not, write to the Free Software
! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
! or contact the authors (University of Tartu, Faculty of Computer Science, Chair
! of Distributed Systems, Liivi 2, 50409 Tartu, Estonia, http://dougdevel.org,
! mailto:info(at)dougdevel.org)

!-------------------------------------------------------
!! Distribution of element matrices: builds the element maps and
!! masks and drives the communication needed for assembly.
module ElemMtxs_distribute

  use DOUG_utils
  use Mesh_class
  use Vect_mod
  use SpMtx_class
  use ElemMtxs_base
  use ElemMtxs_assemble
  use globals

#include <doug_config.h>

#ifdef D_COMPLEX
#define float complex
#else
#define float real
#endif

  implicit none

  !--------------------------------------------------------------------
  !! Everything needed for exchanging interface element matrices with
  !! neighbouring processes: element maps, inner/interface masks and
  !! per-neighbour send buffers.
  type ElemMtxsIntf
     integer :: nell   !< Number of elements (copied from Mesh)
     integer :: mfrelt !< Maximum number of freedoms per element

     !> Number of interface elements to send to each process: nellsend_map[nparts]
     integer, dimension(:), pointer :: nellsend_map

     !> Number of interface elements to receive from each process: nellrecv_map[nparts]
     integer, dimension(:), pointer :: nellrecv_map

     !> MPI request handles for the non-blocking exchange of the two maps above
     integer, dimension(:), pointer :: request_nellrecv_map, request_nellsend_map


     !> Global-to-local element map: gl_emap[nell], zero for elements outside my subpartition
     integer, dimension(:), pointer :: gl_emap

     !> Local-to-global element map: lg_emap[partnelems(myrank+1)]
     integer, dimension(:), pointer :: lg_emap

     integer :: nellintf !< Number of interface elements

     !> Inner/interface mask for local elements (D_ELEM_INNER/D_ELEM_INTERF)
     integer(kind=4), dimension(:),   pointer :: inner_interf_emask ! kind=4 is required, otherwise sum of array may overflow

     !> Map from local element indexes to rows of 'intfsend_emask' (zero for inner elements)
     integer,         dimension(:),   pointer :: intfell2indx

     !> Mask showing which neighbours each interface element is sent to:
     !! intfsend_emask[nellintf,nnghbrs]
     integer(kind=4), dimension(:,:), pointer :: intfsend_emask ! kind=4 is required, otherwise sum of array may overflow

     !> Per-neighbour send buffers for interface element matrices
     type(ElemMtxsPacket), dimension(:), pointer :: intfsend_packets
  end type ElemMtxsIntf

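  ! Indexing convention (a sketch using names from the declarations above):
  ! for a local element 'le' that is an interface element, row
  ! E%intfell2indx(le) of 'intfsend_emask' holds one flag per neighbour,
  ! so the test "does 'le' go to neighbour index 'p'" reads
  !
  !   if (E%intfsend_emask(E%intfell2indx(le), p) /= 0) then
  !      ! element matrix 'le' must be sent to neighbour 'p'
  !   end if
  !
  ! exactly as done in ElemMtxsIntf_addChunk() below.
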
  private :: &
       ElemMtxsIntf_buildInnerInterfEMask, &
       ElemMtxsIntf_buildMapsMasks

contains

  !=================================================================
  !
  ! Interface element distribution logic
  !
  !=================================================================

  !-----------------------------
  !! Creates an interface-exchange object for the given mesh and starts
  !! the non-blocking exchange of interface element counts.
  function ElemMtxsIntf_newInit(Msh) result(E)
    implicit none

    type(Mesh), intent(in) :: Msh
    type(ElemMtxsIntf)     :: E

    integer :: i, nelemsend

    ! Initialize members
    E%nell   = Msh%nell
    E%mfrelt = Msh%mfrelt

    E%nellsend_map => NULL()
    E%nellrecv_map => NULL()
    E%request_nellrecv_map => NULL()
    E%request_nellsend_map => NULL()

    E%gl_emap => NULL()
    E%lg_emap => NULL()

    E%inner_interf_emask => NULL()
    E%intfsend_emask => NULL()
    E%intfell2indx => NULL()

    ! Build element matrix maps and masks
    call ElemMtxsIntf_buildMapsMasks(E, Msh)

    ! Start the exchange of interface element counts
    call ElemMtxsIntf_exchangeMapsMasks(E, Msh)

    ! For all neighbours, allocate chunk structures for the element matrices to be sent
    allocate(E%intfsend_packets(Msh%nnghbrs))
    do i = 1,Msh%nnghbrs
       nelemsend = E%nellsend_map(Msh%nghbrs(i)+1)
       call ElemMtxsPacket_Init(E%intfsend_packets(i), Msh, nelemsend)
    end do
  end function ElemMtxsIntf_newInit
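
  ! Typical lifecycle, as exercised by ElemMtxs_readAndDistribute() and
  ! ElemMtxs_recvAndAssemble() at the end of this module (a sketch, not
  ! the only valid order):
  !
  !   type(ElemMtxsIntf) :: E
  !   E = ElemMtxsIntf_newInit(Msh)               ! build maps, start counts exchange
  !   call ElemMtxsIntf_addChunk(E, chunk, Msh)   ! buffer interface elements (repeatedly)
  !   call ElemMtxsIntf_distAndAssemble(E, A_intf, Msh) ! exchange and assemble
  !   call ElemMtxsIntf_Destroy(E)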

  !-----------------------------
  !! Frees all memory held by the interface-exchange object.
  subroutine ElemMtxsIntf_Destroy(E)
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E

    integer :: i

    E%nell   = -1
    E%mfrelt = -1

    if (associated(E%gl_emap)) deallocate(E%gl_emap)
    if (associated(E%lg_emap)) deallocate(E%lg_emap)
    if (associated(E%inner_interf_emask)) deallocate(E%inner_interf_emask)
    if (associated(E%nellsend_map))  deallocate(E%nellsend_map)
    if (associated(E%nellrecv_map))  deallocate(E%nellrecv_map)
    if (associated(E%request_nellrecv_map)) deallocate(E%request_nellrecv_map)
    if (associated(E%request_nellsend_map)) deallocate(E%request_nellsend_map)
    if (associated(E%intfsend_emask)) deallocate(E%intfsend_emask)
    if (associated(E%intfell2indx))   deallocate(E%intfell2indx)
    if (associated(E%intfsend_packets)) then
       do i = 1,size(E%intfsend_packets)
          call ElemMtxsPacket_Destroy(E%intfsend_packets(i))
       end do
       deallocate(E%intfsend_packets)
    end if

  end subroutine ElemMtxsIntf_Destroy

  !==========================================
  !
  ! Maps and masks
  !
  !==========================================

  !------------------------------------------
  !! Builds 'inner_interf_emask': marks every local element that touches
  !! an interface freedom as an interface element, all others as inner.
  subroutine ElemMtxsIntf_buildInnerInterfEMask(E, M)
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E
    type(Mesh),         intent(in)     :: M

    integer :: h, lf, le

    ! Mark all elements as inner ones
    allocate(E%inner_interf_emask(E%nell))
    E%inner_interf_emask = D_ELEM_INNER
    do h = 1,M%hashsize
       if (M%hash(h,1) > 0) then

          lf = M%gl_fmap(M%hash(h,1))
          if (lf /= 0) then ! the freedom is mine
             ! the freedom is an interface freedom
             if (M%inner_interf_fmask(lf) == D_FREEDOM_INTERF) then
                ! if the element this freedom belongs to is in my subpartition
                if (M%eptnmap(M%hash(h,2)) == (myrank + 1)) then
                   le = E%gl_emap(M%hash(h,2))
                   E%inner_interf_emask(le) = D_ELEM_INTERF
                end if
             end if
          end if
       end if
    end do

    if (D_DEBUGLVL > 4) then
       write(stream,*)
       write(stream,*) 'Inner/Interface elements mask: 0-inner, 1-interface'
       write(stream,*) ' # global | # local | mask'
       do le = 1,E%nell
          write(stream,'(a,i7,a,i7,a,i1)') '   ',E%lg_emap(le),' | ',le,&
               ' |   ',E%inner_interf_emask(le)
       end do
    end if
  end subroutine ElemMtxsIntf_buildInnerInterfEMask
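
  ! Layout of the mesh hash table as relied upon above and in
  ! ElemMtxsIntf_buildMapsMasks() (read off from this code, stated here
  ! as an assumption about Mesh_class):
  !
  !   M%hash(h,1) - global freedom number (> 0 marks an occupied row)
  !   M%hash(h,2) - global number of an element containing that freedom
  !
  ! M%hashlook(int(gf/M%hscale)+1) gives the first row to probe for
  ! freedom 'gf'; probing continues linearly while rows stay occupied:
  !
  !   h = M%hashlook(int(gf/M%hscale)+1)
  !   do while (M%hash(h,1) > 0)
  !      if (M%hash(h,1) == gf) then
  !         ! M%hash(h,2) is an element containing 'gf'
  !      end if
  !      h = h + 1
  !   end do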


  !-------------------------------------------
  !! Builds the element maps and masks:
  !!  - 'gl_emap' / 'lg_emap' : global<->local element maps
  !!  - 'inner_interf_emask'  : inner/interface mask for local elements
  !!  - 'intfell2indx'        : local element index -> interface element index
  !!  - 'intfsend_emask'      : which neighbours each interface element goes to
  !!  - 'nellintf'            : number of interface elements
  subroutine ElemMtxsIntf_buildMapsMasks(E, M)
    use globals
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E
    type(Mesh),         intent(in)     :: M

    integer                        :: i, j
    integer :: ge, le, gf, lf, h, p, elem_pid
    ! Map from process ids to indexes in 'E%intfsend_emask':
    ! pid2indx[M%nparts]
    integer, dimension(:), pointer :: pid2indx

    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Global to local / local to global
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    allocate(E%gl_emap(M%nell))
    E%gl_emap = 0
    allocate(E%lg_emap(M%partnelems(myrank+1)))
    E%lg_emap = 0
    i = 0
    do ge = 1,M%nell
       ! If the element belongs to my subpartition
       if (M%eptnmap(ge) == myrank+1) then
          i = i + 1
          E%gl_emap(ge) = i
          E%lg_emap(i)  = ge
       end if
    end do

    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Inner/interface masks for elements
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    call ElemMtxsIntf_buildInnerInterfEMask(E, M)

    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Mask for interface elements, which shows to whom
    ! each interface element will be sent:
    ! - 'intfsend_emask' (and 'intfell2indx')
    ! The number of interface elements is calculated as well:
    ! - 'nellintf'
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    ! Map from process ids to indexes in
    ! 'ElemMtxs%intfsend_emask[pid2indx(:),]'
    allocate(pid2indx(M%nparts))
    pid2indx = 0
    do p = 1,M%nparts
       do j = 1,M%nnghbrs
          if (M%nghbrs(j) == p-1) then
             pid2indx(p) = j
             exit
          end if
       end do
    end do

    ! Map from local interface element ids to indexes
    ! in 'ElemMtxs%intfsend_emask[,E%intfell2indx(:)]'
    allocate(E%intfell2indx(E%nell))
    E%intfell2indx = 0
    i = 0
    do le = 1,E%nell
       if (E%inner_interf_emask(le) == D_ELEM_INTERF) then
          i = i + 1
          E%intfell2indx(le) = i
       end if
    end do

    ! Number of interface elements
    E%nellintf = 0
    do i = 1,E%nell
       if (E%inner_interf_emask(i) /= 0) &
            E%nellintf = E%nellintf + 1
    end do

    ! Build 'ElemMtxs%intfsend_emask':
    allocate(E%intfsend_emask(E%nellintf,M%nnghbrs))
    E%intfsend_emask = 0
    do le = 1,E%nell
       if (E%inner_interf_emask(le) == D_ELEM_INTERF) then
          ge = E%lg_emap(le)
          do i = 1,M%nfrelt(ge)
             gf = M%mhead(i,ge)
             lf = M%gl_fmap(gf)
             ! the freedom qualifies as a local interface freedom
             if (M%inner_interf_fmask(lf) == D_FREEDOM_INTERF) then
                h = M%hashlook(int(gf/M%hscale)+1)
                do while (M%hash(h,1) > 0)
                   ! look for elements from other subpartitions
                   ! which touch this particular freedom on my interface
                   if (M%hash(h,1) == gf) then
                      elem_pid = M%eptnmap(M%hash(h,2))
                      if (elem_pid /= myrank+1) then
                         E%intfsend_emask(E%intfell2indx(le),pid2indx(elem_pid)) = 1
                      end if
                   end if
                   h = h + 1
                end do ! do while
             end if
          end do
       end if
    end do

    deallocate(pid2indx)
  end subroutine ElemMtxsIntf_buildMapsMasks
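
  ! The two element maps are mutual inverses on my subpartition, i.e.
  ! E%gl_emap(E%lg_emap(le)) == le for every local index 'le', while
  ! E%gl_emap(ge) == 0 for elements of other subpartitions. A small
  ! sanity-check sketch:
  !
  !   do le = 1,size(E%lg_emap)
  !      if (E%gl_emap(E%lg_emap(le)) /= le) &
  !           call DOUG_abort('element maps are inconsistent', -1)
  !   end do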


  !----------------------------------------------
  !! Starts the non-blocking exchange of interface element counts with
  !! all neighbours.
  subroutine ElemMtxsIntf_exchangeMapsMasks(E, M)
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E
    type(Mesh),         intent(in)     :: M

    integer :: i, p
    integer :: ierr

    ! Count the interface elements to be sent to each neighbour and
    ! post non-blocking sends for the counts
    allocate(E%nellsend_map(M%nparts))
    E%nellsend_map = 0
    allocate(E%request_nellsend_map(M%nnghbrs))
    do i = 1,M%nnghbrs
       p = M%nghbrs(i)
       E%nellsend_map(p+1) = sum(E%intfsend_emask(:,i))
       call MPI_ISEND(E%nellsend_map(p+1), 1, MPI_INTEGER,&
            p, D_TAG_NELEMINTF_SEND, MPI_COMM_WORLD, &
            E%request_nellsend_map(i), ierr)
    end do

    ! Initialise non-blocking receives for the numbers of interface
    ! elements I will receive from neighbours
    ! NB! the corresponding MPI_WAITALL() is in 'ElemMtxsIntf_waitMapsMasks()'
    allocate(E%nellrecv_map(M%nparts))
    E%nellrecv_map = 0
    allocate(E%request_nellrecv_map(M%nnghbrs))
    do i = 1,M%nnghbrs
       p = M%nghbrs(i)
       call MPI_IRECV(E%nellrecv_map(p+1), 1, MPI_INTEGER, &
            p, D_TAG_NELEMINTF_SEND, MPI_COMM_WORLD, &
            E%request_nellrecv_map(i), ierr)
    end do

  end subroutine ElemMtxsIntf_exchangeMapsMasks
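
  ! The counts exchange is deliberately split in two: ElemMtxsIntf_newInit()
  ! posts the MPI_ISEND/MPI_IRECV calls above, while the matching
  ! MPI_WAITALL() sits in ElemMtxsIntf_waitMapsMasks(). This is how
  ! ElemMtxs_readAndDistribute() overlaps communication with useful work:
  !
  !   E = ElemMtxsIntf_newInit(Msh)            ! counts now in flight
  !   ... read and buffer element matrices ...
  !   call ElemMtxsIntf_waitMapsMasks(E, Msh)  ! completes both exchanges
  !                                            ! (done inside distAndAssemble)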


  !----------------------------------------------
  !! Completes the counts exchange started in
  !! 'ElemMtxsIntf_exchangeMapsMasks()'.
  subroutine ElemMtxsIntf_waitMapsMasks(E, M)
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E
    type(Mesh),         intent(in)     :: M

    integer :: ierr

    ! Wait until all counts in nellrecv_map have been received
    call MPI_WAITALL(size(E%request_nellrecv_map), E%request_nellrecv_map, MPI_STATUSES_IGNORE, ierr)
    deallocate(E%request_nellrecv_map)
    E%request_nellrecv_map => NULL()

    ! Wait until all counts in nellsend_map have been sent
    call MPI_WAITALL(size(E%request_nellsend_map), E%request_nellsend_map, MPI_STATUSES_IGNORE, ierr)
    deallocate(E%request_nellsend_map)
    E%request_nellsend_map => NULL()
  end subroutine ElemMtxsIntf_waitMapsMasks


  !------------------------------------------
  !! Scans a chunk of element matrices and buffers every element that has
  !! to be sent to a neighbour into the corresponding send packet.
  subroutine ElemMtxsIntf_addChunk(E, chunk, Msh)
    implicit none

    type(ElemMtxsIntf),  intent(in out) :: E
    type(ElemMtxsChunk), intent(in)     :: chunk !< chunk of elements to be added to E
    type(Mesh),          intent(in)     :: Msh

    integer :: i, j, p
    integer :: ge, le

    ! Check all elements in the chunk - if some of them must be sent to neighbours, keep them
    do i = 1,chunk%nell
       ge = chunk%lg_emap(i)
       le = E%gl_emap(ge)
       if (E%intfell2indx(le) /= 0) then ! an interface element
          do p = 1,Msh%nnghbrs
             if (E%intfsend_emask(E%intfell2indx(le),p) /= 0) then
                E%intfsend_packets(p)%chunk%nell = E%intfsend_packets(p)%chunk%nell + 1
                j = E%intfsend_packets(p)%chunk%nell
                E%intfsend_packets(p)%chunk%lg_emap(j)   = chunk%lg_emap(i)
                E%intfsend_packets(p)%chunk%elem(:,:,j)  = chunk%elem(:,:,i)
                E%intfsend_packets(p)%chunk%elemrhs(:,j) = chunk%elemrhs(:,i)
             end if
          end do
       end if
    end do
  end subroutine ElemMtxsIntf_addChunk
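
  ! Once all chunks have passed through here, the number buffered for each
  ! neighbour must equal the count announced earlier in 'nellsend_map';
  ! ElemMtxsIntf_distAndAssemble() below checks exactly that:
  !
  !   nelemsend = E%nellsend_map(Msh%nghbrs(p)+1)
  !   if (nelemsend /= E%intfsend_packets(p)%chunk%nell) &
  !       call DOUG_abort('[ElemMtxsIntf_distAndAssemble] : size mismatch', -1)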


  !------------------------------------------
  !! Sends the buffered interface element matrices to the neighbours,
  !! receives theirs, and assembles the received ones into 'A_interf'.
  subroutine ElemMtxsIntf_distAndAssemble(E, A_interf, Msh)
    use globals, only : stream
    implicit none

    type(ElemMtxsIntf), intent(in out) :: E
    type(SpMtx),        intent(out)    :: A_interf
    type(Mesh),         intent(in)     :: Msh

    integer                        :: ierr, nghbrsused, nghbridx, nelemsend
    integer                        :: p
    logical, dimension(:), pointer :: recved
    type(ElemMtxsAssembleContext)  :: AC
    type(ElemMtxsChunk)            :: chunk

    ! TODO: get rid of this barrier - that requires using different packet tags
    call MPI_BARRIER(MPI_COMM_WORLD, ierr)

    ! Wait for the maps synchronization to finish
    call ElemMtxsIntf_waitMapsMasks(E, Msh)

    ! Initialize assembling context
    AC = ElemMtxsAssembleContext_newInit(Msh)

    ! Send interface-exchange chunks
    do p = 1,Msh%nnghbrs
       nelemsend = E%nellsend_map(Msh%nghbrs(p)+1)
       if (nelemsend /= E%intfsend_packets(p)%chunk%nell) &
           call DOUG_abort('[ElemMtxsIntf_distAndAssemble] : size mismatch', -1)
       call ElemMtxsPacket_send(E%intfsend_packets(p), Msh, Msh%nghbrs(p))
    end do

    ! Receive interface-exchange chunks and assemble them
    allocate(recved(Msh%nnghbrs))
    recved = .false.
    nghbridx = 0
    nghbrsused = 0
    call ElemMtxsChunk_Init(chunk, maxval(E%nellrecv_map), Msh)
    do while (nghbrsused < Msh%nnghbrs)
       nghbridx = mod(nghbridx, Msh%nnghbrs) + 1
       if (.not.recved(nghbridx)) then
          call ElemMtxsChunk_recv(chunk, Msh, Msh%nghbrs(nghbridx)) ! TODO: better poll if received, otherwise take next neighbour
          call ElemMtxsAssembleContext_addChunk(AC, chunk, Msh)
          recved(nghbridx) = .true.
          nghbrsused = nghbrsused + 1
       end if
    end do
    call ElemMtxsChunk_Destroy(chunk)
    deallocate(recved)

    ! Extract the final sparse matrix, destroy the context
    call ElemMtxsAssembleContext_extractSpMtx(AC, A_interf, Msh)
    call ElemMtxsAssembleContext_Destroy(AC)

    ! Wait until sending has completed
    do p = 1,Msh%nnghbrs
       call ElemMtxsPacket_wait(E%intfsend_packets(p))
    end do
  end subroutine ElemMtxsIntf_distAndAssemble
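
  ! Note on the receive loop above: ElemMtxsChunk_recv() blocks on one fixed
  ! neighbour per iteration, which is what the in-code TODO is about. A
  ! hedged sketch of the suggested polling variant using plain MPI (the tag
  ! to probe for is an assumption - it must match whatever tag
  ! ElemMtxsChunk_recv() expects):
  !
  !   call MPI_IPROBE(Msh%nghbrs(nghbridx), MPI_ANY_TAG, MPI_COMM_WORLD, &
  !        flag, status, ierr)
  !   if (flag) then
  !      call ElemMtxsChunk_recv(chunk, Msh, Msh%nghbrs(nghbridx))
  !   else
  !      ! nothing from this neighbour yet - move on to the next one
  !   end if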


  !=================================================================
  !
  ! File I/O and main distribution logic
  !
  !=================================================================

  !-----------------------------------------------------------------
  !! Master part of the distribution: reads element matrices and RHSs
  !! from a file, sends each element to the process owning it, and
  !! assembles the master's own elements (plus, optionally, the
  !! interface matrix).
  subroutine ElemMtxs_readAndDistribute(Msh, fnElemMatrs, A, b, A_intf)
    use globals, only : stream, sctls
    implicit none

    type(Mesh),                intent(in) :: Msh
    character(len=*),          intent(in) :: fnElemMatrs !< name of the file containing element matrices
    type(SpMtx),              intent(out) :: A !< assembled non-interface elements
    float(kind=rk), dimension(:), pointer :: b !< assembled RHS vector
    type(SpMtx), intent(out), optional    :: A_intf !< optional matrix containing assembled interface elements

    integer                                     :: elemMatrs = 50 ! I/O unit for the element matrix file
    integer                                     :: npackets
    integer                                     :: i, j, k, p
    type(ElemMtxsPacket),               pointer :: packet, prev_packet
    type(ElemMtxsPacket), dimension(:), pointer :: packets
    type(ElemMtxsAssembleContext)               :: AC
    type(ElemMtxsIntf)                          :: E

    ! Read in element nodes data
    write(stream,*)
    write(stream, FMT='(a)', advance='no') 'Reading in element matrices'//&
         ' and RHSs ... '
    call flush(stream)
    open(elemMatrs, FILE=fnElemMatrs, STATUS='OLD', FORM='UNFORMATTED') !XXX TODO element_rhs_file

    ! Build element matrix maps and masks
    E = ElemMtxsIntf_newInit(Msh)

    ! Initialize assembling context
    AC = ElemMtxsAssembleContext_newInit(Msh)

    ! Initialize packet structures
    allocate(packets(Msh%nparts))
    do p = 1,Msh%nparts
       call ElemMtxsPacket_Init(packets(p), Msh)
    end do

    ! Buffer and distribute elements in packets
    do i = 1,Msh%nell
       ! Find p - the partition (process rank + 1) that owns this element
       p = Msh%eptnmap(i)

       ! Find a temporary buffer to use
       packet => packets(p)
       prev_packet => NULL()
       do while (associated(packet))
          if (ElemMtxsPacket_sendInProgress(packet)) then
             prev_packet => packet
             packet => packet%next
             cycle
          end if
          exit
       end do
       if (.not.associated(packet)) then
          allocate(packet)
          prev_packet%next => packet
          call ElemMtxsPacket_Init(packet, Msh)
       end if

       ! Read the element matrix and its RHS
       packet%chunk%nell = packet%chunk%nell + 1
       packet%chunk%lg_emap(packet%chunk%nell) = i
       do j = 1,Msh%nfrelt(i)
          read (elemMatrs) (packet%chunk%elem(k,j,packet%chunk%nell), k = 1,Msh%nfrelt(i))
       end do
       read (elemMatrs) (packet%chunk%elemrhs(k,packet%chunk%nell), k = 1,Msh%nfrelt(i))

       ! Check if we should flush the buffer
       if (packet%chunk%nell == size(packet%chunk%lg_emap)) then
          if (p == 1) then ! the master keeps its own elements
             call ElemMtxsAssembleContext_addChunk(AC, packet%chunk, Msh)
             call ElemMtxsIntf_addChunk(E, packet%chunk, Msh)
          else
             call ElemMtxsPacket_send(packet, Msh, p-1)
          end if
          packet%chunk%nell = 0
       end if
    end do

    ! Flush the remaining chunks
    npackets = 0
    do p = 1, Msh%nparts
       packet => packets(p)
       do while (associated(packet))
          if (packet%chunk%nell > 0) then
             if (p == 1) then
                call ElemMtxsAssembleContext_addChunk(AC, packet%chunk, Msh)
                call ElemMtxsIntf_addChunk(E, packet%chunk, Msh)
             else
                call ElemMtxsPacket_send(packet, Msh, p-1)
             end if
          end if
          call ElemMtxsPacket_wait(packet)
          prev_packet => packet
          packet => packet%next
          npackets = npackets + 1
          call ElemMtxsChunk_Destroy(prev_packet%chunk)
       end do
    end do
    deallocate(packets)

    ! Extract the final data from the assembling context, destroy temporary objects
    call ElemMtxsAssembleContext_extractSpMtx(AC, A, Msh)
    if (sctls%useAggregatedRHS) then
       call Vect_readAndBroadcastRHS(b, Msh)
    else
       call ElemMtxsAssembleContext_extractVect(AC, b, Msh)
    end if
    call ElemMtxsAssembleContext_Destroy(AC)

    ! Synchronize interface elements
    if (present(A_intf)) call ElemMtxsIntf_distAndAssemble(E, A_intf, Msh)
    call ElemMtxsIntf_Destroy(E)
    call Vect_exchangeIntf(b, Msh)

    ! Done
    close(elemMatrs)
    write(stream, *) 'number of packets in use: ', npackets
    write(stream, *) 'done'
    call flush(stream)

  end subroutine ElemMtxs_readAndDistribute


  !-----------------------------------------------------------------
  !! Slave part of the distribution: receives this process's element
  !! matrices from the master and assembles the local (plus, optionally,
  !! interface) matrices.
  subroutine ElemMtxs_recvAndAssemble(Msh, A, b, A_intf)
    use globals, only : stream, sctls
    implicit none

    type(Mesh),               intent(in)  :: Msh
    type(SpMtx),              intent(out) :: A !< assembled non-interface elements
    float(kind=rk), dimension(:), pointer :: b !< assembled RHS vector
    type(SpMtx),    intent(out), optional :: A_intf !< optional matrix containing assembled interface elements

    integer                       :: nell
    type(ElemMtxsChunk)           :: chunk
    type(ElemMtxsAssembleContext) :: AC
    type(ElemMtxsIntf)            :: E

    ! Initialize the interface exchange procedure
    E = ElemMtxsIntf_newInit(Msh)

    ! Initialize assembling context
    AC = ElemMtxsAssembleContext_newInit(Msh)

    ! Loop until all elements have been received. Assemble the final sparse matrix chunk by chunk
    nell = 0
    call ElemMtxsChunk_Init(chunk, D_ELEMMTXS_IN_PACKET, Msh)
    do while (nell < Msh%partnelems(myrank+1))
       call ElemMtxsChunk_recv(chunk, Msh)
       call ElemMtxsAssembleContext_addChunk(AC, chunk, Msh)
       call ElemMtxsIntf_addChunk(E, chunk, Msh)
       nell = nell + chunk%nell
    end do ! while
    call ElemMtxsChunk_Destroy(chunk)

    ! Extract the final sparse matrix, destroy the context
    call ElemMtxsAssembleContext_extractSpMtx(AC, A, Msh)
    if (sctls%useAggregatedRHS) then
       call Vect_readAndBroadcastRHS(b, Msh)
    else
       call ElemMtxsAssembleContext_extractVect(AC, b, Msh)
    end if
    call ElemMtxsAssembleContext_Destroy(AC)

    ! Assemble interface elements, destroy interface info
    if (present(A_intf)) call ElemMtxsIntf_distAndAssemble(E, A_intf, Msh)
    call ElemMtxsIntf_Destroy(E)
    call Vect_exchangeIntf(b, Msh)

  end subroutine ElemMtxs_recvAndAssemble
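
  ! The two routines above form a matched pair; a sketch of the assumed
  ! driver pattern (the actual call sites live outside this module, and
  ! 'ismaster()' from DOUG_utils is assumed here):
  !
  !   if (ismaster()) then
  !      call ElemMtxs_readAndDistribute(Msh, fnElemMatrs, A, b, A_intf)
  !   else
  !      call ElemMtxs_recvAndAssemble(Msh, A, b, A_intf)
  !   end if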

end module ElemMtxs_distribute