DOUG 0.2

CoarseAllgathers.F90

! DOUG - Domain decomposition On Unstructured Grids
! Copyright (C) 1998-2006 Faculty of Computer Science, University of Tartu and
! Department of Mathematics, University of Bath
!
! This library is free software; you can redistribute it and/or
! modify it under the terms of the GNU Lesser General Public
! License as published by the Free Software Foundation; either
! version 2.1 of the License, or (at your option) any later version.
!
! This library is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! Lesser General Public License for more details.
!
! You should have received a copy of the GNU Lesser General Public
! License along with this library; if not, write to the Free Software
! Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
! or contact the authors (University of Tartu, Faculty of Computer Science, Chair
! of Distributed Systems, Liivi 2, 50409 Tartu, Estonia, http://dougdevel.org,
! mailto:info(at)dougdevel.org)

!> Gathering the coarse problem data (coarse freedom maps, coarse matrix and
!! coarse vectors) onto all processes with non-blocking allgather operations.
module CoarseAllgathers

    use RealKind
    use SpMtx_class
    use SpMtx_arrangement
    implicit none

#include<doug_config.h>

#ifdef D_COMPLEX
#define float complex
#else
#define float real
#endif

    !> Bookkeeping for one non-blocking allgather operation
    type SendData
        integer :: ssize                         !< Size of the data this process sends
        integer, pointer :: rsizes(:), rdisps(:) !< Receive counts and displacements per process
        integer :: send                          !< Handle of the pending non-blocking allgather
        float(kind=rk), pointer :: fbuf(:)       !< Receive buffer for float data
    end type

    !> Coarse problem data kept by each process
    type CoarseData
        logical :: active=.false.        !< Whether the coarse data is in use
        integer :: nprocs                !< Number of processes
        integer :: ngfc,nlfc             !< Numbers of global and local coarse freedoms
        integer, pointer :: cdisps(:)    !< Coarse freedom displacements in the gathered arrays
        integer, pointer :: glg_cfmap(:) !< Gathered local-to-global coarse freedom maps of all processes
        integer, pointer :: lg_cfmap(:), gl_cfmap(:) !< Local-to-global and global-to-local coarse freedom maps

        type(SpMtx) :: LAC      !< Local coarse matrix piece
        type(SpMtx) :: AC       !< Global coarse matrix
        type(SpMtx) :: R        !< Restriction matrix

        type(SendData) :: send  !< Auxiliary struct for sending data
    end type CoarseData

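    ! A descriptive sketch (not part of the original source) of the relations the
    ! coarse maps satisfy once an assumed CoarseData object "cdat" has been set up,
    ! written as checks one could run:
    !
    !   do i=1,cdat%nlfc
    !     if (cdat%gl_cfmap(cdat%lg_cfmap(i)) /= i) stop "lg/gl coarse maps inconsistent"
    !   enddo
    !   ! the process of rank p-1 contributed the slice
    !   !   cdat%glg_cfmap(cdat%cdisps(p)+1 : cdat%cdisps(p+1))
    !   if (cdat%cdisps(cdat%nprocs+1) /= size(cdat%glg_cfmap)) stop "cdisps inconsistent"
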
contains

  !> Copy the coarse problem sizes and the local coarse maps of cdata2 into cdata;
  !! the gathered arrays (cdisps, glg_cfmap) are only deallocated, not copied.
  subroutine CoarseData_Copy(cdata, cdata2)
    type(CoarseData),intent(inout) :: cdata
    type(CoarseData),intent(in) :: cdata2
    cdata%ngfc = cdata2%ngfc
    cdata%nlfc = cdata2%nlfc

    if (associated(cdata%lg_cfmap)) deallocate(cdata%lg_cfmap)
    allocate(cdata%lg_cfmap(cdata%nlfc))
    cdata%lg_cfmap = cdata2%lg_cfmap

    if (associated(cdata%gl_cfmap)) deallocate(cdata%gl_cfmap)
    allocate(cdata%gl_cfmap(cdata%ngfc))
    cdata%gl_cfmap = cdata2%gl_cfmap

    if (associated(cdata%cdisps)) deallocate(cdata%cdisps)
    if (associated(cdata%glg_cfmap)) deallocate(cdata%glg_cfmap)
  end subroutine CoarseData_Copy

    !> Create a SendData object for nproc processes
    function SendData_New(nproc) result (S)
        use Mesh_class

        integer, intent(in) :: nproc
        type(SendData) :: S

        S%send=0; S%ssize=-1
        allocate(S%rsizes(nproc),S%rdisps(nproc))
        nullify(S%fbuf)

    end function SendData_New

    !> Free the arrays held by a SendData object
    subroutine SendData_destroy(S)

        type(SendData), intent(inout) :: S

        S%ssize=-1
        deallocate(S%rsizes,S%rdisps)
        if (associated(S%fbuf)) deallocate(S%fbuf)

    end subroutine SendData_destroy

    !> Start the non-blocking allgather of the local-to-global coarse freedom maps
    subroutine AllSendCoarselgmap(lg_cfmap,nlfc,nproc,cdisps,glg_cfmap,send)
        use RealKind
        use SpMtx_class
        use SpMtx_util
        use Mesh_class
        use globals, only: stream

        integer, intent(in) :: lg_cfmap(:)  !< Local-to-global coarse freedom map

        integer, intent(in) :: nlfc         !< Number of local coarse freedoms

        integer, intent(in) :: nproc        !< Number of processes

        integer, intent(out) :: cdisps(:)   !< Coarse freedom displacements in the gathered array

        integer, pointer :: glg_cfmap(:)    !< Gathered maps of all processes (allocated here)

        type(SendData), intent(out) :: send !< Communication bookkeeping

        integer :: i

        allocate(send%rsizes(nproc))
        send%ssize=nlfc

        ! Do the sizes communication
        call MPI_ALLGATHER(send%ssize,1,MPI_INTEGER,&
                           send%rsizes,1,MPI_INTEGER,&
                           MPI_COMM_WORLD, i)

        ! Calculate cdisps
        cdisps(1)=0
        do i=1,nproc
           cdisps(i+1)=cdisps(i)+send%rsizes(i)
        enddo

        ! Allocate glg_cfmap; the allgather below fills it
        allocate(glg_cfmap(cdisps(nproc+1)))

        ! Start sending the coarse freedom map
        call MPI_ALLGATHERV_NB_INIT(lg_cfmap,send%ssize,MPI_INTEGER,&
                                  glg_cfmap,send%rsizes,cdisps,MPI_INTEGER,&
                                  MPI_COMM_WORLD, send%send)

    end subroutine AllSendCoarselgmap

    !> Wait for the coarse freedom map allgather started by AllSendCoarselgmap
    subroutine AllRecvCoarselgmap(send)

        type(SendData), intent(in) :: send

        call MPI_ALLGATHERV_NB_WAIT(send%send)
    end subroutine

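    ! A minimal usage sketch (not part of the original source) of the paired map
    ! allgather; "cdat" is an assumed CoarseData object whose lg_cfmap, nlfc and
    ! nprocs fields are already filled in:
    !
    !   allocate(cdat%cdisps(cdat%nprocs+1))
    !   call AllSendCoarselgmap(cdat%lg_cfmap,cdat%nlfc,cdat%nprocs,&
    !                           cdat%cdisps,cdat%glg_cfmap,cdat%send)
    !   ! ... other setup work can overlap with the communication here ...
    !   call AllRecvCoarselgmap(cdat%send)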

    !> Start the non-blocking allgather that assembles the global coarse matrix
    !! from the local pieces
    subroutine AllSendCoarseMtx(A,AG,lg_cfmap,ngfc,nproc,send)
        use RealKind
        use SpMtx_class
        use SpMtx_util
        use Mesh_class
        use globals, only: stream

        type(SpMtx), intent(inout) :: A, AG !< Local piece and the gathered global coarse matrix

        integer, intent(in) :: lg_cfmap(:)  !< Local-to-global coarse freedom map

        integer, intent(in) :: ngfc         !< Number of global coarse freedoms

        integer, intent(in) :: nproc        !< Number of processes

        type(SendData), intent(out) :: send !< Communication bookkeeping

        integer :: i, b, e, cnt
        integer :: res, sends(2)

        ! Do the sizes communication
        call MPI_ALLGATHER(A%nnz,1,MPI_INTEGER,&
                           send%rsizes,1,MPI_INTEGER,&
                           MPI_COMM_WORLD, res)
        ! Calculate the places for each of the processes in the array
        send%rdisps(1)=0
        do i=1,nproc-1
           send%rdisps(i+1)=send%rdisps(i)+send%rsizes(i)
        enddo
        cnt=send%rdisps(nproc)+send%rsizes(nproc)

        ! Create the coarse matrix in its global scale
        AG=SpMtx_newInit(nnz=cnt,nrows=ngfc,ncols=ngfc)

        ! Remap A to the global coarse numbering
        A%indi=lg_cfmap(A%indi); A%indj=lg_cfmap(A%indj)

        ! Send the matrix data in 3 chained batches: row indices, column indices, values
        send%ssize=A%nnz

        call MPI_ALLGATHERV_NB_INIT(A%indi,A%nnz,MPI_INTEGER,&
                                  AG%indi,send%rsizes,send%rdisps,MPI_INTEGER,&
                                  MPI_COMM_WORLD, sends(1))

        call MPI_ALLGATHERV_NB_CHAIN(A%indj,A%nnz,MPI_INTEGER,&
                                  AG%indj,send%rsizes,send%rdisps,MPI_INTEGER,&
                                  MPI_COMM_WORLD,sends(1), sends(2))

        call MPI_ALLGATHERV_NB_CHAIN(A%val,A%nnz,MPI_fkind,&
                                  AG%val,send%rsizes,send%rdisps,MPI_fkind,&
                                  MPI_COMM_WORLD,sends(2), send%send)

    end subroutine AllSendCoarseMtx
00230 
00231     subroutine AllRecvCoarseMtx(A,send,add)
00232         !use SpMtx_arrangement
00234         type(SpMtx), intent(inout) :: A 
00235 
00236         type(SendData), intent(in) :: send
00237         logical,intent(in) :: add
00238 
00239         integer :: i
00240         logical :: ar(A%nrows)
00241 
00242         call MPI_ALLGATHERV_NB_WAIT(send%send)
00243         ! After recieving, get rid of duplicate elements by adding them
00244         call SpMtx_consolidate(A,add)
00245 
00246 
00247         ar=.false.
00248         do i=1,A%nnz
00249             ar(A%indi(i))=.true.
00250         enddo
00251 
00252         ar=.false.
00253         do i=1,A%nnz
00254             ar(A%indj(i))=.true.
00255         enddo
00256 
00257     end subroutine
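
    ! A minimal usage sketch (not part of the original source) of the paired matrix
    ! allgather; "cdat" is an assumed CoarseData object with LAC, AC, lg_cfmap, ngfc
    ! and nprocs already set, and add=.true. is just an example choice:
    !
    !   call AllSendCoarseMtx(cdat%LAC,cdat%AC,cdat%lg_cfmap,cdat%ngfc,cdat%nprocs,cdat%send)
    !   ! ... other local setup may proceed while the three chained allgathers run ...
    !   call AllRecvCoarseMtx(cdat%AC,cdat%send,add=.true.)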

    !> Start the non-blocking allgather of the local coarse vector
    subroutine AllSendCoarseVector(xl,nproc,cdisps,send)
        use RealKind
        use SpMtx_class
        use Mesh_class
        use globals, only: stream

        float(kind=rk), intent(in) :: xl(:) !< Local coarse vector
        integer, intent(in) :: nproc        !< Number of processes

        integer, intent(in) :: cdisps(:)    !< Coarse freedom displacements in the gathered array

        type(SendData), intent(out) :: send !< Communication bookkeeping

        if (sctls%verbose>6) write(stream,*) "Sending local coarse vector"

        if (.not.associated(send%fbuf)) then
            ! Calculate the size of my data
            send%ssize=cdisps(myrank+2)-cdisps(myrank+1)
            send%rsizes=cdisps(2:nproc+1)-cdisps(1:nproc)

            ! Allocate the receive buffer
            allocate(send%fbuf(cdisps(nproc+1)))
        endif

        ! Setup the send
        call MPI_ALLGATHERV_NB_INIT(xl,send%ssize,MPI_fkind,&
                                  send%fbuf,send%rsizes,cdisps,MPI_fkind, &
                                  MPI_COMM_WORLD, send%send)

    end subroutine AllSendCoarseVector

    !> Wait for the coarse vector allgather and assemble the global coarse vector
    subroutine AllRecvCoarseVector(xg,nproc,cdisps,glg_cfmap,send)
        use RealKind
        use SpMtx_class
        use Mesh_class
        use globals, only: stream

        float(kind=rk), intent(out) :: xg(:) !< Assembled global coarse vector
        integer, intent(in) :: nproc         !< Number of processes

        integer, intent(in) :: cdisps(:)     !< Coarse freedom displacements in the gathered array

        integer, intent(in) :: glg_cfmap(:)  !< Gathered local-to-global coarse freedom maps

        type(SendData), intent(in) :: send   !< Communication bookkeeping

        integer :: i

        if (sctls%verbose>6) write(stream,*) "Receiving local coarse vector"

        ! Zero the vector
        xg=0.0_rk

        ! Wait for the data to arrive
        call MPI_ALLGATHERV_NB_WAIT(send%send)

        ! Assemble the global vector from it
        do i=1,cdisps(nproc+1)
          xg(glg_cfmap(i))=xg(glg_cfmap(i))+send%fbuf(i)
        enddo

    end subroutine AllRecvCoarseVector
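
    ! A minimal usage sketch (not part of the original source) of the paired vector
    ! allgather; "cdat" is an assumed CoarseData object, xl an array of size
    ! cdat%nlfc and xg an array of size cdat%ngfc:
    !
    !   call AllSendCoarseVector(xl,cdat%nprocs,cdat%cdisps,cdat%send)
    !   ! ... local work can overlap with the communication here ...
    !   call AllRecvCoarseVector(xg,cdat%nprocs,cdat%cdisps,cdat%glg_cfmap,cdat%send)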

    !> Remove coarse freedoms that no process needs and renumber the remaining
    !! ones consistently across all processes
    subroutine CleanCoarse(C,R,M)
        use RealKind
        use CoarseGrid_class
        use SpMtx_class
        use SpMtx_util
        use globals

        implicit none

        type(CoarseGrid), intent(inout) :: C

        type(Mesh), intent(in) :: M

        type(SpMtx), intent(inout) :: R

        integer :: used(C%nlfc)
        integer :: disps(M%nparts),cnts(M%nparts),remap(0:C%ngfc)
        integer, allocatable :: lunused(:), unused(:)
        integer, allocatable :: ldisagree(:), disagree(:)
        integer, pointer :: lg_fmap(:)
        integer :: i, k, cnt, tcnt, dcnt, ierr

        !--------------------------------------------
        ! Locate seemingly unused nodes
        !--------------------------------------------
        used=0

        ! Find the used ones and the unused count
        cnt=C%nlfc
        do i=1,R%nnz
            if (used(R%indi(i))==0) then
                used(R%indi(i))=1
                cnt=cnt-1
            endif
        enddo

        ! Get the counts of the others
        call MPI_ALLGATHER(cnt, 1,MPI_INTEGER, &
                           cnts,1,MPI_INTEGER,MPI_COMM_WORLD,ierr)

        ! Calculate the displacements
        disps(1)=0
        do i=1,M%nparts-1
            disps(i+1)=disps(i)+cnts(i)
        enddo
        tcnt=disps(M%nparts)+cnts(M%nparts)

        ! If no process has freedoms it does not need, there is nothing to do
        if (tcnt==0) return

        if (sctls%verbose>5) &
               write(stream,*) "Total number of obsoletion candidates received:",tcnt

        ! Allocate memory
        allocate(lunused(cnt),unused(tcnt))

        ! Mark down the unused ones
        k=1
        do i=1,C%nlfc
           if (used(i)==0) then
               lunused(k)=C%lg_fmap(i); k=k+1
           endif
        enddo

        ! Get that info from the others as well
        call MPI_Allgatherv(lunused,cnt,MPI_INTEGER, &
                      unused,cnts,disps,MPI_INTEGER,MPI_COMM_WORLD,ierr)

        deallocate(lunused)

        !--------------------------------------------
        ! Find freedoms whose use status the processes disagree on
        !--------------------------------------------

        ! Find the count of the freedoms we disagree on
        cnt=0
        do i=1,tcnt
            if (C%gl_fmap(unused(i))/=0) then
            if (used(C%gl_fmap(unused(i)))==1) then
                cnt=cnt+1; used(C%gl_fmap(unused(i)))=2
            endif; endif
        enddo

        ! Get the counts of the others
        call MPI_ALLGATHER(cnt, 1,MPI_INTEGER, &
                           cnts,1,MPI_INTEGER,MPI_COMM_WORLD,ierr)

        ! Calculate the displacements
        disps(1)=0
        do i=1,M%nparts-1
            disps(i+1)=disps(i)+cnts(i)
        enddo
        dcnt=disps(M%nparts)+cnts(M%nparts)
        if (sctls%verbose>5) &
               write(stream,*) "Total number of disagreements received:",dcnt

        ! Allocate memory
        allocate(ldisagree(cnt), disagree(dcnt))

        ! Mark down the disagreed ones
        k=1
        do i=1,C%nlfc
           if (used(i)==2) then
               ldisagree(k)=C%lg_fmap(i); k=k+1
           endif
        enddo

        ! Get the disagreement info from the others as well
        if (dcnt/=0) &
        call MPI_Allgatherv(ldisagree,cnt,MPI_INTEGER, &
                      disagree,cnts,disps,MPI_INTEGER,MPI_COMM_WORLD,ierr)

        deallocate(ldisagree)

        !--------------------------------------------
        ! Remap the coarse freedoms
        !--------------------------------------------

        ! Create the remap (maps old coarse nodes to new ones)
        remap=0;
        do i=1,tcnt ! Flag the ones that seem unused
            remap(unused(i))=1
            if (C%gl_fmap(unused(i))/=0) &
                used(C%gl_fmap(unused(i)))=0

        enddo
        do i=1,dcnt ! Unflag the ones that are not unused after all
            remap(disagree(i))=0
            if (C%gl_fmap(disagree(i))/=0) &
                used(C%gl_fmap(disagree(i)))=1
        enddo

        deallocate(unused,disagree)

        cnt=0
        do i=1,C%ngfc
            if (remap(i)==1) then
                remap(i)=remap(i-1)
                cnt=cnt+1
            else
                remap(i)=remap(i-1)+1
            endif
        enddo
        C%ngfc=C%ngfc-cnt

        ! Remap the gl_fmap and lg_fmap
        cnt=0; k=1; C%gl_fmap=0
        do i=1,C%nlfc
            if (used(i)/=0) then
                C%lg_fmap(k)=remap(C%lg_fmap(i))
                C%gl_fmap(C%lg_fmap(k))=k
                used(i)=k ! create the local remap into used
                k=k+1
            endif
        enddo

        if (sctls%verbose>3) &
               write(stream,*) "Cleaned ",C%nlfc-k+1," coarse freedoms"

        ! Remap R
        R%indi(:)=used(R%indi(:))

        ! Restore M_bound if needed
        if (R%arrange_type==D_SpMtx_ARRNG_ROWS) then
            do i=1,C%nlfc
                if (used(i)/=0) &
                    R%M_bound(used(i))=R%M_bound(i)
            enddo
            R%M_bound(k)=R%M_bound(C%nlfc+1)
        endif

        C%nlfc=k-1

        R%nrows=C%nlfc

    end subroutine CleanCoarse

  !> Set up the coarse data structures cdat and cdat_vec from the aggregate numbering
  subroutine setup_aggr_cdat(cdat, cdat_vec, nagrs,n,aggrnum,M)
    use globals
    !use CoarseAllgathers
    use Mesh_class
    use SpMtx_operation
    implicit none

    type(CoarseData),intent(out) :: cdat
    type(CoarseData),intent(out) :: cdat_vec

    integer :: nagrs ! number of aggregates (may increase here)
    integer, intent(in) :: n ! number of unknowns
    integer, dimension(:), pointer :: aggrnum ! larger due to ghost freedoms
    type(Mesh),optional     :: M  ! Mesh

    integer,dimension(:),pointer :: stat
    integer :: i,maxglaggn,nn,cnt,col,thiscol,lg_idx

    if (numprocs>1) then
      nn=size(aggrnum)
      cdat_vec%active=.true.
      cdat_vec%send=SendData_New(numprocs)
      cdat_vec%send%ssize=nagrs
      call MPI_ALLGATHER(cdat_vec%send%ssize,1,MPI_INTEGER,&
                         cdat_vec%send%rsizes,1,MPI_INTEGER,&
                         MPI_COMM_WORLD,i)
      cdat_vec%nlfc=nagrs
      allocate(cdat_vec%lg_cfmap(nagrs))
      cdat_vec%ngfc=sum(cdat_vec%send%rsizes)
      allocate(cdat_vec%gl_cfmap(cdat_vec%ngfc))
      cdat_vec%gl_cfmap=0
      ! assign global numbers to all aggregates, numbering is ordered by rank
      lg_idx=sum(cdat_vec%send%rsizes(1:myrank))
      write(stream,*) "nagrs", nagrs
      do i=1,nagrs
        lg_idx=lg_idx+1
        cdat_vec%lg_cfmap(i)=lg_idx
        cdat_vec%gl_cfmap(lg_idx)=i
      enddo
      allocate(cdat_vec%cdisps(numprocs+1))
      cdat_vec%nprocs=numprocs
      call AllSendCoarselgmap(cdat_vec%lg_cfmap,cdat_vec%nlfc,numprocs,&
                              cdat_vec%cdisps,cdat_vec%glg_cfmap,cdat_vec%send)
      call AllRecvCoarselgmap(cdat_vec%send)
      if (sctls%verbose>3) then
        write(stream,*)'global coarse problem size:',cdat_vec%ngfc
        write(stream,*)'cdat_vec%send%rsizes:',cdat_vec%send%rsizes
        write(stream,*)'cdat_vec%lg_cfmap:',cdat_vec%lg_cfmap
        write(stream,*)'cdat_vec%gl_cfmap:',cdat_vec%gl_cfmap
        write(stream,*)'cdat_vec%cdisps:',cdat_vec%cdisps
        write(stream,*)'cdat_vec%glg_cfmap:',cdat_vec%glg_cfmap
      endif

      ! now change to global aggregate numbers:
      do i=1,n
        if (aggrnum(i)>0) then
          aggrnum(i)=cdat_vec%lg_cfmap(aggrnum(i))
        endif
      enddo
!write(stream,*)'Global aggregate numbers before exchange are:', aggrnum
      if (max(sctls%overlap,sctls%smoothers)>0) then
        call exch_aggr_nums(aggrnum,M)
      else
        call exch_aggr_nums_ol0(aggrnum,M)
      endif
!write(stream,*)'Global aggregate numbers after exchange are:', aggrnum
      ! now localise the aggregate numbers back
      maxglaggn=maxval(aggrnum)
      allocate(stat(maxglaggn))
      stat=0
      cnt=0
      ! mark the local aggregate numbers first:
      do i=1,nagrs
        cnt=cnt+1
        stat(cdat_vec%lg_cfmap(i))=cnt
      enddo
      do i=1,nn
        if (aggrnum(i)>0) then
          if (stat(aggrnum(i))==0) then
            cnt=cnt+1
            stat(aggrnum(i))=cnt
          endif
        endif
      enddo
      cdat%nlfc=cnt
      nagrs=cnt
      cdat%active=.true.
      cdat%send=SendData_New(numprocs)
      cdat%send%ssize=nagrs
      allocate(cdat%lg_cfmap(cdat%nlfc))
      call MPI_ALLGATHER(cdat%send%ssize,1,MPI_INTEGER,&
                         cdat%send%rsizes,1,MPI_INTEGER,&
                         MPI_COMM_WORLD,i)
      cdat%ngfc=cdat_vec%ngfc
      allocate(cdat%gl_cfmap(cdat%ngfc))
      cdat%gl_cfmap=0
      do i=1,maxglaggn
        if (stat(i)>0) then
          cdat%lg_cfmap(stat(i))=i
          cdat%gl_cfmap(i)=stat(i)
        endif
      enddo
      deallocate(stat)
      ! localise aggrnum:
      do i=1,nn
        if (aggrnum(i)>0) then
          aggrnum(i)=cdat%gl_cfmap(aggrnum(i))
        endif
      enddo
      allocate(cdat%cdisps(numprocs+1))
      cdat%nprocs=numprocs
      call AllSendCoarselgmap(cdat%lg_cfmap,cdat%nlfc,numprocs,&
                              cdat%cdisps,cdat%glg_cfmap,cdat%send)
      call AllRecvCoarselgmap(cdat%send)
      if (sctls%verbose>3) then
        write(stream,*)'local overlapped coarse problem size:',cdat%nlfc
        write(stream,*)'cdat%send%rsizes:',cdat%send%rsizes
        write(stream,*)'cdat%lg_cfmap:',cdat%lg_cfmap
        write(stream,*)'cdat%gl_cfmap:',cdat%gl_cfmap
        write(stream,*)'cdat%cdisps:',cdat%cdisps
        write(stream,*)'cdat%glg_cfmap:',cdat%glg_cfmap
      endif
    endif
  end subroutine setup_aggr_cdat

end module CoarseAllgathers