Actual source code: ex17f.F90

!
!
!   Scatters from a parallel vector to a sequential vector.  In
!   this case each local vector is as long as the entire parallel vector.
!
#include <petsc/finclude/petscvec.h>
      use petscvec
      implicit none

      PetscErrorCode ierr
      PetscMPIInt size, rank
      PetscInt NN, low, high
      PetscInt iglobal, i
      PetscInt, parameter :: first = 0, stride = 1, n = 5
      PetscScalar value
      PetscScalar, parameter :: zero = 0.0
      Vec x, y
      IS is1, is2
      VecScatter ctx

      PetscCallA(PetscInitialize(ierr))

      PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
      PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

!     Create two vectors:
!     one parallel and one sequential. The sequential one on each processor
!     is as long as the entire parallel one.

      NN = size*n
      PetscCallA(VecCreateFromOptions(PETSC_COMM_WORLD, PETSC_NULL_CHARACTER, 1_PETSC_INT_KIND, PETSC_DECIDE, NN, y, ierr))
      PetscCallA(VecCreateFromOptions(PETSC_COMM_SELF, PETSC_NULL_CHARACTER, 1_PETSC_INT_KIND, NN, NN, x, ierr))

      PetscCallA(VecSet(x, zero, ierr))
      PetscCallA(VecGetOwnershipRange(y, low, high, ierr))
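!     Each process inserts values only into its locally owned block of y;
!     the value 10*rank + i makes it easy to see which rank set each entry.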
      do i = 0, n - 1
        iglobal = i + low
        value = i + 10*rank
        PetscCallA(VecSetValues(y, 1_PETSC_INT_KIND, [iglobal], [value], INSERT_VALUES, ierr))
      end do
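!     VecSetValues() only caches the values; the VecAssemblyBegin/End() calls
!     below complete the insertion (every entry here is locally owned, so no
!     off-process communication is needed, but assembly is still required).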

      PetscCallA(VecAssemblyBegin(y, ierr))
      PetscCallA(VecAssemblyEnd(y, ierr))
!
!   View the parallel vector
!
      PetscCallA(VecView(y, PETSC_VIEWER_STDOUT_WORLD, ierr))

!     Create two index sets and the scatter context to move the contents
!     of the parallel vector to each sequential vector. If you want the
!     parallel vector delivered to only one processor, then create an is2
!     of length zero on all processors except the one to receive the parallel vector.
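!     A sketch of that single-receiver variant (not executed here): on every
!     rank except the receiver, one could create the index sets with zero length,
!     e.g. PetscCallA(ISCreateStride(PETSC_COMM_SELF, 0_PETSC_INT_KIND, first, stride, is2, ierr)),
!     and likewise a zero-length is1, so only the chosen rank receives data.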

      PetscCallA(ISCreateStride(PETSC_COMM_SELF, NN, first, stride, is1, ierr))
      PetscCallA(ISCreateStride(PETSC_COMM_SELF, NN, first, stride, is2, ierr))
      PetscCallA(VecScatterCreate(y, is2, x, is1, ctx, ierr))
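!     The scatter is split into Begin/End so other work could overlap the
!     communication. ADD_VALUES adds the incoming entries to x, which was
!     zeroed above, so afterwards each x holds a copy of the contents of y.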
      PetscCallA(VecScatterBegin(ctx, y, x, ADD_VALUES, SCATTER_FORWARD, ierr))
      PetscCallA(VecScatterEnd(ctx, y, x, ADD_VALUES, SCATTER_FORWARD, ierr))
      PetscCallA(VecScatterDestroy(ctx, ierr))
!
!   View the sequential vector on the 0th processor
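!   (every process holds an identical copy of x, so printing from one rank suffices)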
!
      if (rank == 0) then
        PetscCallA(VecView(x, PETSC_VIEWER_STDOUT_SELF, ierr))
      end if
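!     PetscBarrier() below blocks until every process in the communicator of the
!     given object has called it (here, presumably so rank 0 finishes printing
!     above before the objects are destroyed).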

      PetscCallA(PetscBarrier(y, ierr))
      PetscCallA(PetscBarrier(is1, ierr))
      PetscCallA(VecDestroy(x, ierr))
      PetscCallA(VecDestroy(y, ierr))
      PetscCallA(ISDestroy(is1, ierr))
      PetscCallA(ISDestroy(is2, ierr))

      PetscCallA(PetscFinalize(ierr))
      end

!/*TEST
!
!     test:
!       nsize: 3
!       filter:  grep -v " MPI process"
!
!TEST*/