! Actual source code: ex2f.F90

!
!
!  Description: Builds a parallel vector with 1 component on the first
!               processor, 2 on the second, etc.  Each processor then adds
!               1.0 to every element except the last "rank" entries of
!               the global vector.
!
! -----------------------------------------------------------------------
  8: #include <petsc/finclude/petscvec.h>
  9: program main
 10:   use petscvec
 11:   implicit none

 13: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 14: !                 Beginning of program
 15: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

 17:   Vec x
 18:   PetscInt N, i
 19:   PetscErrorCode ierr
 20:   PetscMPIInt rank
 21:   PetscScalar, parameter :: one = 1.0
 22:   PetscScalar value(1)

 24:   PetscCallA(PetscInitialize(ierr))
 25:   PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

 27: !  Create a parallel vector.
 28: !   - In this case, we specify the size of the local portion on
 29: !     each processor, and PETSc computes the global size.  Alternatively,
 30: !     if we pass the global size and use PETSC_DECIDE for the
 31: !     local size PETSc will choose a reasonable partition trying
 32: !     to put nearly an equal number of elements on each processor.

 34:   N = rank + 1
 35:   PetscCallA(VecCreateFromOptions(PETSC_COMM_WORLD, PETSC_NULL_CHARACTER, 1_PETSC_INT_KIND, N, PETSC_DECIDE, x, ierr))
 36:   PetscCallA(VecGetSize(x, N, ierr))
 37:   PetscCallA(VecSet(x, one, ierr))

 39: !  Set the vector elements.
 40: !   - Note that VecSetValues() uses 0-based row and column numbers
 41: !     in Fortran as well as in C.
 42: !   - Always specify global locations of vector entries.
 43: !   - Each processor can contribute any vector entries,
 44: !     regardless of which processor "owns" them; any nonlocal
 45: !     contributions will be transferred to the appropriate processor
 46: !     during the assembly process.
 47: !   - In this example, the flag ADD_VALUES indicates that all
 48: !     contributions will be added together.

 50:   do i = 0, N - rank - 1
 51:     PetscCallA(VecSetValues(x, 1_PETSC_INT_KIND, [i], [one], ADD_VALUES, ierr))
 52:   end do

 54: !  Assemble vector, using the 2-step process:
 55: !    VecAssemblyBegin(), VecAssemblyEnd()
 56: !  Computations can be done while messages are in transition
 57: !  by placing code between these two statements.

 59:   PetscCallA(VecAssemblyBegin(x, ierr))
 60:   PetscCallA(VecAssemblyEnd(x, ierr))

 62: !     Test VecGetValues() with scalar entries
 63:   if (rank == 0) then
 64:     PetscCallA(VecGetValues(x, 0_PETSC_INT_KIND, [i], value, ierr))
 65:   end if

 67: !  View the vector; then destroy it.

 69:   PetscCallA(VecView(x, PETSC_VIEWER_STDOUT_WORLD, ierr))
 70:   PetscCallA(VecDestroy(x, ierr))

 72:   PetscCallA(PetscFinalize(ierr))
 73: end

!/*TEST
!
!     test:
!       nsize: 2
!       filter: grep -v " MPI process"
!
!TEST*/