Actual source code: ex8.c
static char help[] = "Test VecScatterCreateToZero, VecScatterCreateToAll\n\n";

#include <petscvec.h>

int main(int argc,char **argv)
{
  PetscInt    i,N=10,low,high;
  PetscMPIInt size,rank;
  Vec         x,y;
  VecScatter  vscat;

  PetscInitialize(&argc,&argv,(char*)0,help);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);

  VecCreate(PETSC_COMM_WORLD,&x);
  VecSetFromOptions(x);
  VecSetSizes(x,PETSC_DECIDE,N);
  VecGetOwnershipRange(x,&low,&high);
  PetscObjectSetName((PetscObject)x,"x");

  /*-------------------------------------*/
  /*       VecScatterCreateToZero        */
  /*-------------------------------------*/

  /* MPI vec x = [0, 1, 2, .., N-1] */
  for (i=low; i<high; i++) VecSetValue(x,i,(PetscScalar)i,INSERT_VALUES);
  VecAssemblyBegin(x);
  VecAssemblyEnd(x);

  PetscPrintf(PETSC_COMM_WORLD,"\nTesting VecScatterCreateToZero\n");
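  /* vscat gathers x onto rank 0; y is created as a sequential vector of
     length N on rank 0 and length 0 on all other ranks */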
  VecScatterCreateToZero(x,&vscat,&y);
  PetscObjectSetName((PetscObject)y,"y");

  /* Test PetscSFBcastAndOp with op = MPI_REPLACE, which does y = x on rank 0 */
  VecScatterBegin(vscat,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(vscat,x,y,INSERT_VALUES,SCATTER_FORWARD);
  if (rank == 0) VecView(y,PETSC_VIEWER_STDOUT_SELF);
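  /* Expected on rank 0: y = [0, 1, 2, .., 9] */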

  /* Test PetscSFBcastAndOp with op = MPI_SUM, which does y += x */
  VecScatterBegin(vscat,x,y,ADD_VALUES,SCATTER_FORWARD);
  VecScatterEnd(vscat,x,y,ADD_VALUES,SCATTER_FORWARD);
  if (rank == 0) VecView(y,PETSC_VIEWER_STDOUT_SELF);
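  /* Expected on rank 0: y = 2x = [0, 2, 4, .., 18] */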

  /* Test PetscSFReduce with op = MPI_REPLACE, which does x = y */
  VecScatterBegin(vscat,y,x,INSERT_VALUES,SCATTER_REVERSE);
  VecScatterEnd(vscat,y,x,INSERT_VALUES,SCATTER_REVERSE);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
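  /* Expected: x = y = [0, 2, 4, .., 18] */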

  /* Test PetscSFReduce with op = MPI_SUM, which does x += y on x's local part on rank 0 */
  VecScatterBegin(vscat,y,x,ADD_VALUES,SCATTER_REVERSE_LOCAL);
  VecScatterEnd(vscat,y,x,ADD_VALUES,SCATTER_REVERSE_LOCAL);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
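  /* SCATTER_REVERSE_LOCAL performs only the communication-free, on-process part
     of the reverse scatter; since y lives entirely on rank 0, only rank 0's
     slice of x is incremented. E.g., with 2 ranks (as in the first testset
     below), rank 0 owns indices 0..4, so afterwards
     x = [0, 4, 8, 12, 16, 10, 12, 14, 16, 18] */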

  VecDestroy(&y);
  VecScatterDestroy(&vscat);

  /*-------------------------------------*/
  /*        VecScatterCreateToAll        */
  /*-------------------------------------*/
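
  /* Reset MPI vec x = [0, 1, 2, .., N-1] */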
  for (i=low; i<high; i++) VecSetValue(x,i,(PetscScalar)i,INSERT_VALUES);
  VecAssemblyBegin(x);
  VecAssemblyEnd(x);

  PetscPrintf(PETSC_COMM_WORLD,"\nTesting VecScatterCreateToAll\n");
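  /* vscat gathers x onto every rank; y is created as a sequential vector of
     length N on each rank */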
  VecScatterCreateToAll(x,&vscat,&y);
  PetscObjectSetName((PetscObject)y,"y");

  /* Test PetscSFBcastAndOp with op = MPI_REPLACE, which does y = x on all ranks */
  VecScatterBegin(vscat,x,y,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(vscat,x,y,INSERT_VALUES,SCATTER_FORWARD);
  if (rank == 0) VecView(y,PETSC_VIEWER_STDOUT_SELF);
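  /* Expected on every rank: y = [0, 1, 2, .., 9]; only rank 0 prints its copy */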

  /* Test PetscSFBcastAndOp with op = MPI_SUM, which does y += x */
  VecScatterBegin(vscat,x,y,ADD_VALUES,SCATTER_FORWARD);
  VecScatterEnd(vscat,x,y,ADD_VALUES,SCATTER_FORWARD);
  if (rank == 0) VecView(y,PETSC_VIEWER_STDOUT_SELF);
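  /* Expected on every rank: y = 2x = [0, 2, 4, .., 18] */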

  /* Test PetscSFReduce with op = MPI_REPLACE, which does x = y */
  VecScatterBegin(vscat,y,x,INSERT_VALUES,SCATTER_REVERSE);
  VecScatterEnd(vscat,y,x,INSERT_VALUES,SCATTER_REVERSE);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
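  /* All ranks insert identical copies of y, so x = y = [0, 2, 4, .., 18] */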

  /* Test PetscSFReduce with op = MPI_SUM, which does x += size*y */
  VecScatterBegin(vscat,y,x,ADD_VALUES,SCATTER_REVERSE);
  VecScatterEnd(vscat,y,x,ADD_VALUES,SCATTER_REVERSE);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
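  /* Each of the size ranks contributes its copy of y, hence x += size*y.
     E.g., with 3 ranks (as in the second testset below): x[i] = 2i + 3*2i = 8i,
     i.e., x = [0, 8, 16, .., 72] */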

  VecDestroy(&x);
  VecDestroy(&y);
  VecScatterDestroy(&vscat);

  PetscFinalize();
  return 0;
}

/*TEST

   testset:
      # N=10 is divisible by nsize, to trigger Allgather/Gather in SF
      nsize: 2
      # Exact numbers really matter here
      diff_args: -j
      filter: grep -v "type"
      output_file: output/ex8_1.out

      test:
         suffix: 1_standard

      test:
         suffix: 1_cuda
         # -sf_backend cuda is not needed if PETSc is compiled only with cuda
         args: -vec_type cuda -sf_backend cuda
         requires: cuda

      test:
         suffix: 1_hip
         args: -vec_type hip -sf_backend hip
         requires: hip

      test:
         suffix: 1_cuda_aware_mpi
         # -sf_backend cuda is not needed if PETSc is compiled only with cuda
         args: -vec_type cuda -sf_backend cuda
         requires: cuda defined(PETSC_HAVE_MPI_GPU_AWARE)

   testset:
      # N=10 is not divisible by nsize, to trigger Allgatherv/Gatherv in SF
      nsize: 3
      # Exact numbers really matter here
      diff_args: -j
      filter: grep -v "type"
      output_file: output/ex8_2.out

      test:
         suffix: 2_standard

      test:
         suffix: 2_cuda
         # -sf_backend cuda is not needed if PETSc is compiled only with cuda
         args: -vec_type cuda -sf_backend cuda
         requires: cuda

      test:
         suffix: 2_hip
         # -sf_backend hip is not needed if PETSc is compiled only with hip
         args: -vec_type hip -sf_backend hip
         requires: hip

      test:
         suffix: 2_cuda_aware_mpi
         args: -vec_type cuda
         requires: cuda defined(PETSC_HAVE_MPI_GPU_AWARE)

TEST*/