Epetra Package Browser (Single Doxygen Collection) Development
Loading...
Searching...
No Matches
test/Directory_LL/cxx_main.cpp
Go to the documentation of this file.
1//@HEADER
2// ************************************************************************
3//
4// Epetra: Linear Algebra Services Package
5// Copyright 2011 Sandia Corporation
6//
7// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
8// the U.S. Government retains certain rights in this software.
9//
10// Redistribution and use in source and binary forms, with or without
11// modification, are permitted provided that the following conditions are
12// met:
13//
14// 1. Redistributions of source code must retain the above copyright
15// notice, this list of conditions and the following disclaimer.
16//
17// 2. Redistributions in binary form must reproduce the above copyright
18// notice, this list of conditions and the following disclaimer in the
19// documentation and/or other materials provided with the distribution.
20//
21// 3. Neither the name of the Corporation nor the names of the
22// contributors may be used to endorse or promote products derived from
23// this software without specific prior written permission.
24//
25// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36//
37// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
38//
39// ************************************************************************
40//@HEADER
41
42
43// Epetra_BlockMap Test routine
44
45#include "Epetra_Time.h"
46#include "Epetra_BlockMap.h"
47#include "Epetra_Map.h"
48#ifdef EPETRA_MPI
49#include "Epetra_MpiComm.h"
50#include <mpi.h>
51#endif
52#include "Epetra_SerialComm.h"
53#include "Epetra_Util.h"
54#include "../epetra_test_err.h"
55#include "Epetra_Version.h"
56#include "Epetra_Directory.h"
57
63
64int main(int argc, char *argv[]) {
65 bool verbose = false;
66 // Check if we should print results to standard out
67 if (argc > 1) {
68 if ((argv[1][0] == '-') && (argv[1][1] == 'v')) {
69 verbose = true;
70 }
71 }
72
73 int returnierr = 0;
74
75#ifdef EPETRA_MPI
76
77 // Initialize MPI
78 MPI_Init(&argc,&argv);
79 Epetra_MpiComm Comm(MPI_COMM_WORLD);
80#else
82#endif
83
84 if (!verbose) {
85 Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
86 }
87 int MyPID = Comm.MyPID();
88
89 int verbose_int = verbose ? 1 : 0;
90 Comm.Broadcast(&verbose_int, 1, 0);
91 verbose = verbose_int==1 ? true : false;
92
93 if (verbose && MyPID==0)
94 cout << Epetra_Version() << endl << endl;
95
96 EPETRA_TEST_ERR( directory_test_1(Comm), returnierr );
97
98 EPETRA_TEST_ERR( directory_test_2(Comm), returnierr );
99
100 EPETRA_TEST_ERR( directory_test_3(Comm), returnierr );
101
102 EPETRA_TEST_ERR( directory_test_4(Comm), returnierr );
103
104 EPETRA_TEST_ERR( directory_test_5(Comm), returnierr );
105
106#ifdef EPETRA_MPI
107 MPI_Finalize();
108#endif
109
110 if (MyPID == 0) {
111 if (returnierr == 0) {
112 cout << "Epetra_Directory tests passed."<<endl;
113 }
114 else {
115 cout << "Epetra_Directory tests failed."<<endl;
116 }
117 }
118
119 return returnierr;
120}
121
123{
124 //set up a map with arbitrary distribution of IDs, but with unique
125 //processor ID ownership (i.e., each ID only appears on 1 processor)
126
127 int myPID = Comm.MyPID();
128 int numProcs = Comm.NumProc();
129
130 if (numProcs < 2) return(0);
131
132 int myFirstID = (myPID+1)*(myPID+1);
133 int myNumIDs = 3+myPID;
134
135 long long* myIDs = new long long[myNumIDs];
136 int i;
137 for(i=0; i<myNumIDs; ++i) {
138 myIDs[i] = myFirstID+i;
139 }
140
141 Epetra_BlockMap blkmap((long long)-1, myNumIDs, myIDs, 1, 0, Comm);
142
143 Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
144
145 int proc = myPID+1;
146 if (proc >= numProcs) proc = 0;
147
148 int procNumIDs = 3+proc;
149 long long procFirstID = (long long)(proc+1)*(proc+1);
150 long long procLastID = procFirstID+procNumIDs - 1;
151
152 int queryProc1 = -1;
153 int queryProc2 = -1;
154
155 int err = directory->GetDirectoryEntries(
156 blkmap, 1, &procFirstID,
157 &queryProc1, NULL, NULL
158 );
159 err += directory->GetDirectoryEntries(
160 blkmap, 1, &procLastID,
161 &queryProc2, NULL, NULL
162 );
163 delete directory;
164 delete [] myIDs;
165
166 if (queryProc1 != proc || queryProc2 != proc) {
167 return(-1);
168 }
169
170 return(0);
171}
172
174{
175 //set up a Epetra_BlockMap with arbitrary distribution of IDs, but with unique
176 //processor ID ownership (i.e., each ID only appears on 1 processor)
177 //
178 //the thing that makes this Epetra_BlockMap nasty is that higher-numbered
179 //processors own lower IDs.
180
181 int myPID = Comm.MyPID();
182 int numProcs = Comm.NumProc();
183
184 if (numProcs < 2) return(0);
185
186 int myFirstID = (numProcs-myPID)*(numProcs-myPID);
187 int myNumIDs = 3;
188
189 long long* myIDs = new long long[myNumIDs];
190 int i;
191 for(i=0; i<myNumIDs; ++i) {
192 myIDs[i] = myFirstID+i;
193 }
194
195 Epetra_BlockMap blkmap((long long)-1, myNumIDs, myIDs, 1, 0, Comm);
196
197 Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
198
199 int proc = myPID+1;
200 if (proc >= numProcs) proc = 0;
201
202 int procNumIDs = 3;
203 long long procFirstID = (long long)(numProcs-proc)*(numProcs-proc);
204 long long procLastID = procFirstID+procNumIDs - 1;
205
206 int queryProc1 = -1;
207 int queryProc2 = -1;
208
209 int err = directory->GetDirectoryEntries(blkmap, 1, &procFirstID,
210 &queryProc1, NULL, NULL);
211 err += directory->GetDirectoryEntries(blkmap, 1, &procLastID,
212 &queryProc2, NULL, NULL);
213 delete directory;
214 delete [] myIDs;
215
216 if (queryProc1 != proc || queryProc2 != proc) {
217 return(-1);
218 }
219
220 return(0);
221}
222
224{
225 //set up a map with arbitrary distribution of IDs, including non-unique
226 //processor ID ownership (i.e., some IDs appear on more than 1 processor)
227
228 int myPID = Comm.MyPID();
229 int numProcs = Comm.NumProc();
230
231 if (numProcs < 2) return(0);
232
233 int myFirstID = (myPID+1)*(myPID+1);
234 int myNumIDs = 4;
235
236 long long* myIDs = new long long[myNumIDs];
237 int i;
238 for(i=0; i<myNumIDs-1; ++i) {
239 myIDs[i] = myFirstID+i;
240 }
241
242 int nextProc = myPID+1;
243 if (nextProc >= numProcs) nextProc = 0;
244
245 int nextProcFirstID = (nextProc+1)*(nextProc+1);
246 myIDs[myNumIDs-1] = nextProcFirstID;
247
248 Epetra_BlockMap blkmap((long long)-1, myNumIDs, myIDs, 1, 0, Comm);
249
250 Epetra_Directory* directory = Comm.CreateDirectory(blkmap);
251
252 bool uniqueGIDs = directory->GIDsAllUniquelyOwned();
253
254 delete directory;
255 delete [] myIDs;
256
257 if (uniqueGIDs) {
258 return(-1);
259 }
260
261 return(0);
262}
263
265{
266 int myPID = Comm.MyPID();
267 int numProcs = Comm.NumProc();
268
269 if (numProcs < 2) return(0);
270
271 //Set up a map with overlapping ranges of GIDs.
272 int num = 5;
273 int numMyGIDs = 2*num;
274 int myFirstGID = myPID*num;
275
276 long long* myGIDs = new long long[numMyGIDs];
277
278 for(int i=0; i<numMyGIDs; ++i) {
279 myGIDs[i] = myFirstGID+i;
280 }
281
282 Epetra_Map overlappingmap((long long)-1, numMyGIDs, myGIDs, 0, Comm);
283
284 delete [] myGIDs;
285
286 long long numGlobal0 = overlappingmap.NumGlobalElements64();
287
288#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES // FIXME
289 Epetra_Map uniquemap1 =
290 Epetra_Util::Create_OneToOne_Map(overlappingmap);
291
292 bool use_high_sharing_proc = true;
293
294 Epetra_Map uniquemap2 =
295 Epetra_Util::Create_OneToOne_Map(overlappingmap, use_high_sharing_proc);
296
297 long long numGlobal1 = uniquemap1.NumGlobalElements64();
298 long long numGlobal2 = uniquemap2.NumGlobalElements64();
299
300 //The two one-to-one maps should have the same number of global elems.
301 if (numGlobal1 != numGlobal2) {
302 return(-1);
303 }
304
305 //The number of global elems should be greater in the original map
306 //than in the one-to-one map.
307 if (numGlobal0 <= numGlobal1) {
308 return(-2);
309 }
310
311 int numLocal1 = uniquemap1.NumMyElements();
312 int numLocal2 = uniquemap2.NumMyElements();
313
314 //If this is proc 0 or proc numProcs-1, then the number of
315 //local elements should be different in the two one-to-one maps.
316 if ((myPID==0 || myPID==numProcs-1) && numLocal1 == numLocal2) {
317 return(-3);
318 }
319
320#endif
321 return(0);
322}
323
325{
326 int myPID = Comm.MyPID();
327 int numProcs = Comm.NumProc();
328
329 if (numProcs < 2) return(0);
330
331 //Set up a map with overlapping ranges of GIDs.
332 int num = 5;
333 int numMyGIDs = 2*num;
334 int myFirstGID = myPID*num;
335
336 long long* myGIDs = new long long[numMyGIDs];
337 int* sizes = new int[numMyGIDs];
338
339 for(int i=0; i<numMyGIDs; ++i) {
340 myGIDs[i] = myFirstGID+i;
341 sizes[i] = myFirstGID+i+1;
342 }
343
344 Epetra_BlockMap overlappingmap((long long)-1, numMyGIDs, myGIDs, sizes, 0, Comm);
345
346 delete [] myGIDs;
347 delete [] sizes;
348
349 long long numGlobal0 = overlappingmap.NumGlobalElements64();
350
351#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES // FIXME
352 Epetra_BlockMap uniquemap1 =
354
355 bool use_high_sharing_proc = true;
356
357 Epetra_BlockMap uniquemap2 =
358 Epetra_Util::Create_OneToOne_BlockMap(overlappingmap, use_high_sharing_proc);
359
360 long long numGlobal1 = uniquemap1.NumGlobalElements64();
361 long long numGlobal2 = uniquemap2.NumGlobalElements64();
362
363 //The two one-to-one maps should have the same number of global elems.
364 if (numGlobal1 != numGlobal2) {
365 return(-1);
366 }
367
368 //The number of global elems should be greater in the original map
369 //than in the one-to-one map.
370 if (numGlobal0 <= numGlobal1) {
371 return(-2);
372 }
373
374 int numLocal1 = uniquemap1.NumMyElements();
375 int numLocal2 = uniquemap2.NumMyElements();
376
377 //If this is proc 0 or proc numProcs-1, then the number of
378 //local elements should be different in the two one-to-one maps.
379 if ((myPID==0 || myPID==numProcs-1) && numLocal1 == numLocal2) {
380 return(-3);
381 }
382#endif
383 return(0);
384}
std::string Epetra_Version()
Epetra_BlockMap: A class for partitioning block element vectors and matrices.
long long NumGlobalElements64() const
int NumMyElements() const
Number of elements on the calling processor.
Epetra_Comm: The Epetra Communication Abstract Base Class.
Definition Epetra_Comm.h:73
virtual int NumProc() const =0
Returns total number of processes.
virtual Epetra_Directory * CreateDirectory(const Epetra_BlockMap &Map) const =0
Create a directory object for the given Epetra_BlockMap.
virtual int MyPID() const =0
Return my process ID.
Epetra_Directory: This class is a pure virtual class whose interface allows Epetra_Map and Epetra_Block...
virtual int GetDirectoryEntries(const Epetra_BlockMap &Map, const int NumEntries, const int *GlobalEntries, int *Procs, int *LocalEntries, int *EntrySizes, bool high_rank_sharing_procs=false) const =0
GetDirectoryEntries : Returns proc and local id info for non-local map entries.
virtual bool GIDsAllUniquelyOwned() const =0
GIDsAllUniquelyOwned: returns true if all GIDs appear on just one processor.
Epetra_Map: A class for partitioning vectors and matrices.
Definition Epetra_Map.h:119
Epetra_MpiComm: The Epetra MPI Communication Class.
int Broadcast(double *MyVals, int Count, int Root) const
Epetra_MpiComm Broadcast function.
int MyPID() const
Return my process ID.
static void SetTracebackMode(int TracebackModeValue)
Set the value of the Epetra_Object error traceback report mode.
Epetra_SerialComm: The Epetra Serial Communication Class.
static Epetra_BlockMap Create_OneToOne_BlockMap(const Epetra_BlockMap &usermap, bool high_rank_proc_owns_shared=false)
Epetra_Util Create_OneToOne_Map function.
static Epetra_Map Create_OneToOne_Map(const Epetra_Map &usermap, bool high_rank_proc_owns_shared=false)
Epetra_Util Create_OneToOne_Map function.
#define EPETRA_TEST_ERR(a, b)
int directory_test_1(Epetra_Comm &Comm)
int main(int argc, char *argv[])
int directory_test_2(Epetra_Comm &Comm)
int directory_test_3(Epetra_Comm &Comm)
int directory_test_4(Epetra_Comm &Comm)
int directory_test_5(Epetra_Comm &Comm)