fei_Graph_Impl.cpp
/*--------------------------------------------------------------------*/
/*    Copyright 2005 Sandia Corporation.                              */
/*    Under the terms of Contract DE-AC04-94AL85000, there is a       */
/*    non-exclusive license for use of this work by or on behalf      */
/*    of the U.S. Government.  Export of this program may require     */
/*    a license from the United States Government.                    */
/*--------------------------------------------------------------------*/

#include <fei_macros.hpp>

#include <fei_Graph_Impl.hpp>
#include <fei_EqnComm.hpp>
#include <fei_CommUtils.hpp>
#include <fei_TemplateUtils.hpp>
#include <fei_VectorSpace.hpp>

#undef fei_file
#define fei_file "fei_Graph_Impl.cpp"
#include <fei_ErrMacros.hpp>

//----------------------------------------------------------------------------
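// Construct a graph for the locally-owned row range
// [firstLocalRow, lastLocalRow]. One remote table per processor is
// allocated to buffer indices for rows owned elsewhere; those rows are
// forwarded to their owners by gatherFromOverlap().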
fei::Graph_Impl::Graph_Impl(MPI_Comm comm, int firstLocalRow, int lastLocalRow)
  : localGraphData_(NULL),
    remoteGraphData_(),
    eqnComm_(),
    firstLocalRow_(firstLocalRow),
    lastLocalRow_(lastLocalRow),
    localProc_(0),
    numProcs_(1),
    comm_(comm)
{
  localProc_ = fei::localProc(comm_);
  numProcs_ = fei::numProcs(comm_);
  //for remoteGraphData_, we don't know what the range of row-numbers will
  //be, so we'll just construct it with -1,-1
  remoteGraphData_.resize(numProcs_);
  for(int p=0; p<numProcs_; ++p) {
    remoteGraphData_[p] = new remote_table_type(-1, -1);
  }
  eqnComm_.reset(new fei::EqnComm(comm_, lastLocalRow-firstLocalRow+1));
  localGraphData_ = new table_type(firstLocalRow_, lastLocalRow_);
}

//----------------------------------------------------------------------------
fei::Graph_Impl::~Graph_Impl()
{
  delete localGraphData_;
  for(unsigned i=0; i<remoteGraphData_.size(); ++i) {
    delete remoteGraphData_[i];
  }
}

//----------------------------------------------------------------------------
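// Add the column 'indices' to 'row'. Locally-owned rows go directly into
// localGraphData_; a row owned by another processor (per
// eqnComm_->getOwnerProc) is buffered in that processor's remote table
// until gatherFromOverlap() runs. Returns -1 if 'row' is negative.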
int fei::Graph_Impl::addIndices(int row, int len, const int* indices)
{
  if (row < 0) {
    return(-1);
  }

  if (row < firstLocalRow_ || row > lastLocalRow_) {
    int p = eqnComm_->getOwnerProc(row);
    remoteGraphData_[p]->addIndices(row, len, indices);
  }
  else localGraphData_->addIndices(row, len, indices);

  return(0);
}

//----------------------------------------------------------------------------
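// Add a symmetric (clique) contribution: each entry of 'indices' is a
// row that receives the whole 'indices' list as its columns. If
// 'diagonal' is true, only the (index,index) diagonal entries are added.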
int fei::Graph_Impl::addSymmetricIndices(int numIndices, int* indices,
                                         bool diagonal)
{
  if (diagonal) {
    addDiagonals(numIndices, indices);
    return(0);
  }

  bool all_local = true;
  if (numProcs_ > 1) {
    for(int i=0; i<numIndices; ++i) {
      if (indices[i] < 0) {
        return(-1);
      }

      bool local = true;
      if (indices[i] < firstLocalRow_ || indices[i] > lastLocalRow_) {
        all_local = false;
        local = false;
      }

      if (!local) {
        int p = eqnComm_->getOwnerProc(indices[i]);
        remoteGraphData_[p]->addIndices(indices[i], numIndices, indices);
      }
    }
  }

  if (all_local) {
    localGraphData_->addIndices(numIndices, indices,
                                numIndices, indices);
  }
  else {
    for(int i=0; i<numIndices; ++i) {
      if (indices[i] >= firstLocalRow_ && indices[i] <= lastLocalRow_) {
        localGraphData_->addIndices(indices[i], numIndices, indices);
      }
    }
  }

  return(0);
}

//----------------------------------------------------------------------------
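// Add only the diagonal entry (ind,ind) for each index, buffering
// non-local rows for their owning processors. Throws std::runtime_error
// if any index is negative.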
void fei::Graph_Impl::addDiagonals(int numIndices, int* indices)
{
  bool all_local = true;
  int i;
  if (numProcs_ > 1) {
    for(i=0; i<numIndices; ++i) {
      int ind = indices[i];
      if (ind < 0) {
        throw std::runtime_error("fei::Graph_Impl::addDiagonals given negative index");
      }

      bool local = true;
      if (ind < firstLocalRow_ || ind > lastLocalRow_) {
        all_local = false;
        local = false;
      }

      if (!local) {
        int p = eqnComm_->getOwnerProc(ind);
        remoteGraphData_[p]->addIndices(ind, 1, &ind);
      }
    }
  }

  if (all_local) {
    localGraphData_->addDiagonals(numIndices, indices);
  }
  else {
    for(i=0; i<numIndices; ++i) {
      int ind = indices[i];
      if (ind >= firstLocalRow_ && ind <= lastLocalRow_) {
        localGraphData_->addIndices(ind, 1, &ind);
      }
    }
  }
}

//----------------------------------------------------------------------------
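// Write the locally-owned rows to 'os': the row count first, then one
// line per row (via writeToStream). When prefixLinesWithPoundSign is
// true, every line is prefixed with "# ". The 'debug' argument is
// unused in this implementation.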
int fei::Graph_Impl::writeLocalGraph(FEI_OSTREAM& os, bool debug,
                                     bool prefixLinesWithPoundSign)
{
  if (localGraphData_ == NULL) {
    if (prefixLinesWithPoundSign) {
      os << "# fei::Graph_Impl::writeLocalGraph numRows: 0"<<FEI_ENDL;
    }
    else {
      os << "0" << FEI_ENDL;
    }
    return(0);
  }

  if (prefixLinesWithPoundSign) {
    os << "# fei::Graph_Impl::writeLocalGraph numRows: ";
  }
  os << localGraphData_->getMap().size() <<FEI_ENDL;

  if (prefixLinesWithPoundSign) {
    writeToStream(*localGraphData_, os, "# ");
  }
  else {
    writeToStream(*localGraphData_, os, NULL);
  }

  return( 0 );
}

//----------------------------------------------------------------------------
int fei::Graph_Impl::writeRemoteGraph(FEI_OSTREAM& os)
{
  for(unsigned p=0; p<remoteGraphData_.size(); ++p) {
    writeToStream(*remoteGraphData_[p], os, "# ");
  }

  return( 0 );
}

//----------------------------------------------------------------------------
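// Gather shared-but-not-owned rows onto their owning processors: pack
// each non-empty remote table, exchange buffer sizes, exchange the
// packed rows, then merge received rows via addIndices(). This must be
// called on all processors in comm_.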
int fei::Graph_Impl::gatherFromOverlap()
{
  if (numProcs_ == 1) return(0);

#ifndef FEI_SER
  //this function gathers shared-but-not-owned data onto the
  //owning processors.

  //iterate the remoteGraphData_ array and create a list of processors
  //that we will be sending data to. (processors which own graph rows
  //that we share.)
  std::vector<int> sendProcs;
  for(unsigned i=0; i<remoteGraphData_.size(); ++i) {
    if ((int)i == localProc_) continue;
    if (remoteGraphData_[i] != NULL) {
      if (remoteGraphData_[i]->getMap().size() == 0) {
        continue;
      }

      sendProcs.push_back((int)i);
    }
  }

  //now we can find out which procs we'll be receiving from.
  std::vector<int> recvProcs;
  fei::mirrorProcs(comm_, sendProcs, recvProcs);

  //next we'll declare arrays to receive into.
  std::vector<std::vector<int> > recv_ints(recvProcs.size());

  //...and an array for the sizes of the recv buffers:
  std::vector<int> recv_sizes(recvProcs.size());
  std::vector<MPI_Request> mpiReqs(recvProcs.size());
  std::vector<MPI_Status> mpiStatuses(recvProcs.size());

  int tag1 = 11113;

  unsigned offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    MPI_Irecv(&recv_sizes[i], 1, MPI_INT, recvProcs[i],
              tag1, comm_, &mpiReqs[i]);
  }

  //now we'll pack our to-be-sent data into buffers, and send the
  //sizes to the receiving procs:
  std::vector<std::vector<int> > send_ints(sendProcs.size());

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];

    fei::packRaggedTable(*(remoteGraphData_[proc]), send_ints[i]);

    int isize = send_ints[i].size();

    MPI_Send(&isize, 1, MPI_INT, proc, tag1, comm_);
  }

  if (mpiReqs.size() > 0) {
    MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
  }

  //now resize our recv buffers, and post the recvs.
  for(size_t i=0; i<recvProcs.size(); ++i) {
    int intsize = recv_sizes[i];

    recv_ints[i].resize(intsize);

    MPI_Irecv(&(recv_ints[i][0]), intsize, MPI_INT, recvProcs[i],
              tag1, comm_, &mpiReqs[i]);
  }

  //now send our packed buffers.
  for(size_t i=0; i<sendProcs.size(); ++i) {
    int proc = sendProcs[i];

    MPI_Send(&(send_ints[i][0]), send_ints[i].size(), MPI_INT,
             proc, tag1, comm_);
  }

  if (mpiReqs.size() > 0) {
    MPI_Waitall(mpiReqs.size(), &mpiReqs[0], &mpiStatuses[0]);
  }

  //unpack each received buffer: it holds the row count, then the row
  //numbers, then the row lengths, then the packed column indices.
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    std::vector<int>& recvdata = recv_ints[i];
    int numRows = recvdata[0];
    int* rowNumbers = &recvdata[1];
    int* rowLengths = rowNumbers+numRows;
    int* packedCols = rowLengths+numRows;
    offset = 0;
    for(int r=0; r<numRows; ++r) {
      addIndices(rowNumbers[r], rowLengths[r], &packedCols[offset]);
      offset += rowLengths[r];
    }
  }

#endif
  return(0);
}

//----------------------------------------------------------------------------
int fei::Graph_Impl::getLocalRowLength(int row)
{
  table_row_type* colIndices = localGraphData_->getRow(row);
  if (colIndices == NULL) {
    return(-1);
  }

  return(colIndices->size());
}

//----------------------------------------------------------------------------
int fei::Graph_Impl::getNumLocalRows()
{
  int numLocalRows = localGraphData_->getMap().size();

  return(numLocalRows);
}

//----------------------------------------------------------------------------
int fei::Graph_Impl::getNumLocalNonzeros() const
{
  int numNonzeros = fei::countNonzeros(*localGraphData_);

  return(numNonzeros);
}
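The listing above is the complete implementation. As a usage sketch (not part of this file), the following hypothetical driver shows the intended call sequence: construct the graph with this processor's row range, add element-style cliques, gather off-processor rows, then query the local sizes. The 25-rows-per-process block partition and the clique indices are invented for illustration, and FEI_COUT/fei_iostream.hpp are assumed FEI utility names.

// Hypothetical driver, illustration only: assumes MPI has already been
// initialized and the FEI headers are on the include path.
#include <fei_macros.hpp>
#include <fei_iostream.hpp>
#include <fei_CommUtils.hpp>
#include <fei_Graph_Impl.hpp>

void build_graph_example(MPI_Comm comm)
{
  int nprocs = fei::numProcs(comm);
  int myproc = fei::localProc(comm);

  // Assumed contiguous block partition: 25 rows per processor.
  const int rowsPerProc = 25;
  int firstRow = myproc*rowsPerProc;
  int lastRow  = firstRow + rowsPerProc - 1;

  fei::Graph_Impl graph(comm, firstRow, lastRow);

  // A 3-row "element" whose last index lies on the next processor
  // (wrapping at the end), so the remote-buffering path is exercised.
  int indices[3] = {firstRow, firstRow+1,
                    (lastRow+1) % (nprocs*rowsPerProc)};
  graph.addSymmetricIndices(3, indices);

  // Collective: ships buffered off-processor rows to their owners.
  graph.gatherFromOverlap();

  FEI_COUT << "proc " << myproc << ": rows=" << graph.getNumLocalRows()
           << ", nonzeros=" << graph.getNumLocalNonzeros() << FEI_ENDL;
}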