ROL_TypeE_StabilizedLCLAlgorithm_Def.hpp
// @HEADER
// ************************************************************************
//
//               Rapid Optimization Library (ROL) Package
//                 Copyright (2014) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
//    contributors may be used to endorse or promote products derived from
//    this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact lead developers:
//              Drew Kouri   (dpkouri@sandia.gov) and
//              Denis Ridzal (dridzal@sandia.gov)
//
// ************************************************************************
// @HEADER

#ifndef ROL_TYPEE_STABILIZEDLCLALGORITHM_DEF_H
#define ROL_TYPEE_STABILIZEDLCLALGORITHM_DEF_H

#include "ROL_TypeB_AlgorithmFactory.hpp" // TypeB::AlgorithmFactory is used below
#include "ROL_Bounds.hpp"

namespace ROL {
namespace TypeE {

template<typename Real>
StabilizedLCLAlgorithm<Real>::StabilizedLCLAlgorithm( ParameterList &list )
  : TypeE::Algorithm<Real>::Algorithm(), list_(list), subproblemIter_(0) {
  // Set status test
  status_->reset();
  status_->add(makePtr<ConstraintStatusTest<Real>>(list));

  Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
  ParameterList& sublist = list.sublist("Step").sublist("Stabilized LCL");
  useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter", true);
  state_->searchSize = sublist.get("Initial Penalty Parameter",             ten);
  sigma_             = sublist.get("Initial Elastic Penalty Parameter",     ten*ten);
  // Multiplier update parameters
  scaleLagrangian_ = sublist.get("Use Scaled Stabilized LCL",             false);
  penaltyUpdate_   = sublist.get("Penalty Parameter Growth Factor",       ten);
  maxPenaltyParam_ = sublist.get("Maximum Penalty Parameter",             oe8);
  sigmaMax_        = sublist.get("Maximum Elastic Penalty Parameter",     oe8);
  sigmaUpdate_     = sublist.get("Elastic Penalty Parameter Growth Rate", ten);
  // Optimality tolerance update
  optIncreaseExponent_ = sublist.get("Optimality Tolerance Increase Exponent", one);
  optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
  optToleranceInitial_ = sublist.get("Initial Optimality Tolerance",           one);
  // Feasibility tolerance update
  feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Increase Exponent", p9);
  feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p1);
  feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance",           one);
  // Subproblem information
  maxit_         = sublist.get("Subproblem Iteration Limit",     1000);
  subStep_       = sublist.get("Subproblem Step Type",           "Trust Region");
  HessianApprox_ = sublist.get("Level of Hessian Approximation", 0);
  list_.sublist("Step").set("Type",subStep_);
  list_.sublist("Status Test").set("Iteration Limit",maxit_);
  // Verbosity setting
  verbosity_   = list.sublist("General").get("Output Level", 0);
  printHeader_ = verbosity_ > 2;
  bool print   = verbosity_ > 2;
  list_.sublist("General").set("Output Level",(print ? verbosity_ : 0));
  // Outer iteration tolerances
  outerFeasTolerance_ = list.sublist("Status Test").get("Constraint Tolerance", oem8);
  outerOptTolerance_  = list.sublist("Status Test").get("Gradient Tolerance",   oem8);
  outerStepTolerance_ = list.sublist("Status Test").get("Step Tolerance",       oem8);
  // Scaling
  useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
  fscale_            = sublist.get("Objective Scaling",  one);
  cscale_            = sublist.get("Constraint Scaling", one);
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::initialize( Vector<Real>           &x,
                                               const Vector<Real>     &g,
                                               const Vector<Real>     &l,
                                               const Vector<Real>     &c,
                                               ElasticObjective<Real> &alobj,
                                               Constraint<Real>       &con,
                                               std::ostream           &outStream ) {
  const Real one(1), TOL(1.e-2);
  Real tol = std::sqrt(ROL_EPSILON<Real>());
  TypeE::Algorithm<Real>::initialize(x,g,l,c); // initialize base-class algorithm state

  // Initialize the algorithm state
  state_->nfval = 0;
  state_->ncval = 0;
  state_->ngrad = 0;

  // Compute objective value
  alobj.getAugmentedLagrangian()->update(x,UpdateType::Initial,state_->iter);
  state_->value = alobj.getObjectiveValue(x,tol);
  alobj.getAugmentedLagrangian()->gradient(*state_->gradientVec,x,tol);

  // Compute constraint violation
  state_->constraintVec->set(*alobj.getConstraintVec(x,tol));
  state_->cnorm = state_->constraintVec->norm();

  // Update evaluation counters
  state_->ncval += alobj.getNumberConstraintEvaluations();
  state_->nfval += alobj.getNumberFunctionEvaluations();
  state_->ngrad += alobj.getNumberGradientEvaluations();

  // Compute problem scaling
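  // Default scaling: scale the objective by 1/max(1,||grad f(x)||) and the
  // constraint by 1/max(1,max_i ||J(x)^T e_i||), so that optimality and
  // feasibility measures enter the stopping tests on a comparable footing.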
  if (useDefaultScaling_) {
    fscale_ = one/std::max(one,alobj.getObjectiveGradient(x,tol)->norm());
    try {
      Ptr<Vector<Real>> ji = x.clone();
      Real maxji(0), normji(0);
      for (int i = 0; i < c.dimension(); ++i) {
        con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
        normji = ji->norm();
        maxji  = std::max(normji,maxji);
      }
      cscale_ = one/std::max(one,maxji);
    }
    catch (std::exception &e) {
      cscale_ = one;
    }
  }
  alobj.setScaling(fscale_,cscale_);

  // Compute gradient of the lagrangian
  state_->gnorm = state_->gradientVec->norm()/std::min(fscale_,cscale_);

  // Compute initial penalty parameter
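  // Default heuristic: start near 10*|f(x)| / ||c(x)||^2 in scaled quantities,
  // clamped to the interval [1e-8, 1e-2 * maximum penalty parameter].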
  if (useDefaultInitPen_) {
    const Real oem8(1e-8), oem2(1e-2), two(2), ten(10);
    state_->searchSize = std::max(oem8,
      std::min(ten*std::max(one,std::abs(fscale_*state_->value))
        / std::max(one,std::pow(cscale_*state_->cnorm,two)),oem2*maxPenaltyParam_));
  }
  // Initialize intermediate stopping tolerances
  optTolerance_  = std::max<Real>(TOL*outerOptTolerance_,
                                  optToleranceInitial_);
  //optTolerance_ = std::min<Real>(optTolerance_,TOL*state_->gnorm);
  feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
                                  feasToleranceInitial_);

  // Set data
  alobj.reset(l,state_->searchSize,sigma_);

  if (verbosity_ > 1) {
    outStream << std::endl;
    outStream << "Stabilized LCL Initialize" << std::endl;
    outStream << "Objective Scaling:  " << fscale_ << std::endl;
    outStream << "Constraint Scaling: " << cscale_ << std::endl;
    outStream << "Penalty Parameter:  " << state_->searchSize << std::endl;
    outStream << std::endl;
  }
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::run( Problem<Real> &problem,
                                        std::ostream  &outStream ) {
  if (problem.getProblemType() == TYPE_E) {
    problem.edit();
    problem.finalize(true,verbosity_>3,outStream); // Lump linear and nonlinear constraints
    run(*problem.getPrimalOptimizationVector(),
        *problem.getDualOptimizationVector(),
        *problem.getObjective(),
        *problem.getConstraint(),
        *problem.getMultiplierVector(),
        *problem.getResidualVector(),
        outStream);
    problem.finalizeIteration();
  }
  else {
    throw Exception::NotImplemented(">>> ROL::TypeE::Algorithm::run : Optimization problem is not Type E!");
  }
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::run( Vector<Real>       &x,
                                        const Vector<Real> &g,
                                        Objective<Real>    &obj,
                                        Constraint<Real>   &econ,
                                        Vector<Real>       &emul,
                                        const Vector<Real> &eres,
                                        std::ostream       &outStream ) {
  const Real one(1), oem2(1e-2);
  Real tol(std::sqrt(ROL_EPSILON<Real>())), cnorm(0), lnorm(0);
  // Initialize augmented Lagrangian data
  ElasticObjective<Real> alobj(makePtrFromRef(obj),makePtrFromRef(econ),
                               state_->searchSize,sigma_,g,eres,emul,
                               scaleLagrangian_,HessianApprox_);
  initialize(x,g,emul,eres,alobj,econ,outStream);
  // Define Elastic Subproblem
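  // The subproblem unknowns are (s,u,v): the optimization-space variable s plus
  // elastic slack variables u,v >= 0 that relax the linearized equality
  // constraint, keeping the subproblem feasible even when the linearization is not.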
  Ptr<Vector<Real>> u = eres.clone(), v = eres.clone(), c = eres.clone();
  Ptr<Vector<Real>> gu = emul.clone(), gv = emul.clone(), l = emul.clone();
  Ptr<Vector<Real>> s = x.clone(), gs = g.clone(), cvec = eres.clone();
  Ptr<ElasticLinearConstraint<Real>> lcon
    = makePtr<ElasticLinearConstraint<Real>>(makePtrFromRef(x),
                                             makePtrFromRef(econ),
                                             makePtrFromRef(eres));
  std::vector<Ptr<Vector<Real>>> vecList = {s,u,v};
  Ptr<PartitionedVector<Real>> xp  = makePtr<PartitionedVector<Real>>(vecList);
  Ptr<PartitionedVector<Real>> gxp = makePtr<PartitionedVector<Real>>({gs,gu,gv});
  Ptr<Vector<Real>> lb = u->clone(); lb->zero();
  std::vector<Ptr<BoundConstraint<Real>>> bndList(3);
  bndList[0] = makePtr<BoundConstraint<Real>>(); bndList[0]->deactivate();
  bndList[1] = makePtr<Bounds<Real>>(*lb,true);
  bndList[2] = makePtr<Bounds<Real>>(*lb,true);
  Ptr<BoundConstraint<Real>> xbnd
    = makePtr<BoundConstraint_Partitioned<Real>>(bndList,vecList);
  ParameterList ppa_list;
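  // The polyhedral projection enforces the linearized constraint inside the
  // bound-constrained subproblem solver: Dai-Fletcher for a single equality,
  // semismooth Newton otherwise.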
  if (c->dimension() == 1)
    ppa_list.sublist("General").sublist("Polyhedral Projection").set("Type","Dai-Fletcher");
  else
    ppa_list.sublist("General").sublist("Polyhedral Projection").set("Type","Semismooth Newton");
  Problem<Real> elc(makePtrFromRef(alobj),xp,gxp);
  elc.addBoundConstraint(xbnd);
  elc.addLinearConstraint("ElasticLinearConstraint",lcon,l,c);
  elc.finalize(false,verbosity_>2,outStream);
  Ptr<Vector<Real>> b2 = eres.clone(), xpwa = xp->clone(), mul = emul.clone();
  b2->zero();

  // Initialize subproblem algorithm
  Ptr<TypeB::Algorithm<Real>> algo;

  // Output
  if (verbosity_ > 0) writeOutput(outStream,true);

  while (status_->check(*state_)) {
    lcon->setAnchor(state_->iterateVec);
    if (verbosity_ > 3) elc.check(true,outStream);

    // Solve linearly constrained augmented Lagrangian subproblem
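    // Each outer iteration re-solves the elastic subproblem with a TypeB
    // (bound-constrained) algorithm selected by "Subproblem Step Type",
    // using the current intermediate optimality tolerance.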
    list_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
    list_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
    algo = TypeB::AlgorithmFactory<Real>(list_);
    algo->run(elc,outStream);
    x.set(*xp->get(0));

    // Update evaluation counters
    subproblemIter_ = algo->getState()->iter;
    state_->nfval += alobj.getNumberFunctionEvaluations();
    state_->ngrad += alobj.getNumberGradientEvaluations();
    state_->ncval += alobj.getNumberConstraintEvaluations();

    // Compute step
    state_->stepVec->set(x);
    state_->stepVec->axpy(-one,*state_->iterateVec);
    state_->snorm = state_->stepVec->norm();

    // Update iteration information
    state_->iter++;
    cvec->set(*alobj.getConstraintVec(x,tol));
    cnorm = cvec->norm();
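    // Accept the iterate if the scaled constraint violation meets the current
    // feasibility tolerance (update multipliers and tighten tolerances);
    // otherwise increase the penalty parameter, shrink the elastic penalty,
    // and reset the intermediate tolerances from their initial values.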
    if ( cscale_*cnorm < feasTolerance_ ) {
      // Update iteration information
      state_->iterateVec->set(x);
      state_->value = alobj.getObjectiveValue(x,tol);
      state_->constraintVec->set(*cvec);
      state_->cnorm = cnorm;

      // Update multipliers
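      // First-order multiplier update: subtract the multiplier of the
      // linearized (elastic) constraint returned by the projection and add the
      // augmented Lagrangian correction, penalty * scaled constraint residual.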
      emul.axpy(static_cast<Real>(-1),*elc.getPolyhedralProjection()->getMultiplier());
      emul.axpy(state_->searchSize*cscale_,state_->constraintVec->dual());

      alobj.getAugmentedLagrangian()->gradient(*state_->gradientVec,x,tol);
      if (scaleLagrangian_) state_->gradientVec->scale(state_->searchSize);
      econ.applyAdjointJacobian(*gs,*elc.getPolyhedralProjection()->getMultiplier(),x,tol);
      state_->gradientVec->axpy(-cscale_,*gs);
      state_->gnorm = state_->gradientVec->norm();

      // Update subproblem information
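      // Shrink the elastic penalty as the penalty parameter grows and tighten
      // the intermediate optimality/feasibility tolerances for the next
      // subproblem solve.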
      lnorm  = mul->norm();
      sigma_ = std::min(one+lnorm,sigmaMax_)/(one+state_->searchSize);
      if ( algo->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
        optTolerance_ = std::max(oem2*outerOptTolerance_,
          optTolerance_/(one + std::pow(state_->searchSize,optIncreaseExponent_)));
      }
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
        feasTolerance_/(one + std::pow(state_->searchSize,feasIncreaseExponent_)));

      // Update Algorithm State
      state_->snorm += lnorm + state_->searchSize*cscale_*state_->cnorm;
      state_->lagmultVec->set(emul);
    }
    else {
      // Update subproblem information
      state_->searchSize = std::min(penaltyUpdate_*state_->searchSize,maxPenaltyParam_);
      sigma_ /= sigmaUpdate_;
      optTolerance_  = std::max(oem2*outerOptTolerance_,
        optToleranceInitial_/(one + std::pow(state_->searchSize,optDecreaseExponent_)));
      feasTolerance_ = std::max(oem2*outerFeasTolerance_,
        feasToleranceInitial_/(one + std::pow(state_->searchSize,feasDecreaseExponent_)));
    }
    alobj.reset(emul,state_->searchSize,sigma_);

    // Update Output
    if (verbosity_ > 0) writeOutput(outStream,printHeader_);
  }
  if (verbosity_ > 0) TypeE::Algorithm<Real>::writeExitStatus(outStream);
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::writeHeader( std::ostream& os ) const {
  std::stringstream hist;
  if (verbosity_ > 1) {
    hist << std::string(114,'-') << std::endl;
    hist << "Stabilized LCL status output definitions" << std::endl << std::endl;
    hist << "  iter    - Number of iterates (steps taken)" << std::endl;
    hist << "  fval    - Objective function value" << std::endl;
    hist << "  cnorm   - Norm of the constraint violation" << std::endl;
    hist << "  gLnorm  - Norm of the gradient of the Lagrangian" << std::endl;
    hist << "  snorm   - Norm of the step" << std::endl;
    hist << "  penalty - Penalty parameter" << std::endl;
    hist << "  sigma   - Elastic Penalty parameter" << std::endl;
    hist << "  feasTol - Feasibility tolerance" << std::endl;
    hist << "  optTol  - Optimality tolerance" << std::endl;
    hist << "  #fval   - Number of times the objective was computed" << std::endl;
    hist << "  #grad   - Number of times the gradient was computed" << std::endl;
    hist << "  #cval   - Number of times the constraint was computed" << std::endl;
    hist << "  subIter - Number of iterations to solve subproblem" << std::endl;
    hist << std::string(114,'-') << std::endl;
  }
  hist << "  ";
  hist << std::setw(6)  << std::left << "iter";
  hist << std::setw(15) << std::left << "fval";
  hist << std::setw(15) << std::left << "cnorm";
  hist << std::setw(15) << std::left << "gLnorm";
  hist << std::setw(15) << std::left << "snorm";
  hist << std::setw(10) << std::left << "penalty";
  hist << std::setw(10) << std::left << "sigma";
  hist << std::setw(10) << std::left << "feasTol";
  hist << std::setw(10) << std::left << "optTol";
  hist << std::setw(8)  << std::left << "#fval";
  hist << std::setw(8)  << std::left << "#grad";
  hist << std::setw(8)  << std::left << "#cval";
  hist << std::setw(8)  << std::left << "subIter";
  hist << std::endl;
  os << hist.str();
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::writeName( std::ostream& os ) const {
  std::stringstream hist;
  hist << std::endl << "Stabilized LCL Solver (Type E, Equality Constraints)";
  hist << std::endl;
  hist << "Subproblem Solver: " << subStep_ << std::endl;
  os << hist.str();
}

template<typename Real>
void StabilizedLCLAlgorithm<Real>::writeOutput( std::ostream& os, const bool print_header ) const {
  std::stringstream hist;
  hist << std::scientific << std::setprecision(6);
  if ( state_->iter == 0 ) writeName(os);
  if ( print_header )      writeHeader(os);
  if ( state_->iter == 0 ) {
    hist << "  ";
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->cnorm;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << "---";
    hist << std::scientific << std::setprecision(2);
    hist << std::setw(10) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << sigma_;
    hist << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
    hist << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
    hist << std::scientific << std::setprecision(6);
    hist << std::setw(8)  << std::left << state_->nfval;
    hist << std::setw(8)  << std::left << state_->ngrad;
    hist << std::setw(8)  << std::left << state_->ncval;
    hist << std::setw(8)  << std::left << "---";
    hist << std::endl;
  }
  else {
    hist << "  ";
    hist << std::setw(6)  << std::left << state_->iter;
    hist << std::setw(15) << std::left << state_->value;
    hist << std::setw(15) << std::left << state_->cnorm;
    hist << std::setw(15) << std::left << state_->gnorm;
    hist << std::setw(15) << std::left << state_->snorm;
    hist << std::scientific << std::setprecision(2);
    hist << std::setw(10) << std::left << state_->searchSize;
    hist << std::setw(10) << std::left << sigma_;
    hist << std::setw(10) << std::left << feasTolerance_;
    hist << std::setw(10) << std::left << optTolerance_;
    hist << std::scientific << std::setprecision(6);
    hist << std::setw(8)  << std::left << state_->nfval;
    hist << std::setw(8)  << std::left << state_->ngrad;
    hist << std::setw(8)  << std::left << state_->ncval;
    hist << std::setw(8)  << std::left << subproblemIter_;
    hist << std::endl;
  }
  os << hist.str();
}

} // namespace TypeE
} // namespace ROL

#endif
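
Example usage (a minimal sketch, not part of the header above). The SketchObjective and
SketchConstraint classes, the vector sizes, and the included header names are illustrative
assumptions; derivatives not implemented here fall back to ROL's finite-difference defaults.
The sketch only shows how the solver is wired to a Type-E problem, not a tuned setup.

#include <iostream>
#include <vector>
#include "ROL_ParameterList.hpp"
#include "ROL_Problem.hpp"
#include "ROL_StdVector.hpp"
#include "ROL_TypeE_StabilizedLCLAlgorithm.hpp" // assumed declaration header for this file

// Hypothetical objective f(x) = 0.5*||x||^2.
class SketchObjective : public ROL::Objective<double> {
public:
  double value( const ROL::Vector<double> &x, double &tol ) override {
    return 0.5*x.dot(x);
  }
};

// Hypothetical equality constraint c(x) = x[0] + x[1] - 1.
class SketchConstraint : public ROL::Constraint<double> {
public:
  void value( ROL::Vector<double> &c, const ROL::Vector<double> &x, double &tol ) override {
    auto &cv = *dynamic_cast<ROL::StdVector<double>&>(c).getVector();
    auto &xv = *dynamic_cast<const ROL::StdVector<double>&>(x).getVector();
    cv[0] = xv[0] + xv[1] - 1.0;
  }
};

int main() {
  // Optimization vector (2 unknowns) and constraint multiplier (1 equality).
  auto x    = ROL::makePtr<ROL::StdVector<double>>(ROL::makePtr<std::vector<double>>(2,0.0));
  auto emul = ROL::makePtr<ROL::StdVector<double>>(ROL::makePtr<std::vector<double>>(1,0.0));
  auto obj  = ROL::makePtr<SketchObjective>();
  auto con  = ROL::makePtr<SketchConstraint>();

  // Assemble a Type-E problem: objective plus one equality constraint, no bounds.
  ROL::Problem<double> problem(obj,x);
  problem.addConstraint("Equality",con,emul);
  problem.finalize(false,false,std::cout);

  // Options are read from sublist "Step" -> "Stabilized LCL"; defaults are used here.
  ROL::ParameterList list;
  list.sublist("General").set("Output Level",1);
  ROL::TypeE::StabilizedLCLAlgorithm<double> algo(list);
  algo.run(problem,std::cout);
  return 0;
}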