26 #ifndef _CXSC_LRVECTOR_INL_INCLUDED
27 #define _CXSC_LRVECTOR_INL_INCLUDED
49 :l(i1),u(i2),size(i2-i1+1)
51 noexcept:l(i1),u(i2),size(i2-i1+1)
55 if(i1>i2) cxscthrow(ERROR_LRVECTOR_WRONG_BOUNDARIES(
"l_rvector::l_rvector(const int &i1,const int &i2)"));
63 for(
int i=0, j=l-rs.l;i<size;i++,j++)
70 for (
int i=0;i<size;i++)
83 for(
int i=0, j=l-rs.l;i<size;i++,j++)
90 for (
int i=0;i<size;i++)
101 #if(CXSC_INDEX_CHECK)
107 #if(CXSC_INDEX_CHECK)
108 if(i<l||i>u) cxscthrow(ERROR_LRVECTOR_ELEMENT_NOT_IN_VEC(
"l_real & l_rvector::operator [](const int &i)"));
114 #if(CXSC_INDEX_CHECK)
120 #if(CXSC_INDEX_CHECK)
121 if(i<start||i>end) cxscthrow(ERROR_LRVECTOR_ELEMENT_NOT_IN_VEC(
"l_real & l_rvector_slice::operator [](const int &i)"));
127 #if(CXSC_INDEX_CHECK)
133 #if(CXSC_INDEX_CHECK)
134 if(1<l||i>u) cxscthrow(ERROR_LRVECTOR_SUB_ARRAY_TOO_BIG(
"l_rvector_slice l_rvector::operator ()(const int &i)"));
140 #if(CXSC_INDEX_CHECK)
146 #if(CXSC_INDEX_CHECK)
147 if(i1<l||i2>u) cxscthrow(ERROR_LRVECTOR_SUB_ARRAY_TOO_BIG(
"l_rvector_slice l_rvector::operator ()(const int &i1,const int &i2)"));
153 #if(CXSC_INDEX_CHECK)
159 #if(CXSC_INDEX_CHECK)
160 if(1<start||i>end) cxscthrow(ERROR_LRVECTOR_SUB_ARRAY_TOO_BIG(
"l_rvector_slice l_rvector_slice::operator ()(const int &i)"));
166 #if(CXSC_INDEX_CHECK)
172 #if(CXSC_INDEX_CHECK)
173 if(i1<start||i2>end) cxscthrow(ERROR_LRVECTOR_SUB_ARRAY_TOO_BIG(
"l_rvector_slice l_rvector_slice::operator ()(const int &i1,const int &i2)"));
179 #if(CXSC_INDEX_CHECK)
185 #if(CXSC_INDEX_CHECK)
186 if(rv.size>1) cxscthrow(ERROR_LRVECTOR_TYPE_CAST_OF_THICK_OBJ(
"l_real::l_real(const l_rvector &rv)"));
187 else if(rv.size<1) cxscthrow(ERROR_LRVECTOR_USE_OF_UNINITIALIZED_OBJ(
"l_real::l_real(const l_rvector &rv)"));
193 #if(CXSC_INDEX_CHECK)
199 #if(CXSC_INDEX_CHECK)
200 if(sl.size>1) cxscthrow(ERROR_LRVECTOR_TYPE_CAST_OF_THICK_OBJ(
"l_real::l_real(const l_rvector_slice &sl)"));
201 else if(sl.size<1) cxscthrow(ERROR_LRVECTOR_USE_OF_UNINITIALIZED_OBJ(
"l_real::l_real(const l_rvector_slice &sl)"));
203 *
this=sl.dat[sl.start-sl.l];
239 INLINE l_rvector::operator
void*() noexcept {
return _vvoid(*
this); }
241 #if(CXSC_INDEX_CHECK)
246 {
return _vsvsassign(*
this,sl); }
248 #if(CXSC_INDEX_CHECK)
253 {
return _vsvassign(*
this,rv); }
257 #if(CXSC_INDEX_CHECK)
262 {
return _vsvsassign(*
this,sl); }
264 #if(CXSC_INDEX_CHECK)
269 {
return _vsvassign(*
this,rv); }
271 INLINE l_rvector_slice::operator
void*() noexcept {
return _vsvoid(*
this); }
278 #if(CXSC_INDEX_CHECK)
283 { _vresize<class l_rvector,class l_real>(rv,len); }
285 #if(CXSC_INDEX_CHECK)
290 { _vresize<class l_rvector,class l_real>(rv,lb,ub); }
294 INLINE
bool operator !(
const l_rvector &rv) noexcept {
return _vnot(rv); }
338 INLINE std::ostream &operator <<(std::ostream &s,
const l_rvector &rv) noexcept {
return _vout(s,rv); }
339 INLINE std::ostream &operator <<(std::ostream &o,
const l_rvector_slice &sl) noexcept {
return _vsout(o,sl); }
340 INLINE std::istream &operator >>(std::istream &s,
l_rvector &rv) noexcept {
return _vin(s,rv); }
341 INLINE std::istream &operator >>(std::istream &s,
l_rvector_slice &rv) noexcept {
return _vsin(s,rv); }
347 #if(CXSC_INDEX_CHECK)
352 { _vvaccu(dp,rv1,rv2); }
354 #if(CXSC_INDEX_CHECK)
359 { _vsvaccu(dp,sl,rv); }
361 #if(CXSC_INDEX_CHECK)
366 { _vsvaccu(dp,sl,rv); }
367 INLINE
void accumulate(dotprecision &dp,
const l_rvector & rv1,
const l_rmatrix_subv &rv2)
368 #if(CXSC_INDEX_CHECK)
374 INLINE
void accumulate(dotprecision &dp,
const l_rmatrix_subv & rv1,
const l_rvector &rv2)
375 #if(CXSC_INDEX_CHECK)
381 INLINE
void accumulate(dotprecision &dp,
const l_rmatrix_subv & rv1,
const l_rmatrix_subv &rv2)
382 #if(CXSC_INDEX_CHECK)
389 #if(CXSC_INDEX_CHECK)
394 { _vsvsaccu(dp,sl1,sl2); }
396 #if(CXSC_INDEX_CHECK)
401 { _vvaccu(dp,rv1,rv2); }
403 #if(CXSC_INDEX_CHECK)
408 { _vsvaccu(dp,sl,rv); }
410 #if(CXSC_INDEX_CHECK)
415 { _vsvaccu(dp,sl,rv); }
416 INLINE
void accumulate(idotprecision &dp,
const l_rvector & rv1,
const l_rmatrix_subv &rv2)
417 #if(CXSC_INDEX_CHECK)
423 INLINE
void accumulate(idotprecision &dp,
const l_rmatrix_subv & rv1,
const l_rvector &rv2)
424 #if(CXSC_INDEX_CHECK)
430 INLINE
void accumulate(idotprecision &dp,
const l_rmatrix_subv & rv1,
const l_rmatrix_subv &rv2)
431 #if(CXSC_INDEX_CHECK)
438 #if(CXSC_INDEX_CHECK)
443 { _vsvsaccu(dp,sl1,sl2); }
447 #if(CXSC_INDEX_CHECK)
452 {
return _vvlmult<l_rvector,l_rvector,l_real>(rv1,rv2); }
454 #if(CXSC_INDEX_CHECK)
459 {
return _vsvlmult<l_rvector_slice,l_rvector,l_real>(sl,rv); }
461 #if(CXSC_INDEX_CHECK)
466 {
return _vsvlmult<l_rvector_slice,l_rvector,l_real>(sl,rv); }
468 #if(CXSC_INDEX_CHECK)
473 {
return _vsvslmult<l_rvector_slice,l_rvector_slice,l_real>(sl1,sl2); }
478 #if(CXSC_INDEX_CHECK)
483 {
return _vvplus<l_rvector,l_rvector,l_rvector>(rv1,rv2); }
485 #if(CXSC_INDEX_CHECK)
490 {
return _vvsplus<l_rvector,l_rvector_slice,l_rvector>(rv,sl); }
492 #if(CXSC_INDEX_CHECK)
497 {
return _vvsplus<l_rvector,l_rvector_slice,l_rvector>(rv,sl); }
499 #if(CXSC_INDEX_CHECK)
504 {
return _vsvsplus<l_rvector_slice,l_rvector_slice,l_rvector>(sl1,sl2); }
506 #if(CXSC_INDEX_CHECK)
511 {
return _vvplusassign(rv1,rv2); }
513 #if(CXSC_INDEX_CHECK)
518 {
return _vvsplusassign(rv,sl); }
520 #if(CXSC_INDEX_CHECK)
525 {
return _vsvplusassign(*
this,rv); }
527 #if(CXSC_INDEX_CHECK)
532 {
return _vsvsplusassign(*
this,sl2); }
537 #if(CXSC_INDEX_CHECK)
542 {
return _vvminus<l_rvector,l_rvector,l_rvector>(rv1,rv2); }
544 #if(CXSC_INDEX_CHECK)
549 {
return _vvsminus<l_rvector,l_rvector_slice,l_rvector>(rv,sl); }
551 #if(CXSC_INDEX_CHECK)
556 {
return _vsvminus<l_rvector_slice,l_rvector,l_rvector>(sl,rv); }
558 #if(CXSC_INDEX_CHECK)
563 {
return _vsvsminus<l_rvector_slice,l_rvector_slice,l_rvector>(sl1,sl2); }
565 #if(CXSC_INDEX_CHECK)
570 {
return _vvminusassign(rv1,rv2); }
572 #if(CXSC_INDEX_CHECK)
577 {
return _vvsminusassign(rv,sl); }
579 #if(CXSC_INDEX_CHECK)
584 {
return _vsvminusassign(*
this,rv); }
586 #if(CXSC_INDEX_CHECK)
591 {
return _vsvsminusassign(*
this,sl2); }
593 INLINE
bool operator ==(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vveq(rv1,rv2); }
597 INLINE
bool operator !=(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vvneq(rv1,rv2); }
601 INLINE
bool operator <(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vvless(rv1,rv2); }
605 INLINE
bool operator <=(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vvleq(rv1,rv2); }
609 INLINE
bool operator >(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vvless(rv2,rv1); }
613 INLINE
bool operator >=(
const l_rvector &rv1,
const l_rvector &rv2) noexcept {
return _vvleq(rv2,rv1); }
623 #if(CXSC_INDEX_CHECK)
628 { _vvaccu(dp,rv2,rv1); }
630 #if(CXSC_INDEX_CHECK)
635 { _vvaccu(dp,rv1,rv2); }
637 #if(CXSC_INDEX_CHECK)
642 { _vsvaccu(dp,sl,rv); }
644 #if(CXSC_INDEX_CHECK)
649 { _vsvaccu(dp,sl,rv); }
651 #if(CXSC_INDEX_CHECK)
656 { _vsvaccu(dp,sl,rv); }
657 INLINE
void accumulate(dotprecision &dp,
const rvector & rv1,
const l_rmatrix_subv &rv2)
658 #if(CXSC_INDEX_CHECK)
664 INLINE
void accumulate(dotprecision &dp,
const l_rvector & rv1,
const rmatrix_subv &rv2)
665 #if(CXSC_INDEX_CHECK)
672 #if(CXSC_INDEX_CHECK)
677 { _vsvaccu(dp,sl,rv); }
678 INLINE
void accumulate(dotprecision &dp,
const rmatrix_subv & rv1,
const l_rvector &rv2)
679 #if(CXSC_INDEX_CHECK)
685 INLINE
void accumulate(dotprecision &dp,
const l_rmatrix_subv & rv1,
const rvector &rv2)
686 #if(CXSC_INDEX_CHECK)
692 INLINE
void accumulate(dotprecision &dp,
const rmatrix_subv & rv1,
const l_rmatrix_subv &rv2)
693 #if(CXSC_INDEX_CHECK)
699 INLINE
void accumulate(dotprecision &dp,
const l_rmatrix_subv & rv1,
const rmatrix_subv &rv2)
700 #if(CXSC_INDEX_CHECK)
707 #if(CXSC_INDEX_CHECK)
712 { _vsvsaccu(dp,sl2,sl1); }
714 #if(CXSC_INDEX_CHECK)
719 { _vsvsaccu(dp,sl1,sl2); }
722 #if(CXSC_INDEX_CHECK)
727 { _vvaccu(dp,rv2,rv1); }
729 #if(CXSC_INDEX_CHECK)
734 { _vvaccu(dp,rv1,rv2); }
736 #if(CXSC_INDEX_CHECK)
741 { _vsvaccu(dp,sl,rv); }
743 #if(CXSC_INDEX_CHECK)
748 { _vsvaccu(dp,sl,rv); }
750 #if(CXSC_INDEX_CHECK)
755 { _vsvaccu(dp,sl,rv); }
756 INLINE
void accumulate(idotprecision &dp,
const rvector & rv1,
const l_rmatrix_subv &rv2)
757 #if(CXSC_INDEX_CHECK)
763 INLINE
void accumulate(idotprecision &dp,
const l_rvector & rv1,
const rmatrix_subv &rv2)
764 #if(CXSC_INDEX_CHECK)
771 #if(CXSC_INDEX_CHECK)
776 { _vsvaccu(dp,sl,rv); }
777 INLINE
void accumulate(idotprecision &dp,
const rmatrix_subv & rv1,
const l_rvector &rv2)
778 #if(CXSC_INDEX_CHECK)
784 INLINE
void accumulate(idotprecision &dp,
const l_rmatrix_subv & rv1,
const rvector &rv2)
785 #if(CXSC_INDEX_CHECK)
792 #if(CXSC_INDEX_CHECK)
797 { _vsvsaccu(dp,sl2,sl1); }
799 #if(CXSC_INDEX_CHECK)
804 { _vsvsaccu(dp,sl1,sl2); }
807 #if(CXSC_INDEX_CHECK)
812 {
return _vvlmult<rvector,l_rvector,l_real>(rv1,rv2); }
814 #if(CXSC_INDEX_CHECK)
819 {
return _vsvlmult<rvector_slice,l_rvector,l_real>(sl,rv); }
821 #if(CXSC_INDEX_CHECK)
826 {
return _vsvlmult<l_rvector_slice,rvector,l_real>(sl,rv); }
828 #if(CXSC_INDEX_CHECK)
833 {
return _vsvslmult<rvector_slice,l_rvector_slice,l_real>(sl1,sl2); }
836 #if(CXSC_INDEX_CHECK)
841 {
return _vvlmult<rvector,l_rvector,l_real>(rv2,rv1); }
843 #if(CXSC_INDEX_CHECK)
848 {
return _vsvlmult<l_rvector_slice,rvector,l_real>(sl,rv); }
850 #if(CXSC_INDEX_CHECK)
855 {
return _vsvlmult<rvector_slice,l_rvector,l_real>(sl,rv); }
857 #if(CXSC_INDEX_CHECK)
862 {
return _vsvslmult<rvector_slice,l_rvector_slice,l_real>(sl2,sl1); }
865 #if(CXSC_INDEX_CHECK)
870 {
return _vvplus<rvector,l_rvector,l_rvector>(rv1,rv2); }
872 #if(CXSC_INDEX_CHECK)
877 {
return _vvsplus<rvector,l_rvector_slice,l_rvector>(rv,sl); }
879 #if(CXSC_INDEX_CHECK)
884 {
return _vvsplus<l_rvector,rvector_slice,l_rvector>(rv,sl); }
886 #if(CXSC_INDEX_CHECK)
891 {
return _vsvsplus<rvector_slice,l_rvector_slice,l_rvector>(sl1,sl2); }
894 #if(CXSC_INDEX_CHECK)
899 {
return _vvplus<rvector,l_rvector,l_rvector>(rv2,rv1); }
901 #if(CXSC_INDEX_CHECK)
906 {
return _vvsplus<l_rvector,rvector_slice,l_rvector>(rv,sl); }
908 #if(CXSC_INDEX_CHECK)
913 {
return _vvsplus<rvector,l_rvector_slice,l_rvector>(rv,sl); }
915 #if(CXSC_INDEX_CHECK)
920 {
return _vsvsplus<rvector_slice,l_rvector_slice,l_rvector>(sl2,sl1); }
923 #if(CXSC_INDEX_CHECK)
928 {
return _vvplusassign(rv1,rv2); }
930 #if(CXSC_INDEX_CHECK)
935 {
return _vvsplusassign(rv,sl); }
937 #if(CXSC_INDEX_CHECK)
942 {
return _vsvplusassign(*
this,rv); }
944 #if(CXSC_INDEX_CHECK)
949 {
return _vsvsplusassign(*
this,sl2); }
952 #if(CXSC_INDEX_CHECK)
957 {
return _vvminus<rvector,l_rvector,l_rvector>(rv1,rv2); }
959 #if(CXSC_INDEX_CHECK)
964 {
return _vvsminus<rvector,l_rvector_slice,l_rvector>(rv,sl); }
966 #if(CXSC_INDEX_CHECK)
971 {
return _vsvminus<rvector_slice,l_rvector,l_rvector>(sl,rv); }
973 #if(CXSC_INDEX_CHECK)
978 {
return _vsvsminus<rvector_slice,l_rvector_slice,l_rvector>(sl1,sl2); }
981 #if(CXSC_INDEX_CHECK)
986 {
return _vvminus<l_rvector,rvector,l_rvector>(rv1,rv2); }
988 #if(CXSC_INDEX_CHECK)
993 {
return _vvsminus<l_rvector,rvector_slice,l_rvector>(rv,sl); }
995 #if(CXSC_INDEX_CHECK)
1000 {
return _vsvminus<l_rvector_slice,rvector,l_rvector>(sl,rv); }
1002 #if(CXSC_INDEX_CHECK)
1007 {
return _vsvsminus<l_rvector_slice,rvector_slice,l_rvector>(sl1,sl2); }
1010 #if(CXSC_INDEX_CHECK)
1015 {
return _vvminusassign(rv1,rv2); }
1017 #if(CXSC_INDEX_CHECK)
1022 {
return _vvsminusassign(rv,sl); }
1024 #if(CXSC_INDEX_CHECK)
1029 {
return _vsvminusassign(*
this,rv); }
1031 #if(CXSC_INDEX_CHECK)
1036 {
return _vsvsminusassign(*
this,sl2); }
The Data Type dotprecision.
The Data Type idotprecision.
The Multiple-Precision Data Type l_real.
l_real(void) noexcept
Constructor of class l_real.
The Multiple-Precision Data Type l_rvector_slice.
l_rvector_slice & operator-=(const l_rvector &rv) noexcept
Implementation of subtraction and allocation operation.
l_rvector_slice & operator+=(const l_rvector &rv) noexcept
Implementation of addition and allocation operation.
l_rvector_slice & operator()() noexcept
Operator for accessing the whole vector.
l_rvector_slice & operator=(const l_rvector_slice &sl) noexcept
Implementation of standard assigning operator.
l_rvector_slice & operator*=(const l_real &r) noexcept
Implementation of multiplication and allocation operation.
l_real & operator[](const int &i) const noexcept
Operator for accessing the single elements of the vector.
l_rvector_slice & operator/=(const l_real &r) noexcept
Implementation of division and allocation operation.
The Multiple-Precision Data Type l_rvector.
l_rvector() noexcept
Constructor of class l_rvector.
l_rvector & operator=(const l_rvector &rv) noexcept
Implementation of standard assigning operator.
l_real & operator[](const int &i) const noexcept
Operator for accessing the single elements of the vector.
l_rvector & operator()() noexcept
Operator for accessing the whole vector.
The Data Type rvector_slice.
The namespace cxsc, providing all functionality of the class library C-XSC.
civector operator/(const cimatrix_subv &rv, const cinterval &s) noexcept
Implementation of division operation.
cdotprecision & operator+=(cdotprecision &cd, const l_complex &lc) noexcept
Implementation of standard algebraic addition and allocation operation.
INLINE l_rvector _l_rvector(const rmatrix_subv &rs) noexcept
Deprecated typecast, which only exists for reasons of compatibility with older versions of C-XSC.
cimatrix & operator*=(cimatrix &m, const cinterval &c) noexcept
Implementation of multiplication and allocation operation.
ivector abs(const cimatrix_subv &mv) noexcept
Returns the absolute value of the matrix subvector.
civector operator*(const cimatrix_subv &rv, const cinterval &s) noexcept
Implementation of multiplication operation.
void Resize(cimatrix &A) noexcept
Resizes the matrix.
cimatrix & operator/=(cimatrix &m, const cinterval &c) noexcept
Implementation of division and allocation operation.