/* This file is part of Cloudy and is copyright (C)1978-2008 by Gary J. Ferland and
 * others. For conditions of distribution and use see copyright notice in license.txt */
/*GrainMakeDiffuse main routine for generating the grain diffuse emission, called by RT_diffuse */
#include "cddefines.h"
#include "physconst.h"
#include "rfield.h"
#include "phycon.h"
#include "dense.h"
#include "hmi.h"
#include "thermal.h"
#include "trace.h"
#include "thirdparty.h"
#include "iterations.h"
#include "grainvar.h"
#include "grains.h"

#define NO_ATOMS(ND) (gv.bin[ND]->AvVol*gv.bin[ND]->dustp[0]/ATOMIC_MASS_UNIT/gv.bin[ND]->atomWeight)

/* NB NB -- the sequence below has been carefully chosen and should NEVER be
 * altered unless you really know what you are doing !! */
/* >>chng 03 jan 16, introduced QH_THIGH_FAIL and started using splint_safe and spldrv_safe
 * throughout the code; this solves the parispn_a031_sil.in bug, PvH */
/* >>chng 03 jan 16, rescheduled QH_STEP_FAIL as non-fatal; it can be triggered at very low temps, PvH */
typedef enum {
	/* the following are OK */
	/*  0        1             2                  3    */
	QH_OK, QH_ANALYTIC, QH_ANALYTIC_RELAX, QH_DELTA,
	/* the following are mild errors we already recovered from */
	/*  4               5             6              7    */
	QH_NEGRATE_FAIL, QH_LOOP_FAIL, QH_ARRAY_FAIL, QH_THIGH_FAIL,
	/* any of these errors will prompt qheat to do another iteration */
	/*  8          9             10             11   */
	QH_RETRY, QH_CONV_FAIL, QH_BOUND_FAIL, QH_DELTA_FAIL,
	/* any error larger than QH_NO_REBIN will cause GetProbDistr_LowLimit to return
	 * before even attempting to rebin the results; we may be able to recover though... */
	/*  12          13            14            15   */
	QH_NO_REBIN, QH_LOW_FAIL, QH_HIGH_FAIL, QH_STEP_FAIL,
	/* any case larger than QH_FATAL is truly pathological; there is no hope of recovery */
	/*  16          17            18            19   */
	QH_FATAL, QH_WIDE_FAIL, QH_NBIN_FAIL, QH_REBIN_FAIL
} QH_Code;

/*================================================================================*/
/* definitions relating to quantum heating routines */

/* this is the minimum number of bins for quantum heating calculation to be valid */
static const long NQMIN = 10L;

/* this is the lowest value for dPdlnT that should be included in the modeling */
static const double PROB_CUTOFF_LO = 1.e-15;
static const double PROB_CUTOFF_HI = 1.e-20;
static const double SAFETY = 1.e+8;

/* during the first NSTARTUP steps, use special algorithm to calculate stepsize */
static const long NSTARTUP = 5L;

/* if the average number of multiple events is above this number
 * don't try full quantum heating treatment. */
static const double MAX_EVENTS = 150.;

/* maximum number of tries for quantum heating routine */
/* >>chng 02 jan 30, changed LOOP_MAX from 10 -> 20, PvH */
static const long LOOP_MAX = 20L;

/* if all else fails, divide temp estimate by this factor */
static const double DEF_FAC = 3.;
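/* A minimal sketch (not part of the build, kept in an #if 0 block like the other
 * debug fragments in this file) of how the ordering of QH_Code above is meant to be
 * used: severity is only ever raised via MAX2, and the thresholds QH_RETRY,
 * QH_NO_REBIN and QH_FATAL partition the codes, as qheat() does further below.
 * The helper name QH_OutcomeOK is hypothetical and exists only in this sketch. */
#if 0
STATIC bool QH_OutcomeOK(QH_Code code)
{
	/* anything below QH_RETRY is either fine or a mild error already recovered from */
	return ( code < QH_RETRY );
}
/* typical caller pattern:
 *   ErrorCode = MAX2(ErrorCode,QH_CONV_FAIL);   raise severity, never lower it
 *   if( ErrorCode >= QH_FATAL )  give up;       no recovery possible
 *   else if( ErrorCode >= QH_RETRY )  iterate;  try again with a new qtmin     */
#endif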
/* total probability for all bins should not deviate more than this from 1. */
static const double PROB_TOL = 0.02;

/* after NQTEST steps, make an estimate if prob distr will converge in NQGRID steps */
/* >>chng 02 jan 30, change NQTEST from 1000 -> 500, PvH */
static const long NQTEST = 500L;

/* if the ratio fwhm/Umax is lower than this number
 * don't try full quantum heating treatment. */
static const double FWHM_RATIO = 0.1;
/* if the ratio fwhm/Umax is lower than this number
 * don't even try analytic approximation, use delta function instead */
static const double FWHM_RATIO2 = 0.007;

/* maximum number of steps for integrating quantum heating probability distribution */
static const long MAX_LOOP = 2*NQGRID;

/* this is the tolerance used while integrating the temperature distribution of the grains */
static const double QHEAT_TOL = 5.e-3;

/* maximum number of tries before we declare that probability distribution simply won't fit */
static const long WIDE_FAIL_MAX = 3;

/* multipliers for PROB_TOL used in GetProbDistr_HighLimit */
static const double STRICT = 1.;
static const double RELAX = 15.;

/* when rebinning quantum heating results, make ln(qtemp[i]/qtemp[i-1]) == QT_RATIO */
/* this constant determines the accuracy with which the Wien tail of the grain emission
 * is calculated; if x = h*nu/(k*T_gr) and d = QT_RATIO-1., then the relative accuracy of
 * the flux in the Wien tail is Acc = fabs(x*d^2/12 - x^2*d^2/24). A typical value of x
 * would be x = 15, so QT_RATIO = 1.03 should converge the spectrum to better than 1% */
static const double QT_RATIO = 1.03;
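/* A worked check of the accuracy estimate quoted above (not part of the build).
 * For x = 15 and d = QT_RATIO-1 = 0.03 the formula gives
 * Acc = |15*0.03^2/12 - 15^2*0.03^2/24| = |0.001125 - 0.0084375| ~ 7.3e-3,
 * i.e. just under the 1% quoted in the comment. The helper name WienTailAccuracy
 * is hypothetical and exists only in this sketch. */
#if 0
STATIC double WienTailAccuracy(double x, double d)
{
	/* relative flux error in the Wien tail for bin ratio 1+d at x = h*nu/(k*T_gr) */
	return fabs(x*d*d/12. - x*x*d*d/24.);
}
/* WienTailAccuracy(15.,QT_RATIO-1.) evaluates to roughly 7.3e-3 */
#endif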
/*================================================================================*/
/* global variables */

/* these data define the enthalpy function for silicates
 * derived from:
 * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230
 * coefficients converted to rydberg energy units, per unit atom
 * assuming a density of 3.3 g/cm^3 and pure MgSiFeO4.
 * this is not right, but the result is correct because number
 * of atoms will be calculated using the same assumption. */

/* this is the specific density of silicate in g/cm^3 */
static const double DEN_SIL = 3.30;

/* these are the mean molecular weights per atom for MgSiFeO4 and SiO2 in amu */
static const double MW_SIL = 24.6051;
/*#define MW_SIO2 20.0283*/

static const double tlim[5]={0.,50.,150.,500.,DBL_MAX};
static const double ppower[4]={2.00,1.30,0.68,0.00};
static const double cval[4]={
	1.40e3/DEN_SIL*ATOMIC_MASS_UNIT*MW_SIL/EN1RYD,
	2.20e4/DEN_SIL*ATOMIC_MASS_UNIT*MW_SIL/EN1RYD,
	4.80e5/DEN_SIL*ATOMIC_MASS_UNIT*MW_SIL/EN1RYD,
	3.41e7/DEN_SIL*ATOMIC_MASS_UNIT*MW_SIL/EN1RYD};
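/* A minimal sketch (not part of the build) of how coefficients of this form can be
 * turned into an enthalpy per atom: with a heat capacity cval[i]*T^ppower[i] on each
 * interval [tlim[i],tlim[i+1]), the enthalpy is the running integral over T. The
 * production code uses uderiv/ufunct/DebyeDeriv declared below; this fragment only
 * illustrates the piecewise power-law part for silicates, and the helper name
 * SilEnthalpyPerAtom is hypothetical. */
#if 0
STATIC double SilEnthalpyPerAtom(double T)
{
	double U = 0.;
	for( int i=0; i < 4 && T > tlim[i]; i++ )
	{
		double Thi = MIN2(T,tlim[i+1]);
		/* integral of cval[i]*T^ppower[i] dT from tlim[i] to Thi */
		U += cval[i]/(ppower[i]+1.)*(pow(Thi,ppower[i]+1.)-pow(tlim[i],ppower[i]+1.));
	}
	/* enthalpy per atom, nominally in Ryd given the conversion of cval above */
	return U;
}
#endif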
/* initialize phiTilde */
STATIC void qheat_init(long,/*@out@*/double[],/*@out@*/double*);
/* worker routine, this implements the algorithm of Guhathakurta & Draine */
STATIC void GetProbDistr_LowLimit(long,double,double,double,/*@in@*/double[],/*@in@*/double[],
				  /*@out@*/double[],/*@out@*/double[],/*@out@*/double[],
				  /*@out@*/long*,/*@out@*/double*,long*,/*@out@*/QH_Code*);
/* try two consecutive integration steps using stepsize "step/2." (yielding p[k]),
 * and also one double integration step using stepsize "step" (yielding p2k). */
STATIC double TryDoubleStep(double[],double[],double[],double[],double[],double[],
			    double[],double,/*@out@*/double*,long,long,/*@out@*/bool*);
/* calculate logarithmic integral from (x1,y1) to (x2,y2) */
STATIC double log_integral(double,double,double,double);
/* scan the probability distribution for valid range */
STATIC void ScanProbDistr(double[],double[],long,double,long,/*@out@*/long*,/*@out@*/long*,
			  /*@out@*/long*,long*,QH_Code*);
/* rebin the quantum heating results to speed up RT_diffuse */
STATIC long RebinQHeatResults(long,long,long,double[],double[],double[],double[],double[],
			      double[],double[],QH_Code*);
/* calculate approximate probability distribution in high intensity limit */
STATIC void GetProbDistr_HighLimit(long,double,double,double,/*@out@*/double[],/*@out@*/double[],
				   /*@out@*/double[],/*@out@*/double*,/*@out@*/long*,
				   /*@out@*/double*,/*@out@*/QH_Code*);
/* derivative of the enthalpy function dU/dT */
STATIC double uderiv(double,long);
/* enthalpy function */
STATIC double ufunct(double,long,/*@out@*/bool*);
/* inverse enthalpy function */
STATIC double inv_ufunct(double,long,/*@out@*/bool*);
/* helper function for calculating enthalpy, uses Debye theory */
STATIC double DebyeDeriv(double,long);

/* >>chng 01 oct 29, introduced gv.bin[nd]->cnv_H_pGR, cnv_GR_pH, etc. PvH */
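/* A minimal sketch (not part of the build) of the modified-blackbody sum that the
 * emission loop in GrainMakeDiffuse below evaluates for one frequency cell:
 * flux = Sum_j qprob[j]*dstab1[i]*anu2[i]*widflx[i]/(exp(h*nu/k*T_j)-1), later scaled
 * by 2*PI4*(FR1RYD/c)^2*FR1RYD and the grain-to-gas conversion factor. The helper
 * name PlanckDenom is hypothetical; the branches mirror the loop below. */
#if 0
STATIC double PlanckDenom(double f)
{
	/* evaluate exp(f)-1 for f = h*nu/(k*T), using series expansions for small f */
	const double LIM1 = pow(2.e-6,1./2.);
	const double LIM2 = pow(6.e-6,1./3.);
	if( f > LIM2 )
		return exp(f) - 1.;
	else if( f > LIM1 )
		return (1. + 0.5*f)*f;
	else
		return f;
}
#endif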
/* main routine for generating the grain diffuse emission, called by RT_diffuse */
void GrainMakeDiffuse(void)
{
	long i,
	  j,
	  nd;
	double bfac,
	  f,
	  factor,
	  flux,
	  x;

	/* >>chng 01 sep 11, replace array allocation on stack with
	 * MALLOC to avoid bug in gcc 3.0 on Sparc platforms, PvH */
	double *qtemp/*[NQGRID]*/, *qprob/*[NQGRID]*/;

#	ifndef NDEBUG
	bool lgNoTdustFailures;
	double BolFlux,
	  Comparison1,
	  Comparison2;
#	endif

	/* this assures 6-digit precision in the evaluation of the exponential below */
	const double LIM1 = pow(2.e-6,1./2.);
	const double LIM2 = pow(6.e-6,1./3.);

	DEBUG_ENTRY( "GrainMakeDiffuse()" );

	factor = 2.*PI4*POW2(FR1RYD/SPEEDLIGHT)*FR1RYD;
	/* >>chng 96 apr 26 upper limit chng from 15 to 75 Peter van Hoof */
	/* >>chng 00 apr 10 use constants appropriate for double precision, by PvH */
	x = log(0.999*DBL_MAX);

	/* save grain emission per unit volume */
	for( i=0; i < rfield.nflux; i++ )
	{
		/* used in RT_diffuse to derive total emission */
		gv.GrainEmission[i] = 0.;
		gv.SilicateEmission[i] = 0.;
		gv.GraphiteEmission[i] = 0.;
	}

	qtemp = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));
	qprob = (double*)MALLOC((size_t)(NQGRID*sizeof(double)));

	for( nd=0; nd < gv.nBin; nd++ )
	{
		strg_type scase;
		bool lgLocalQHeat;
		long qnbin=-200;
		realnum *grn, threshold;
		double xx;

		/* save emission from this species */
		scase = gv.which_strg[gv.bin[nd]->matType];
		switch( scase )
		{
		case STRG_SIL:
			grn = gv.SilicateEmission;
			break;
		case STRG_CAR:
			grn = gv.GraphiteEmission;
			break;
		default:
			fprintf( ioQQQ, " GrainMakeDiffuse called with unknown storage class: %d\n", scase );
			cdEXIT(EXIT_FAILURE);
		}

		/* this local copy is necessary to keep lint happy */
		lgLocalQHeat = gv.bin[nd]->lgQHeat;
		/* >>chng 04 nov 09, do not evaluate quantum heating if abundance is negligible, PvH
		 * this prevents PAHs deep inside molecular regions from failing if GrnVryDepth is used */
		/* >>chng 04 dec 31, introduced separate thresholds near I-front and in molecular region, PvH */
		threshold = ( dense.xIonDense[ipHYDROGEN][0]+dense.xIonDense[ipHYDROGEN][1] > hmi.H2_total ) ?
			gv.dstAbundThresholdNear : gv.dstAbundThresholdFar;

		if( lgLocalQHeat && gv.bin[nd]->dstAbund >= threshold )
		{
			qheat(qtemp,qprob,&qnbin,nd);

			if( gv.bin[nd]->lgUseQHeat )
			{
				ASSERT( qnbin > 0 );
			}
		}
		else
		{
			/* >>chng 04 dec 31, repaired bug lgUseQHeat not set when abundance below threshold, PvH */
			gv.bin[nd]->lgUseQHeat = false;
		}

		flux = 1.;
		/* flux can only become zero in the Wien tail */
		/* >>chng 04 jan 25, replaced flux > 0. with (realnum)flux > 0.f, PvH */
		for( i=0; i < rfield.nflux && (realnum)flux > 0.f; i++ )
		{
			flux = 0.;
			if( lgLocalQHeat && gv.bin[nd]->lgUseQHeat )
			{
				xx = 1.;
				/* we start at the high temperature end and work our way down
				 * until the contribution becomes negligible */
				for( j=qnbin-1; j >= 0 && xx > flux*DBL_EPSILON; j-- )
				{
					f = TE1RYD/qtemp[j]*rfield.anu[i];
					if( f < x )
					{
						/* want the number exp(hnu/kT) - 1, two expansions */
						/* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
						if( f > LIM2 )
							bfac = exp(f) - 1.;
						else if( f > LIM1 )
							bfac = (1. + 0.5*f)*f;
						else
							bfac = f;
						xx = qprob[j]*gv.bin[nd]->dstab1[i]*rfield.anu2[i]*
							rfield.widflx[i]/bfac;
						flux += xx;
					}
					else
					{
						xx = 0.;
					}
				}
			}
			else
			{
				f = TE1RYD/gv.bin[nd]->tedust*rfield.anu[i];
				if( f < x )
				{
					/* >>chng 04 jan 25, changed 1.e-5 -> LIM1, LIM2, PvH */
					if( f > LIM2 )
						bfac = exp(f) - 1.;
					else if( f > LIM1 )
						bfac = (1. + 0.5*f)*f;
					else
						bfac = f;
					flux = gv.bin[nd]->dstab1[i]*rfield.anu2[i]*rfield.widflx[i]/bfac;
				}
			}

			/* do multiplications outside loop, PvH */
			flux *= factor*gv.bin[nd]->cnv_H_pCM3;

			/* remember the local emission; these arrays are zeroed out for each zone
			 * above and are now incremented, so this is the unit emission from this zone */
			gv.GrainEmission[i] += (realnum)flux;
			/* unit emission for each different grain type */
			grn[i] += (realnum)flux;
		}
	}

	free( qprob );
	free( qtemp );
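	/* A debug fragment in the same spirit as the #if 0 blocks used elsewhere in this
	 * file (not part of the build): dump the per-zone grain emission spectrum that was
	 * just accumulated above. The output file name is arbitrary and only used in this sketch. */
#	if 0
	{
		FILE *file = fopen("GrainEmission.asc","w");
		for( i=0; i < rfield.nflux; i++ )
			fprintf(file,"%e %e %e %e\n", rfield.anu[i],
				gv.GrainEmission[i], gv.SilicateEmission[i], gv.GraphiteEmission[i] );
		fclose(file);
	}
#	endif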
#	ifndef NDEBUG
	/*********************************************************************************
	 *
	 * Following are three checks on energy and charge conservation by the grain code.
	 * Their primary function is as an internal consistency check, so that coding
	 * errors get caught as early as possible. This is why the program terminates as
	 * soon as any one of the checks fails.
	 *
	 * NB NB - despite appearances, these checks do NOT guarantee overall energy
	 * conservation in the Cloudy model to the asserted tolerance, see note 1B !
	 *
	 * Note 1: there are two sources for energy imbalance in the grain code (see A & B).
	 *  A: Interpolation in dstems. The code calculates how much energy the grains
	 *     emit in thermal radiation (gv.bin[nd]->GrainHeat), and converts that into
	 *     an (average) grain temperature by reverse interpolation in dstems. If
	 *     quantum heating is not used, that temperature is used directly to generate
	 *     the local diffuse emission. Hence the finite resolution of the dstems grid
	 *     can lead to small errors in flux. This is tested in Check 1. The maximum
	 *     error of interpolating in dstems scales with NDEMS^-3. The same problem
	 *     can also occur when quantum heating is used; however, the fact that many
	 *     bins are used will probably lead to a cancellation effect of the errors.
	 *  B: RT_OTS_Update gets called AFTER grain() has completed, so the grain heating
	 *     was calculated using a different version of the OTS fields than the one
	 *     that gets fed into the RT routines (where the actual attenuation of the
	 *     radiation fields by the grain opacity is done). This can lead to an energy
	 *     imbalance, depending on how accurate the convergence of the OTS fields is.
	 *     This is outside the control of the grain code and is therefore NOT checked.
	 *     Rather, the grain code remembers the contribution from the old OTS fields
	 *     (through gv.bin[nd]->BolFlux) and uses that in Check 3. In most models the
	 *     difference will be well below 0.1%, but in AGN-type models where OTS continua
	 *     are important, the energy imbalance can be of the order of 0.5% of the grain
	 *     heating (status nov 2001). On 04 jan 25 the initialization of phiTilde has
	 *     been moved to qheat, implying that phiTilde now uses the updated version of
	 *     the OTS fields. The total amount of radiated energy however is still based
	 *     on gv.bin[nd]->GrainHeat which uses the old version of the OTS fields.
	 *  C: Energy conservation for collisional processes is guaranteed by adding in
	 *     (very small) correction terms. These corrections are needed to cover up
	 *     small imperfections in the theory, and cannot be avoided without making the
	 *     already very complex theory even more complex.
	 *  D: Photo-electric heating and collisional cooling can have an important effect
	 *     on the total heating balance of the gas. Both processes depend strongly on
	 *     the grain charge, so assuring proper charge balance is important as well.
	 *     This is tested in Check 2.
	 *
	 * Note 2: for quantum heating it is important to resolve the Maxwell distribution
	 * of the electrons and ions far enough into the tail that the total amount of
	 * energy contained in the remaining tail is negligible. If this is not the case,
	 * the assert at the beginning of the qheat() routine will fail. This is because
	 * the code filling up the phiTilde array in GrainCollHeating() assumes a value for
	 * the average particle energy based on a Maxwell distribution going to infinity.
	 * If the maximum energy used is too low, the assumed average energy would be
	 * incorrect.
	 *
	 *********************************************************************************/

	lgNoTdustFailures = true;
	for( nd=0; nd < gv.nBin; nd++ )
	{
		if( !gv.bin[nd]->lgTdustConverged )
		{
			lgNoTdustFailures = false;
			break;
		}
	}

	/* CHECK 1: does the grain thermal emission conserve energy ? */
	BolFlux = 0.;
	for( i=0; i < rfield.nflux; i++ )
	{
		BolFlux += gv.GrainEmission[i]*rfield.anu[i]*EN1RYD;
	}
	Comparison1 = 0.;
	for( nd=0; nd < gv.nBin; nd++ )
	{
		if( gv.bin[nd]->tedust < gv.bin[nd]->Tsublimat )
			Comparison1 += CONSERV_TOL*gv.bin[nd]->GrainHeat;
		else
			/* for high temperatures the interpolation in dstems
			 * is less accurate, so we have to be more lenient */
			Comparison1 += 10.*CONSERV_TOL*gv.bin[nd]->GrainHeat;
	}

	/* >>chng 04 mar 11, add constant grain temperature to pass assert */
	/* >>chng 04 jun 01, deleted test for constant grain temperature, PvH */
	ASSERT( fabs(BolFlux-gv.GrainHeatSum) < Comparison1 );

	/* CHECK 2: assert charging balance */
	for( nd=0; nd < gv.nBin; nd++ )
	{
		if( gv.bin[nd]->lgChrgConverged )
		{
			double ave = 0.5*(gv.bin[nd]->RateDn+gv.bin[nd]->RateUp);
			/* >>chng 04 dec 16, add lgAbort - do not throw assert if we are in
			 * process of aborting */
			ASSERT( lgAbort || fabs(gv.bin[nd]->RateDn-gv.bin[nd]->RateUp) < CONSERV_TOL*ave );
		}
	}

	if( lgNoTdustFailures && gv.lgDHetOn && gv.lgDColOn && thermal.ConstGrainTemp == 0. )
	{
		/* CHECK 3: calculate the total energy donated to grains, must be balanced by
		 * the energy emitted in thermal radiation plus various forms of gas heating */
		Comparison1 = 0.;
		for( nd=0; nd < gv.nBin; nd++ )
		{
			Comparison1 += gv.bin[nd]->BolFlux;
		}
		/* add in collisional heating of grains by plasma (if positive) */
		Comparison1 += MAX2(gv.GasCoolColl,0.);
		/* add in net amount of chemical energy donated by recombining ions and molecule formation */
		Comparison1 += gv.GrainHeatChem;

		/*            thermal emis        PE effect              gas heating by coll     thermionic emis */
		Comparison2 = gv.GrainHeatSum+thermal.heating[0][13]+thermal.heating[0][14]+thermal.heating[0][25];

		/* >>chng 06 jun 02, add test on gv.GrainHeatScaleFactor so that assert not thrown
		 * when set grain heat command is used */
		ASSERT( lgAbort || gv.GrainHeatScaleFactor != 1.f || gv.lgBakesPAH_heat ||
			fabs(Comparison1-Comparison2)/Comparison2 < CONSERV_TOL );
	}
#	endif
	return;
}


/****************************************************************************
 *
 * qheat: driver routine for non-equilibrium heating of grains
 *
 * This routine calls GetProbDistr_LowLimit, GetProbDistr_HighLimit
 * (which do the actual non-equilibrium calculations), and does the
 * subsequent quality control.
 *
 * Written by Peter van Hoof (UK, CITA, QUB).
00454 * 00455 ****************************************************************************/ 00456 00457 /* this is the new version of the quantum heating code, used starting Cloudy 96 beta 3 */ 00458 00459 void qheat(/*@out@*/ double qtemp[], /* qtemp[NQGRID] */ 00460 /*@out@*/ double qprob[], /* qprob[NQGRID] */ 00461 /*@out@*/ long int *qnbin, 00462 long int nd) 00463 { 00464 bool lgBoundErr, 00465 lgDelta, 00466 lgNegRate, 00467 lgOK, 00468 lgTried; 00469 long int i, 00470 nWideFail; 00471 QH_Code ErrorCode, 00472 ErrorCode2, 00473 ErrorStart; 00474 double c0, 00475 c1, 00476 c2, 00477 check, 00478 DefFac, 00479 deriv, 00480 *dPdlnT/*[NQGRID]*/, 00481 fwhm, 00482 FwhmRatio, 00483 integral, 00484 minBracket, 00485 maxBracket, 00486 new_tmin, 00487 NumEvents, 00488 oldy, 00489 *Phi/*[NC_ELL]*/, 00490 *PhiDrv/*[NC_ELL]*/, 00491 *phiTilde/*[NC_ELL]*/, 00492 rel_tol, 00493 Tmax, 00494 tol, 00495 Umax, 00496 xx, 00497 y; 00498 00499 00500 DEBUG_ENTRY( "qheat()" ); 00501 00502 /* sanity checks */ 00503 ASSERT( gv.bin[nd]->lgQHeat ); 00504 ASSERT( nd >= 0 && nd < gv.nBin ); 00505 00506 if( trace.lgTrace && trace.lgDustBug ) 00507 { 00508 fprintf( ioQQQ, "\n >>>>qheat called for grain %s\n", gv.bin[nd]->chDstLab ); 00509 } 00510 00511 /* >>chng 01 aug 22, allocate space */ 00512 /* phiTilde is continuum corrected for photo-electric effect, in events/H/s/cell, default depl */ 00513 phiTilde = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) ); 00514 Phi = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) ); 00515 PhiDrv = (double*)MALLOC((size_t)((unsigned)rfield.nupper*sizeof(double)) ); 00516 dPdlnT = (double*)MALLOC((size_t)(NQGRID*sizeof(double)) ); 00517 00518 qheat_init( nd, phiTilde, &check ); 00519 00520 check += gv.bin[nd]->GrainHeatColl-gv.bin[nd]->GrainCoolTherm; 00521 00522 xx = integral = 0.; 00523 c0 = c1 = c2 = 0.; 00524 lgNegRate = false; 00525 oldy = 0.; 00526 tol = 1.; 00527 00528 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */ 00529 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */ 00530 for( i=gv.bin[nd]->qnflux-1; i >= 0; i-- ) 00531 { 00532 /* >>chng 97 jul 17, to summed continuum */ 00533 /* >>chng 00 mar 30, to phiTilde, to keep track of photo-electric effect and collisions, by PvH */ 00534 /* >>chng 01 oct 10, use trapezoidal rule for integrating Phi, reverse direction of integration. 00535 * Phi[i] is now integral from exactly rfield.anu[i] to infinity to second-order precision, PvH */ 00536 /* >>chng 01 oct 30, change normalization of Phi, PhiDrv from <unit>/cm^3 -> <unit>/grain, PvH */ 00537 double fac = ( i < gv.bin[nd]->qnflux-1 ) ? 
1./rfield.widflx[i] : 0.; 00538 /* phiTilde has units events/H/s, y has units events/grain/s/Ryd */ 00539 y = phiTilde[i]*gv.bin[nd]->cnv_H_pGR*fac; 00540 /* PhiDrv[i] = (Phi[i+1]-Phi[i])/(anu[i+1]-anu[i]), units events/grain/s/Ryd */ 00541 PhiDrv[i] = -0.5*(y + oldy); 00542 /* there is a minus sign here because we are integrating from infinity downwards */ 00543 xx -= PhiDrv[i]*(rfield.anu[i+1]-rfield.anu[i]); 00544 /* Phi has units events/grain/s */ 00545 Phi[i] = xx; 00546 00547 # ifndef NDEBUG 00548 /* trapezoidal rule is not needed for integral, this is also second-order correct */ 00549 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD; 00550 # endif 00551 00552 /* c<n> has units Ryd^(n+1)/grain/s */ 00553 c0 += Phi[i]*rfield.widflx[i]; 00554 c1 += Phi[i]*rfield.anu[i]*rfield.widflx[i]; 00555 c2 += Phi[i]*POW2(rfield.anu[i])*rfield.widflx[i]; 00556 00557 lgNegRate = lgNegRate || ( phiTilde[i] < 0. ); 00558 00559 oldy = y; 00560 } 00561 00562 /* sanity check */ 00563 ASSERT( fabs(check-integral)/check <= CONSERV_TOL ); 00564 00565 # if 0 00566 { 00567 char fnam[50]; 00568 FILE *file; 00569 00570 sprintf(fnam,"Lambda_%2.2ld.asc",nd); 00571 file = fopen(fnam,"w"); 00572 for( i=0; i < NDEMS; ++i ) 00573 fprintf(file,"%e %e %e\n", 00574 exp(gv.dsttmp[i]), 00575 ufunct(exp(gv.dsttmp[i]),nd,&lgBoundErr), 00576 exp(gv.bin[nd]->dstems[i])*gv.bin[nd]->cnv_H_pGR/EN1RYD); 00577 fclose(file); 00578 00579 sprintf(fnam,"Phi_%2.2ld.asc",nd); 00580 file = fopen(fnam,"w"); 00581 for( i=0; i < gv.bin[nd]->qnflux; ++i ) 00582 fprintf(file,"%e %e\n", rfield.anu[i],Phi[i]); 00583 fclose(file); 00584 } 00585 # endif 00586 00587 /* Tmax is where p(U) will peak (at least in high intensity limit) */ 00588 Tmax = gv.bin[nd]->tedust; 00589 /* grain enthalpy at peak of p(U), in Ryd */ 00590 Umax = ufunct(Tmax,nd,&lgBoundErr); 00591 ASSERT( !lgBoundErr ); /* this should never happen */ 00592 /* y is dln(Lambda)/dlnT at Tmax */ 00593 spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(Tmax),&y,&lgBoundErr); 00594 ASSERT( !lgBoundErr ); /* this should never happen */ 00595 /* deriv is dLambda/dU at Umax, in 1/grain/s */ 00596 deriv = y*c0/(uderiv(Tmax,nd)*Tmax); 00597 /* estimated FWHM of probability distribution, in Ryd */ 00598 fwhm = sqrt(8.*LN_TWO*c1/deriv); 00599 00600 NumEvents = POW2(fwhm)*c0/(4.*LN_TWO*c2); 00601 FwhmRatio = fwhm/Umax; 00602 00603 /* >>chng 01 nov 15, change ( NumEvents > MAX_EVENTS2 ) --> ( FwhmRatio < FWHM_RATIO2 ), PvH */ 00604 lgDelta = ( FwhmRatio < FWHM_RATIO2 ); 00605 /* high intensity case is always OK since we will use equilibrium treatment */ 00606 lgOK = lgDelta; 00607 00608 ErrorStart = QH_OK; 00609 00610 if( lgDelta ) 00611 { 00612 /* in this case we ignore negative rates, equilibrium treatment is good enough */ 00613 lgNegRate = false; 00614 ErrorStart = MAX2(ErrorStart,QH_DELTA); 00615 } 00616 00617 if( lgNegRate ) 00618 ErrorStart = MAX2(ErrorStart,QH_NEGRATE_FAIL); 00619 00620 ErrorCode = ErrorStart; 00621 00622 if( trace.lgTrace && trace.lgDustBug ) 00623 { 00624 double Rate2 = 0.; 00625 for( int nz=0; nz < gv.bin[nd]->nChrg; nz++ ) 00626 Rate2 += gv.bin[nd]->chrg[nz]->FracPop*gv.bin[nd]->chrg[nz]->HeatingRate2; 00627 00628 fprintf( ioQQQ, " grain heating: %.4e, integral %.4e, total rate %.4e lgNegRate %c\n", 00629 gv.bin[nd]->GrainHeat,integral,Phi[0],TorF(lgNegRate)); 00630 fprintf( ioQQQ, " av grain temp %.4e av grain enthalpy (Ryd) %.4e\n", 00631 gv.bin[nd]->tedust,Umax); 00632 fprintf( ioQQQ, " fwhm^2/(4ln2*c2/c0): %.4e fwhm (Ryd) 
%.4e fwhm/Umax %.4e\n", 00633 NumEvents,fwhm,FwhmRatio ); 00634 fprintf( ioQQQ, " HeatingRate1 %.4e HeatingRate2 %.4e lgQHTooWide %c\n", 00635 gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3, Rate2*gv.bin[nd]->cnv_H_pCM3, 00636 TorF(gv.bin[nd]->lgQHTooWide) ); 00637 } 00638 00639 /* these two variables will bracket qtmin, they should only be needed during the initial search phase */ 00640 minBracket = GRAIN_TMIN; 00641 maxBracket = gv.bin[nd]->tedust; 00642 00643 /* >>chng 02 jan 30, introduced lgTried to avoid running GetProbDistr_HighLimit over and over..., PvH */ 00644 lgTried = false; 00645 /* >>chng 02 aug 06, introduced QH_WIDE_FAIL and nWideFail, PvH */ 00646 nWideFail = 0; 00647 /* >>chng 03 jan 27, introduced DefFac to increase factor for repeated LOW_FAIL's, PvH */ 00648 DefFac = DEF_FAC; 00649 /* >>chng 04 nov 10, introduced rel_tol to increase precision in case of repeated CONV_FAIL's, PvH */ 00650 rel_tol = 1.; 00651 00652 /* if average number of multiple photon events is too high, lgOK is set to true */ 00653 /* >>chng 02 aug 12, added gv.bin[nd]->lgQHTooWide to prevent unnecessary looping here. 00654 * In general the number of integration steps needed to integrate the probability distribution 00655 * will increase monotonically with depth into the cloud. Hence, once the distribution becomes 00656 * too wide to fit into NQGRID steps (which only happens for extremely cold grains in deeply 00657 * shielded conditions) there is no hope of ever converging GetProbDistr_LowLimit and the code 00658 * will waste a lot of CPU time establishing this for every zone again. So once the distribution 00659 * becomes too wide we immediately skip to the analytic approximation to save time, PvH */ 00660 for( i=0; i < LOOP_MAX && !lgOK && !gv.bin[nd]->lgQHTooWide; i++ ) 00661 { 00662 if( gv.bin[nd]->qtmin >= gv.bin[nd]->tedust ) 00663 { 00664 /* >>chng 02 jul 31, was gv.bin[nd]->qtmin = 0.7*gv.bin[nd]->tedust */ 00665 /* >>chng 03 nov 10, changed Umax/exp(+... to Umax*exp(-... to avoid overflow, PvH */ 00666 double Ulo = Umax*exp(-sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO))*fwhm/Umax); 00667 double MinEnth = exp(gv.bin[nd]->DustEnth[0]); 00668 Ulo = MAX2(Ulo,MinEnth); 00669 gv.bin[nd]->qtmin = inv_ufunct(Ulo,nd,&lgBoundErr); 00670 ASSERT( !lgBoundErr ); /* this should never happen */ 00671 /* >>chng 02 jul 30, added this test; prevents problems with ASSERT below, PvH */ 00672 if( gv.bin[nd]->qtmin <= minBracket || gv.bin[nd]->qtmin >= maxBracket ) 00673 gv.bin[nd]->qtmin = sqrt(minBracket*maxBracket); 00674 } 00675 gv.bin[nd]->qtmin = MAX2(gv.bin[nd]->qtmin,GRAIN_TMIN); 00676 00677 ASSERT( minBracket <= gv.bin[nd]->qtmin && gv.bin[nd]->qtmin <= maxBracket ); 00678 00679 ErrorCode = ErrorStart; 00680 00681 /* >>chng 01 nov 15, add ( FwhmRatio >= FWHM_RATIO ), PvH */ 00682 if( FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS ) 00683 { 00684 GetProbDistr_LowLimit(nd,rel_tol,Umax,fwhm,Phi,PhiDrv,qtemp,qprob,dPdlnT,qnbin, 00685 &new_tmin,&nWideFail,&ErrorCode); 00686 00687 /* >>chng 02 jan 07, various changes to improve convergence for very cold grains, PvH */ 00688 if( ErrorCode == QH_DELTA_FAIL && fwhm < Umax && !lgTried ) 00689 { 00690 double dummy; 00691 00692 /* this situation can mean two things: either the photon rate is so high that 00693 * the code needs too many steps to integrate the probability distribution, 00694 * or alternatively, tmin is far too low and the code needs too many steps 00695 * to overcome the rising side of the probability distribution. 
00696 * So we call GetProbDistr_HighLimit first to determine if the former is the 00697 * case; if that fails then the latter must be true and we reset QH_DELTA_FAIL */ 00698 ErrorCode = MAX2(ErrorStart,QH_ANALYTIC); 00699 /* use dummy to avoid losing estimate for new_tmin from GetProbDistr_LowLimit */ 00700 /* >>chng 02 aug 06, introduced STRICT and RELAX, PvH */ 00701 GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy, 00702 &ErrorCode); 00703 00704 if( ErrorCode >= QH_RETRY ) 00705 { 00706 ErrorCode = QH_DELTA_FAIL; 00707 lgTried = true; 00708 } 00709 } 00710 00711 /* >>chng 02 aug 07 major rewrite of the logic below */ 00712 if( ErrorCode < QH_NO_REBIN ) 00713 { 00714 if( new_tmin < minBracket || new_tmin > maxBracket ) 00715 ++nWideFail; 00716 00717 if( nWideFail < WIDE_FAIL_MAX ) 00718 { 00719 if( new_tmin <= minBracket ) 00720 new_tmin = sqrt(gv.bin[nd]->qtmin*minBracket); 00721 if( new_tmin >= maxBracket ) 00722 new_tmin = sqrt(gv.bin[nd]->qtmin*maxBracket); 00723 } 00724 else 00725 { 00726 ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL); 00727 } 00728 00729 if( ErrorCode == QH_CONV_FAIL ) 00730 { 00731 rel_tol *= 0.9; 00732 } 00733 } 00734 else if( ErrorCode == QH_LOW_FAIL ) 00735 { 00736 double help1 = gv.bin[nd]->qtmin*sqrt(DefFac); 00737 double help2 = sqrt(gv.bin[nd]->qtmin*maxBracket); 00738 minBracket = gv.bin[nd]->qtmin; 00739 new_tmin = MIN2(help1,help2); 00740 /* increase factor in case we get repeated LOW_FAIL's */ 00741 DefFac += 1.5; 00742 } 00743 else if( ErrorCode == QH_HIGH_FAIL ) 00744 { 00745 double help = sqrt(gv.bin[nd]->qtmin*minBracket); 00746 maxBracket = gv.bin[nd]->qtmin; 00747 new_tmin = MAX2(gv.bin[nd]->qtmin/DEF_FAC,help); 00748 } 00749 else 00750 { 00751 new_tmin = sqrt(minBracket*maxBracket); 00752 } 00753 } 00754 else 00755 { 00756 GetProbDistr_HighLimit(nd,STRICT,Umax,fwhm,qtemp,qprob,dPdlnT,&tol,qnbin,&new_tmin, 00757 &ErrorCode); 00758 } 00759 00760 gv.bin[nd]->qtmin = new_tmin; 00761 00762 lgOK = ( ErrorCode < QH_RETRY ); 00763 00764 if( ErrorCode >= QH_FATAL ) 00765 break; 00766 00767 if( ErrorCode != QH_LOW_FAIL ) 00768 DefFac = DEF_FAC; 00769 00770 if( trace.lgTrace && trace.lgDustBug ) 00771 { 00772 fprintf( ioQQQ, " GetProbDistr returns code %d\n", ErrorCode ); 00773 if( !lgOK ) 00774 { 00775 fprintf( ioQQQ, " >>>>qheat trying another iteration, qtmin bracket %.4e %.4e", 00776 minBracket,maxBracket ); 00777 fprintf( ioQQQ, " nWideFail %ld\n", nWideFail ); 00778 } 00779 } 00780 } 00781 00782 if( ErrorCode == QH_WIDE_FAIL ) 00783 gv.bin[nd]->lgQHTooWide = true; 00784 00785 /* >>chng 03 jan 17, added test for !lgDelta, PvH */ 00786 /* if( gv.bin[nd]->lgQHTooWide ) */ 00787 if( gv.bin[nd]->lgQHTooWide && !lgDelta ) 00788 ErrorCode = MAX2(ErrorCode,QH_WIDE_FAIL); 00789 00790 /* if( ErrorCode >= QH_RETRY ) */ 00791 /* printf( " *** PROBLEM loop not converged, errorcode %d\n",ErrorCode ); */ 00792 00793 /* The quantum heating code tends to run into trouble when it goes deep into the neutral zone, 00794 * especially if the original spectrum was very hard, as is the case in high excitation PNe or AGN. 00795 * You then get a bipartition in the spectrum where most of the photons have low energy, while 00796 * there still are hard X-ray photons left. 
The fact that the average energy per photon is low 00797 * forces the quantum code to take tiny little steps when integrating the probability distribution, 00798 * while the fact that X-ray photons are still present implies that big temperature spikes still 00799 * occur and hence the temperature distribution is very wide. Therefore the code needs a zillion 00800 * steps to integrate the probability distribution and simply runs out of room. As a last resort 00801 * try the analytic approximation with relaxed constraints used below. */ 00802 /* >>chng 02 oct 03, vary Umax and fwhm to force fit with fwhm/Umax remaining constant */ 00803 /* >>chng 03 jan 17, changed test so that last resort always gets tried when lgOK = lgDelta = false, PvH */ 00804 /* if( !lgOK && FwhmRatio >= FWHM_RATIO && NumEvents <= MAX_EVENTS ) */ 00805 if( !lgOK && !lgDelta ) 00806 { 00807 double Umax2 = Umax*sqrt(tol); 00808 double fwhm2 = fwhm*sqrt(tol); 00809 00810 for( i=0; i < LOOP_MAX; ++i ) 00811 { 00812 double dummy; 00813 00814 ErrorCode2 = MAX2(ErrorStart,QH_ANALYTIC); 00815 GetProbDistr_HighLimit(nd,RELAX,Umax2,fwhm2,qtemp,qprob,dPdlnT,&tol,qnbin,&dummy, 00816 &ErrorCode2); 00817 00818 lgOK = ( ErrorCode2 < QH_RETRY ); 00819 if( lgOK ) 00820 { 00821 gv.bin[nd]->qtmin = dummy; 00822 ErrorCode = ErrorCode2; 00823 break; 00824 } 00825 else 00826 { 00827 Umax2 *= sqrt(tol); 00828 fwhm2 *= sqrt(tol); 00829 } 00830 } 00831 } 00832 00833 if( nzone == 1 ) 00834 gv.bin[nd]->qtmin_zone1 = gv.bin[nd]->qtmin; 00835 00836 gv.bin[nd]->lgUseQHeat = ( lgOK && !lgDelta ); 00837 gv.bin[nd]->lgEverQHeat = ( gv.bin[nd]->lgEverQHeat || gv.bin[nd]->lgUseQHeat ); 00838 00839 if( lgOK ) 00840 { 00841 if( trace.lgTrace && trace.lgDustBug ) 00842 fprintf( ioQQQ, " >>>>qheat converged with code: %d\n", ErrorCode ); 00843 } 00844 else 00845 { 00846 *qnbin = 0; 00847 ++gv.bin[nd]->QHeatFailures; 00848 fprintf( ioQQQ, " PROBLEM qheat did not converge grain %s in zone %ld, error code = %d\n", 00849 gv.bin[nd]->chDstLab,nzone,ErrorCode ); 00850 } 00851 00852 if( gv.QHPunchFile != NULL && ( iterations.lgLastIt || !gv.lgQHPunLast ) ) 00853 { 00854 fprintf( gv.QHPunchFile, "\nDust Temperature Distribution: grain %s zone %ld\n", 00855 gv.bin[nd]->chDstLab,nzone ); 00856 00857 fprintf( gv.QHPunchFile, "Equilibrium temperature: %.2f\n", gv.bin[nd]->tedust ); 00858 00859 if( gv.bin[nd]->lgUseQHeat ) 00860 { 00861 /* >>chng 01 oct 09, remove qprob from output, it depends on step size, PvH */ 00862 fprintf( gv.QHPunchFile, "Number of bins: %ld\n", *qnbin ); 00863 fprintf( gv.QHPunchFile, " Tgrain dP/dlnT\n" ); 00864 for( i=0; i < *qnbin; i++ ) 00865 { 00866 fprintf( gv.QHPunchFile, "%.4e %.4e\n", qtemp[i],dPdlnT[i] ); 00867 } 00868 } 00869 else 00870 { 00871 fprintf( gv.QHPunchFile, "**** quantum heating was not used\n" ); 00872 } 00873 } 00874 00875 free ( phiTilde ); 00876 free ( Phi ); 00877 free ( PhiDrv ); 00878 free ( dPdlnT ); 00879 return; 00880 } 00881 00882 00883 /* initialize phiTilde */ 00884 STATIC void qheat_init(long nd, 00885 /*@out@*/ double phiTilde[], /* phiTilde[rfield.nupper] */ 00886 /*@out@*/ double *check) 00887 { 00888 long i, 00889 nz; 00890 double sum = 0.; 00891 00892 /*@-redef@*/ 00893 enum {DEBUG_LOC=false}; 00894 /*@+redef@*/ 00895 00896 DEBUG_ENTRY( "qheat_init()" ); 00897 00898 /* sanity checks */ 00899 ASSERT( gv.bin[nd]->lgQHeat ); 00900 ASSERT( nd >= 0 && nd < gv.nBin ); 00901 00902 *check = 0.; 00903 00904 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */ 00905 /* >>chng 03 jan 26, gv.qnflux -> 
gv.bin[nd]->qnflux, PvH */ 00906 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 00907 { 00908 phiTilde[i] = 0.; 00909 } 00910 00911 /* fill in auxiliary array for quantum heating routine 00912 * it reshuffles the input spectrum times the cross section to take 00913 * the photo-electric effect into account. this prevents the quantum 00914 * heating routine from having to calculate this effect over and over 00915 * again; it can do a straight integration instead, making the code 00916 * a lot simpler and faster. this initializes the array for non-ionizing 00917 * energies, the reshuffling for higher energies is done in the next loop 00918 * phiTilde has units events/H/s/cell at default depletion */ 00919 00920 double NegHeatingRate = 0.; 00921 00922 for( nz=0; nz < gv.bin[nd]->nChrg; nz++ ) 00923 { 00924 double check1 = 0.; 00925 ChargeBin *gptr = gv.bin[nd]->chrg[nz]; 00926 00927 /* integrate over incident continuum for non-ionizing energies */ 00928 for( i=0; i < MIN2(gptr->ipThresInf,rfield.nflux); i++ ) 00929 { 00930 check1 += rfield.SummedCon[i]*gv.bin[nd]->dstab1[i]*rfield.anu[i]; 00931 phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*gv.bin[nd]->dstab1[i]; 00932 } 00933 00934 /* >>chng 01 mar 02, use new expresssions for grain cooling and absorption 00935 * cross sections following the discussion with Joe Weingartner, PvH */ 00936 for( i=gptr->ipThresInf; i < rfield.nflux; i++ ) 00937 { 00938 long ipLo2 = gptr->ipThresInfVal; 00939 double cs1 = ( i >= ipLo2 ) ? gv.bin[nd]->dstab1[i]*gptr->yhat_primary[i] : 0.; 00940 00941 check1 += rfield.SummedCon[i]*gptr->fac1[i]; 00942 /* this accounts for the photons that are fully absorbed by grain */ 00943 phiTilde[i] += gptr->FracPop*rfield.SummedCon[i]*MAX2(gv.bin[nd]->dstab1[i]-cs1,0.); 00944 00945 /* >>chng 01 oct 10, use bisection search to find ip. On C scale now */ 00946 00947 /* this accounts for photons that eject an electron from the valence band */ 00948 if( cs1 > 0. ) 00949 { 00950 /* we treat photo-ejection and all subsequent de-excitation cascades 00951 * from the conduction/valence band as one simultaneous event */ 00952 /* the condition cs1 > 0. assures i >= ipLo2 */ 00953 /* ratio is number of ejected electrons per primary ionization */ 00954 double ratio = ( gv.lgWD01 ) ? 1. : gptr->yhat[i]/gptr->yhat_primary[i]; 00955 /* ehat is average energy of ejected electron at infinity */ 00956 double ehat = gptr->ehat[i]; 00957 double cool1, sign = 1.; 00958 realnum xx; 00959 00960 if( gptr->DustZ <= -1 ) 00961 cool1 = gptr->ThresSurf + gptr->PotSurf + ehat; 00962 else 00963 cool1 = gptr->ThresSurfVal + gptr->PotSurf + ehat; 00964 /* secondary electrons are assumed to have the same Elo and Ehi as the 00965 * primary electrons that excite them. This neglects the threshold for 00966 * the ejection of the secondary electron and can cause xx to become 00967 * negative if Ehi is less than that threshold. To conserve energy we 00968 * will simply assume a negative rate here. 
Since secondary electrons 00969 * are generally not important this should have little impact on the 00970 * overall temperature distribution */ 00971 xx = rfield.anu[i] - (realnum)(ratio*cool1); 00972 if( xx < 0.f ) 00973 { 00974 xx = -xx; 00975 sign = -1.; 00976 } 00977 long ipLo = hunt_bisect( gv.anumin, i+1, xx ); 00978 phiTilde[ipLo] += sign*gptr->FracPop*rfield.SummedCon[i]*cs1; 00979 } 00980 00981 /* no need to account for photons that eject an electron from the conduction band */ 00982 /* >>chng 01 dec 11, cool2 always equal to rfield.anu[i] -> no grain heating */ 00983 } 00984 00985 *check += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3; 00986 00987 sum += gptr->FracPop*check1*EN1RYD*gv.bin[nd]->cnv_H_pCM3; 00988 00989 if( DEBUG_LOC ) 00990 { 00991 double integral = 0.; 00992 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 00993 { 00994 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD; 00995 } 00996 dprintf( ioQQQ, " integral test 1: integral %.6e %.6e\n", integral, sum ); 00997 } 00998 00999 /* add quantum heating due to recombination of electrons, subtract thermionic cooling */ 01000 01001 /* gptr->HeatingRate2 is net heating rate in erg/H/s at standard depl 01002 * includes contributions for recombining electrons, autoionizing electrons 01003 * subtracted by thermionic emissions here since it is inverse process 01004 * 01005 * NB - in extreme conditions this rate may be negative (if there 01006 * is an intense radiation field leading to very hot grains, but no ionizing 01007 * photons, hence very few free electrons). we assume that the photon rates 01008 * are high enough under those circumstances to avoid phiTilde becoming negative, 01009 * but we will check that in qheat1 anyway. */ 01010 01011 /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, pah_crash.in, PvH */ 01012 if( gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat ) 01013 { 01014 double *RateArr,Sum,ESum,DSum,Delta,E_av2,Corr; 01015 double fac = BOLTZMANN/EN1RYD*phycon.te; 01016 /* E0 is barrier that electron needs to overcome, zero for positive grains */ 01017 /* >>chng 03 jan 23, added second term to correctly account for autoionizing states 01018 * where ThresInfInc is negative, tested in small_grain.in, PvH */ 01019 double E0 = -(MIN2(gptr->PotSurfInc,0.) + MIN2(gptr->ThresInfInc,0.)); 01020 /* >>chng 01 mar 02, this should be energy gap between top electron and infinity, PvH */ 01021 /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */ 01022 /* >>chng 03 jan 23, replaced -E0 with MIN2(PotSurfInc[nz],0.), PvH */ 01023 double Einf = gptr->ThresInfInc + MIN2(gptr->PotSurfInc,0.); 01024 /* this is average energy deposited by one event, in erg 01025 * this value is derived from distribution assumed here, and may 01026 * not be the same as HeatElectrons/(CollisionRateElectr*eta) !! 
*/ 01027 /* >>chng 01 nov 21, use correct barrier: ThresInf[nz] -> ThresInfInc[nz], PvH */ 01028 /* >>chng 03 jan 23, replaced ThresInfInc[nz] with MAX2(ThresInfInc[nz],0.), PvH */ 01029 double E_av = MAX2(gptr->ThresInfInc,0.)*EN1RYD + 2.*BOLTZMANN*phycon.te; 01030 /* this is rate in events/H/s at standard depletion */ 01031 double rate = gptr->HeatingRate2/E_av; 01032 01033 double ylo = -exp(-E0/fac); 01034 /* this is highest kinetic energy of electron that can be represented in phiTilde */ 01035 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */ 01036 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */ 01037 double Ehi = gv.anumax[gv.bin[nd]->qnflux-1]-Einf; 01038 double yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac); 01039 /* renormalize rate so that integral over phiTilde*anu gives correct total energy */ 01040 rate /= yhi-ylo; 01041 01042 /* correct for fractional population of this charge state */ 01043 rate *= gptr->FracPop; 01044 01045 /* >>chng 03 jan 24, add code to correct for discretization errors, hotdust.in, PvH */ 01046 RateArr=(double*)MALLOC((size_t)((unsigned)gv.bin[nd]->qnflux*sizeof(double))); 01047 Sum = ESum = DSum = 0.; 01048 01049 /* >>chng 04 jan 21, replaced gv.bin[nd]->qnflux -> gv.bin[nd]->qnflux2, PvH */ 01050 for( i=0; i < gv.bin[nd]->qnflux2; i++ ) 01051 { 01052 Ehi = gv.anumax[i] - Einf; 01053 if( Ehi >= E0 ) 01054 { 01055 /* Ehi is kinetic energy of electron at infinity */ 01056 yhi = ((E0-Ehi)/fac-1.)*exp(-Ehi/fac); 01057 /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */ 01058 RateArr[i] = rate*MAX2(yhi-ylo,0.); 01059 Sum += RateArr[i]; 01060 ESum += rfield.anu[i]*RateArr[i]; 01061 # ifndef NDEBUG 01062 DSum += rfield.widflx[i]*RateArr[i]; 01063 # endif 01064 ylo = yhi; 01065 } 01066 else 01067 { 01068 RateArr[i] = 0.; 01069 } 01070 } 01071 E_av2 = ESum/Sum*EN1RYD; 01072 Delta = DSum/Sum*EN1RYD; 01073 ASSERT( fabs(E_av-E_av2) <= Delta ); 01074 Corr = E_av/E_av2; 01075 01076 for( i=0; i < gv.bin[nd]->qnflux2; i++ ) 01077 { 01078 phiTilde[i] += RateArr[i]*Corr; 01079 } 01080 01081 sum += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3; 01082 01083 if( DEBUG_LOC ) 01084 { 01085 double integral = 0.; 01086 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 01087 { 01088 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD; 01089 } 01090 dprintf( ioQQQ, " integral test 2: integral %.6e %.6e\n", integral, sum ); 01091 } 01092 01093 free( RateArr ); 01094 } 01095 else 01096 { 01097 NegHeatingRate += gptr->FracPop*gptr->HeatingRate2*gv.bin[nd]->cnv_H_pCM3; 01098 } 01099 } 01100 01101 /* ============================================================================= */ 01102 01103 /* add quantum heating due to molecule/ion collisions */ 01104 01105 /* gv.bin[nd]->HeatingRate1 is heating rate in erg/H/s at standard depl 01106 * includes contributions from molecules/neutral atoms and recombining ions 01107 * 01108 * in fully ionized conditions electron heating rates will be much higher 01109 * than ion and molecule rates since electrons are so much faster and grains 01110 * tend to be positive. in non-ionized conditions the main contribution will 01111 * come from neutral atoms and molecules, so it is appropriate to treat both 01112 * the same. in fully ionized conditions we don't care since unimportant. 01113 * 01114 * NB - if grains are hotter than ambient gas, the heating rate may become negative. 
01115 * if photon rates are not high enough to prevent phiTilde from becoming negative, 01116 * we will raise a flag while calculating the quantum heating in qheat1 */ 01117 01118 /* >>chng 03 nov 06, check for extremely low HeatingRate and save CPU time, PvH */ 01119 if( gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3 > 0.05*CONSERV_TOL*gv.bin[nd]->GrainHeat ) 01120 { 01121 /* limits for Taylor expansion of (1+x)*exp(-x) */ 01122 /* these choices will assure only 6 digits precision */ 01123 const double LIM2 = pow(3.e-6,1./3.); 01124 const double LIM3 = pow(8.e-6,1./4.); 01125 /* if gas temperature is higher than grain temperature we will 01126 * consider Maxwell-Boltzmann distribution of incoming particles 01127 * and ignore distribution of outgoing particles, if grains 01128 * are hotter than ambient gas, we use reverse treatment */ 01129 double fac = BOLTZMANN/EN1RYD*MAX2(phycon.te,gv.bin[nd]->tedust); 01130 /* this is average energy deposited/extracted by one event, in erg */ 01131 double E_av = 2.*BOLTZMANN*MAX2(phycon.te,gv.bin[nd]->tedust); 01132 /* this is rate in events/H/s at standard depletion */ 01133 double rate = gv.bin[nd]->HeatingRate1/E_av; 01134 01135 double ylo = -1.; 01136 /* this is highest energy of incoming/outgoing particle that can be represented in phiTilde */ 01137 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */ 01138 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */ 01139 double Ehi = gv.anumax[gv.bin[nd]->qnflux-1]; 01140 double yhi = -(Ehi/fac+1.)*exp(-Ehi/fac); 01141 /* renormalize rate so that integral over phiTilde*anu gives correct total energy */ 01142 rate /= yhi-ylo; 01143 01144 for( i=0; i < gv.bin[nd]->qnflux2; i++ ) 01145 { 01146 /* Ehi is kinetic energy of incoming/outgoing particle 01147 * we assume that Ehi-E0 is deposited/extracted from grain */ 01148 /* Ehi = gv.anumax[i]; */ 01149 double x = gv.anumax[i]/fac; 01150 /* (1+x)*exp(-x) = 1 - 1/2*x^2 + 1/3*x^3 - 1/8*x^4 + O(x^5) 01151 * = 1 - Sum_n=2^infty (-x)^n/(n*(n-2)!) */ 01152 if( x > LIM3 ) 01153 yhi = -(x+1.)*exp(-x); 01154 else if( x > LIM2 ) 01155 yhi = -(((1./3.)*x - 0.5)*x*x + 1.); 01156 else 01157 yhi = -(1. - 0.5*x*x); 01158 01159 /* >>chng 01 mar 24, use MAX2 to protect against roundoff error, PvH */ 01160 phiTilde[i] += rate*MAX2(yhi-ylo,0.); 01161 ylo = yhi; 01162 } 01163 01164 sum += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3; 01165 01166 if( DEBUG_LOC ) 01167 { 01168 double integral = 0.; 01169 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 01170 { 01171 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD; 01172 } 01173 dprintf( ioQQQ, " integral test 3: integral %.6e %.6e\n", integral, sum ); 01174 } 01175 } 01176 else 01177 { 01178 NegHeatingRate += gv.bin[nd]->HeatingRate1*gv.bin[nd]->cnv_H_pCM3; 01179 } 01180 01181 /* here we account for the negative heating rates, we simply do that by scaling the entire 01182 * phiTilde array down by a constant factor such that the total amount of energy is conserved 01183 * This treatment assures that phiTilde never goes negative, which avoids problems further on */ 01184 if( NegHeatingRate < 0. 
) 01185 { 01186 double scale_fac = (sum+NegHeatingRate)/sum; 01187 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 01188 phiTilde[i] *= scale_fac; 01189 01190 sum += NegHeatingRate; 01191 01192 if( DEBUG_LOC ) 01193 { 01194 double integral = 0.; 01195 for( i=0; i < gv.bin[nd]->qnflux; i++ ) 01196 { 01197 integral += phiTilde[i]*gv.bin[nd]->cnv_H_pCM3*rfield.anu[i]*EN1RYD; 01198 } 01199 dprintf( ioQQQ, " integral test 4: integral %.6e %.6e\n", integral, sum ); 01200 } 01201 } 01202 01203 return; 01204 } 01205 01206 01207 /******************************************************************************************* 01208 * 01209 * GetProbDistr_LowLimit: main routine for calculating non-equilibrium heating of grains 01210 * 01211 * This routine implements the algorithm outlined in: 01212 * >>refer grain physics Guhathakurtha & Draine, 1989, ApJ, 345, 230 01213 * 01214 * The original (fortran) version of the code was written by Kevin Volk. 01215 * 01216 * Heavily modified and adapted for new style grains by Peter van Hoof. 01217 * 01218 *******************************************************************************************/ 01219 01220 STATIC void GetProbDistr_LowLimit(long int nd, 01221 double rel_tol, 01222 double Umax, 01223 double fwhm, 01224 /*@in@*/ double Phi[], /* Phi[NQGRID] */ 01225 /*@in@*/ double PhiDrv[], /* PhiDrv[NQGRID] */ 01226 /*@out@*/ double qtemp[], /* qtemp[NQGRID] */ 01227 /*@out@*/ double qprob[], /* qprob[NQGRID] */ 01228 /*@out@*/ double dPdlnT[], /* dPdlnT[NQGRID] */ 01229 /*@out@*/ long int *qnbin, 01230 /*@out@*/ double *new_tmin, 01231 long *nWideFail, 01232 /*@out@*/ QH_Code *ErrorCode) 01233 { 01234 bool lgAllNegSlope, 01235 lgBoundErr; 01236 long int j, 01237 k, 01238 l, 01239 nbad, 01240 nbin, 01241 nend=0, 01242 nmax, 01243 nok, 01244 nstart=0, 01245 nstart2=0; 01246 double dCool, 01247 delu[NQGRID], 01248 dlnLdlnT, 01249 dlnpdlnU, 01250 fac = 0., 01251 Lambda[NQGRID], 01252 maxVal, 01253 NextStep, 01254 p[NQGRID], 01255 qtmin1, 01256 RadCooling, 01257 sum, 01258 u1[NQGRID], 01259 y; 01260 01261 01262 DEBUG_ENTRY( "GetProbDistr_LowLimit()" ); 01263 01264 /* sanity checks */ 01265 ASSERT( nd >= 0 && nd < gv.nBin ); 01266 01267 if( trace.lgTrace && trace.lgDustBug ) 01268 { 01269 fprintf( ioQQQ, " GetProbDistr_LowLimit called for grain %s\n", gv.bin[nd]->chDstLab ); 01270 fprintf( ioQQQ, " got qtmin1 %.4e\n", gv.bin[nd]->qtmin); 01271 } 01272 01273 qtmin1 = gv.bin[nd]->qtmin; 01274 qtemp[0] = qtmin1; 01275 /* u1 holds enthalpy in Ryd/grain */ 01276 u1[0] = ufunct(qtemp[0],nd,&lgBoundErr); 01277 ASSERT( !lgBoundErr ); /* this should never happen */ 01278 /* >>chng 00 mar 22, taken out factor 4, factored in hden and dstAbund 01279 * interpolate in dstems array instead of integrating explicitly, by PvH */ 01280 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&y,&lgBoundErr); 01281 ASSERT( !lgBoundErr ); /* this should never happen */ 01282 /* Lambda holds the radiated energy for grains in this bin, in Ryd/s/grain */ 01283 Lambda[0] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD; 01284 /* set up first step of integration */ 01285 /* >>chng 01 nov 14, set to 2.*Lambda[0]/Phi[0] instead of u1[0], 01286 * this choice assures that p[1] doesn't make a large jump from p[0], PvH */ 01287 delu[0] = 2.*Lambda[0]/Phi[0]; 01288 p[0] = PROB_CUTOFF_LO; 01289 dPdlnT[0] = p[0]*qtemp[0]*uderiv(qtemp[0],nd); 01290 RadCooling = 0.5*p[0]*Lambda[0]*delu[0]; 01291 NextStep = 0.01*Lambda[0]/Phi[0]; 01292 /* >>chng 03 nov 10, added extra safeguard against stepsize too 
small, PvH */ 01293 if( NextStep < 0.05*DBL_EPSILON*u1[0] ) 01294 { 01295 *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL); 01296 return; 01297 } 01298 else if( NextStep < 5.*DBL_EPSILON*u1[0] ) 01299 { 01300 /* try a last resort... */ 01301 NextStep *= 100.; 01302 } 01303 01304 nbad = 0; 01305 k = 0; 01306 01307 *qnbin = 0; 01308 *new_tmin = qtmin1; 01309 lgAllNegSlope = true; 01310 maxVal = dPdlnT[0]; 01311 nmax = 0; 01312 01313 /* this test neglects a negative contribution which is impossible to calculate, so it may 01314 * fail to detect cases where the probability distribution starts dropping immediately, 01315 * we will use a second test using the variable lgAllNegSlope below to catch those cases, PvH */ 01316 spldrv_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[0]),&dlnLdlnT,&lgBoundErr); 01317 ASSERT( !lgBoundErr ); /* this should never happen */ 01318 dlnpdlnU = u1[0]*Phi[0]/Lambda[0] - dlnLdlnT*u1[0]/(qtemp[0]*uderiv(qtemp[0],nd)); 01319 if( dlnpdlnU < 0. ) 01320 { 01321 /* >>chng 03 nov 06, confirm this by integrating first step..., pah_crash.in, PvH */ 01322 (void)TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr); 01323 dPdlnT[2] = p[2]*qtemp[2]*uderiv(qtemp[2],nd); 01324 01325 if( dPdlnT[2] < dPdlnT[0] ) { 01326 /* if dPdlnT starts falling immediately, 01327 * qtmin1 was too high and convergence is impossible */ 01328 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL); 01329 return; 01330 } 01331 } 01332 01333 /* NB NB -- every break in this loop should set *ErrorCode (except for regular stop criterion) !! */ 01334 for( l=0; l < MAX_LOOP; ++l ) 01335 { 01336 double RelError = TryDoubleStep(u1,delu,p,qtemp,Lambda,Phi,PhiDrv,NextStep,&dCool,k,nd,&lgBoundErr); 01337 01338 /* this happens if the grain temperature in qtemp becomes higher than GRAIN_TMAX 01339 * nothing that TryDoubleStep returns can be trusted, so this check should be first */ 01340 if( lgBoundErr ) 01341 { 01342 nbad += 2; 01343 *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL); 01344 break; 01345 } 01346 01347 /* estimate new stepsize */ 01348 if( RelError > rel_tol*QHEAT_TOL ) 01349 { 01350 nbad += 2; 01351 01352 /* step is rejected, decrease stepsize and try again */ 01353 NextStep *= sqrt(0.9*rel_tol*QHEAT_TOL/RelError); 01354 01355 /* stepsize too small, this can happen at extreme low temperatures */ 01356 if( NextStep < 5.*DBL_EPSILON*u1[k] ) 01357 { 01358 *ErrorCode = MAX2(*ErrorCode,QH_STEP_FAIL); 01359 break; 01360 } 01361 01362 continue; 01363 } 01364 else 01365 { 01366 /* step was OK, adjust stepsize */ 01367 k += 2; 01368 01369 /* >>chng 03 nov 10, safeguard against division by zero, PvH */ 01370 NextStep *= MIN2(pow(0.9*rel_tol*QHEAT_TOL/MAX2(RelError,1.e-50),1./3.),4.); 01371 NextStep = MIN2(NextStep,Lambda[k]/Phi[0]); 01372 } 01373 01374 dPdlnT[k-1] = p[k-1]*qtemp[k-1]*uderiv(qtemp[k-1],nd); 01375 dPdlnT[k] = p[k]*qtemp[k]*uderiv(qtemp[k],nd); 01376 01377 lgAllNegSlope = lgAllNegSlope && ( dPdlnT[k] < dPdlnT[k-2] ); 01378 01379 if( dPdlnT[k-1] > maxVal ) 01380 { 01381 maxVal = dPdlnT[k-1]; 01382 nmax = k-1; 01383 } 01384 if( dPdlnT[k] > maxVal ) 01385 { 01386 maxVal = dPdlnT[k]; 01387 nmax = k; 01388 } 01389 01390 RadCooling += dCool; 01391 01392 // if( nzone >= 24 && nd == 0 ) { 01393 // printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k-1,qtemp[k-1],u1[k-1],p[k-1],dPdlnT[k-1]); 01394 // printf(" k %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",k,qtemp[k],u1[k],p[k],dPdlnT[k]); 01395 // } 01396 01397 /* if qtmin is far too low, p[k] can become 
extremely large, exceeding 01398 * even double precision range. the following check should prevent overflows */ 01399 /* >>chng 01 nov 07, sqrt(DBL_MAX) -> sqrt(DBL_MAX/100.) so that sqrt(p[k]*p[k+1]) is safe */ 01400 if( p[k] > sqrt(DBL_MAX/100.) ) 01401 { 01402 *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL); 01403 break; 01404 01405 #if 0 01406 /* >>chng 01 apr 21, change DBL_MAX -> DBL_MAX/100. to work around 01407 * a bug in the Compaq C compiler V6.3-025 when invoked with -fast, PvH */ 01408 for( j=0; j <= k; j++ ) 01409 { 01410 p[j] /= DBL_MAX/100.; 01411 dPdlnT[j] /= DBL_MAX/100.; 01412 } 01413 RadCooling /= DBL_MAX/100.; 01414 #endif 01415 } 01416 01417 /* this may catch a bug in the Compaq C compiler V6.3-025 01418 * if this gets triggered, try compiling with -ieee */ 01419 ASSERT( p[k] > 0. && dPdlnT[k] > 0. && RadCooling > 0. ); 01420 01421 /* do a check for negative slope and if there will be enough room to store results */ 01422 /* >>chng 02 jan 30, do this test only every NQTEST steps, PvH */ 01423 /* if( k >= MIN2(NQTEST,NQGRID-2) ) */ 01424 if( k > 0 && k%NQTEST == 0 ) 01425 { 01426 double wid, avStep, factor; 01427 /* >>chng 02 jul 31, do additional test for HIGH_FAIL, 01428 * first test before main loop doesn't catch everything, PvH */ 01429 if( lgAllNegSlope ) 01430 { 01431 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL); 01432 break; 01433 } 01434 01435 /* this is a lower limit for the total width of the probability distr */ 01436 /* >>chng 02 jan 30, corrected calculation of wid and avStep, PvH */ 01437 wid = (sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO)) + 01438 sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO)))*fwhm/Umax; 01439 avStep = log(u1[k]/u1[0])/(double)k; 01440 /* make an estimate for the number of steps needed */ 01441 /* >>chng 02 jan 30, included factor 1.5 because stepsize increases near peak, PvH */ 01442 /* >>chng 02 aug 06, changed 1.5 to sliding scale because of laqheat2.in test, PvH */ 01443 factor = 1.1 + 3.9*(1.0 - sqrt((double)k/(double)NQGRID)); 01444 if( wid/avStep > factor*(double)NQGRID ) 01445 { 01446 *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL); 01447 break; 01448 } 01449 } 01450 01451 /* if we run out of room to store results, do regular break 01452 * the code below will sort out if integration is valid or not */ 01453 if( k >= NQGRID-2 ) 01454 { 01455 *ErrorCode = MAX2(*ErrorCode,QH_ARRAY_FAIL); 01456 break; 01457 } 01458 01459 /* force thermal equilibrium of the grains */ 01460 fac = RadCooling*gv.bin[nd]->cnv_GR_pCM3*EN1RYD/gv.bin[nd]->GrainHeat; 01461 01462 /* this is regular stop criterion */ 01463 if( dPdlnT[k] < dPdlnT[k-1] && dPdlnT[k]/fac < PROB_CUTOFF_HI ) 01464 { 01465 break; 01466 } 01467 } 01468 01469 if( l == MAX_LOOP ) 01470 *ErrorCode = MAX2(*ErrorCode,QH_LOOP_FAIL); 01471 01472 nok = k; 01473 nbin = k+1; 01474 01475 /* there are insufficient bins to attempt rebinning */ 01476 if( *ErrorCode < QH_NO_REBIN && nbin < NQMIN ) 01477 *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL); 01478 01479 /* >>chng 02 aug 07, do some basic checks on the distribution first */ 01480 if( *ErrorCode < QH_NO_REBIN ) 01481 ScanProbDistr(u1,dPdlnT,nbin,maxVal,nmax,&nstart,&nstart2,&nend,nWideFail,ErrorCode); 01482 01483 if( *ErrorCode >= QH_NO_REBIN ) 01484 { 01485 return; 01486 } 01487 01488 for( j=0; j < nbin; j++ ) 01489 { 01490 p[j] /= fac; 01491 dPdlnT[j] /= fac; 01492 } 01493 RadCooling /= fac; 01494 01495 /* >>chng 02 aug 08, moved RebinQHeatResults from here further down, this improves new_tmin estimate */ 01496 *new_tmin = 0.; 01497 for( j=nstart; j < nbin; j++ ) 01498 { 
01499 if( dPdlnT[j] < PROB_CUTOFF_LO ) 01500 { 01501 *new_tmin = qtemp[j]; 01502 } 01503 else 01504 { 01505 if( j == nstart ) 01506 { 01507 if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO ) 01508 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL); 01509 01510 /* >>chng 02 aug 11, use nstart2 for more reliable extrapolation */ 01511 if( dPdlnT[nstart2] < 0.999*dPdlnT[nstart2+NSTARTUP] ) 01512 { 01513 /* >>chng 02 aug 09, new formula for extrapolating new_tmin, PvH */ 01514 /* this assumes that at low temperatures the behaviour 01515 * is as follows: dPdlnT(T) = C1 * exp( -C2/T**3 ) */ 01516 double T1 = qtemp[nstart2]; 01517 double T2 = qtemp[nstart2+NSTARTUP]; 01518 double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]); 01519 double c2 = delta_y/(1./POW3(T1)-1./POW3(T2)); 01520 double help = c2/POW3(T1) + log(dPdlnT[nstart2]/PROB_CUTOFF_LO); 01521 *new_tmin = pow(c2/help,1./3.); 01522 } 01523 01524 /* >>chng 04 nov 09, in case of lower bound failure, assure qtmin is lowered, PvH */ 01525 if( dPdlnT[j] > SAFETY*PROB_CUTOFF_LO && *new_tmin >= qtmin1 ) 01526 { 01527 double delta_x = log(qtemp[nstart2+NSTARTUP]/qtemp[nstart2]); 01528 double delta_y = log(dPdlnT[nstart2+NSTARTUP]/dPdlnT[nstart2]); 01529 delta_x *= log(PROB_CUTOFF_LO/dPdlnT[nstart2])/delta_y; 01530 *new_tmin = qtemp[nstart2]*exp(delta_x); 01531 if( *new_tmin < qtmin1 ) 01532 /* in general this estimate will be too low -> use geometric mean */ 01533 *new_tmin = sqrt( *new_tmin*qtmin1 ); 01534 else 01535 /* last resort... */ 01536 *new_tmin = qtmin1/DEF_FAC; 01537 } 01538 } 01539 break; 01540 } 01541 } 01542 *new_tmin = MAX3(*new_tmin,qtmin1/DEF_FAC,GRAIN_TMIN); 01543 01544 ASSERT( *new_tmin < gv.bin[nd]->tedust ); 01545 01546 /* >>chng 02 jan 30, prevent excessive looping when prob distribution simply won't fit, PvH */ 01547 if( dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI ) 01548 { 01549 if( *ErrorCode == QH_ARRAY_FAIL || *ErrorCode == QH_LOOP_FAIL ) 01550 { 01551 ++(*nWideFail); 01552 01553 if( *nWideFail < WIDE_FAIL_MAX ) 01554 { 01555 /* this indicates that low end was OK, but we ran out of room 01556 * to store the high end -> try GetProbDistr_HighLimit instead */ 01557 *ErrorCode = MAX2(*ErrorCode,QH_DELTA_FAIL); 01558 } 01559 else 01560 { 01561 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL); 01562 } 01563 } 01564 else 01565 { 01566 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL); 01567 } 01568 } 01569 01570 /* >>chng 01 may 11, rebin the quantum heating results 01571 * 01572 * for grains in intense radiation fields, the code needs high resolution for stability 01573 * and therefore produces lots of small bins, even though the grains never make large 01574 * excurions from the equilibrium temperature; adding in the resulting spectra in RT_diffuse 01575 * takes up an excessive amount of CPU time where most CPU is spent on grains for which 01576 * the quantum treatment matters least, and moreover on temperature bins with very low 01577 * probability; rebinning the results on a coarser grid should help reduce the overhead */ 01578 /* >>chng 02 aug 07, use nstart and nend while rebinning */ 01579 01580 nbin = RebinQHeatResults(nd,nstart,nend,p,qtemp,qprob,dPdlnT,u1,delu,Lambda,ErrorCode); 01581 01582 /* >>chng 01 jul 13, add fail-safe for failure in RebinQHeatResults */ 01583 if( nbin == 0 ) 01584 { 01585 return; 01586 } 01587 01588 *qnbin = nbin; 01589 01590 sum = 0.; 01591 for( j=0; j < nbin; j++ ) 01592 { 01593 sum += qprob[j]; 01594 } 01595 01596 /* the fact that the probability normalization fails probably indicates 01597 * that the distribution 
is too close to a delta function to resolve */ 01598 if( fabs(sum-1.) > PROB_TOL ) 01599 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL); 01600 01601 if( trace.lgTrace && trace.lgDustBug ) 01602 { 01603 fprintf( ioQQQ, 01604 " zone %ld %s nbin %ld nok %ld nbad %ld total prob %.4e rel_tol %.3e new_tmin %.4e\n", 01605 nzone,gv.bin[nd]->chDstLab,nbin,nok,nbad,sum,rel_tol,*new_tmin ); 01606 } 01607 return; 01608 } 01609 01610 01611 /* try two consecutive integration steps using stepsize "step/2." (yielding p[k]), 01612 * and also one double integration step using stepsize "step" (yielding p2k). 01613 * the difference fabs(p2k-p[k])/(3.*p[k]) can be used to estimate the relative 01614 * accuracy of p[k] and will be used to adapt the stepsize to an optimal value */ 01615 STATIC double TryDoubleStep(double u1[], 01616 double delu[], 01617 double p[], 01618 double qtemp[], 01619 double Lambda[], 01620 double Phi[], 01621 double PhiDrv[], 01622 double step, 01623 /*@out@*/ double *cooling, 01624 long index, 01625 long nd, 01626 /*@out@*/ bool *lgBoundFail) 01627 { 01628 long i, 01629 j, 01630 jlo, 01631 k=-1000; 01632 double bval_jk, 01633 cooling2, 01634 p2k, 01635 p_max, 01636 RelErrCool, 01637 RelErrPk, 01638 sum, 01639 sum2 = -DBL_MAX, 01640 trap1, 01641 trap12 = -DBL_MAX, 01642 trap2, 01643 uhi, 01644 ulo, 01645 umin, 01646 y; 01647 01648 DEBUG_ENTRY( "TryDoubleStep()" ); 01649 01650 /* sanity checks */ 01651 ASSERT( index >= 0 && index < NQGRID-2 && nd >= 0 && nd < gv.nBin && step > 0. ); 01652 01653 ulo = rfield.anu[0]; 01654 /* >>chng 01 nov 29, rfield.nflux -> gv.qnflux, PvH */ 01655 /* >>chng 03 jan 26, gv.qnflux -> gv.bin[nd]->qnflux, PvH */ 01656 uhi = rfield.anu[gv.bin[nd]->qnflux-1]; 01657 01658 /* >>chng 01 nov 21, skip initial bins if they have very low probability */ 01659 p_max = 0.; 01660 for( i=0; i <= index; i++ ) 01661 p_max = MAX2(p_max,p[i]); 01662 jlo = 0; 01663 while( p[jlo] < PROB_CUTOFF_LO*p_max ) 01664 jlo++; 01665 01666 for( i=1; i <= 2; i++ ) 01667 { 01668 bool lgErr; 01669 long ipLo = 0; 01670 long ipHi = gv.bin[nd]->qnflux-1; 01671 k = index + i; 01672 delu[k] = step/2.; 01673 u1[k] = u1[k-1] + delu[k]; 01674 qtemp[k] = inv_ufunct(u1[k],nd,lgBoundFail); 01675 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(qtemp[k]),&y,&lgErr); 01676 *lgBoundFail = *lgBoundFail || lgErr; 01677 Lambda[k] = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD; 01678 01679 sum = sum2 = 0.; 01680 trap1 = trap2 = trap12 = 0.; 01681 01682 /* this loop uses up a large fraction of the total CPU time, it should be well optimized */ 01683 for( j=jlo; j < k; j++ ) 01684 { 01685 umin = u1[k] - u1[j]; 01686 01687 if( umin >= uhi ) 01688 { 01689 /* for increasing j, umin will decrease monotonically. If ( umin > uhi ) holds for 01690 * the current value of j, it must have held for the previous value as well. Hence 01691 * both trap1 and trap2 are zero at this point and we would only be adding zero 01692 * to sum. Therefore we skip this step to save CPU time */ 01693 continue; 01694 } 01695 else if( umin > ulo ) 01696 { 01697 /* do a bisection search such that rfield.anu[ipLo] <= umin < rfield.anu[ipHi] 01698 * ipoint doesn't always give the correct index into anu (it works for AnuOrg) 01699 * bisection search is also faster, which is important here to save CPU time. 
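 * (the hunt loop below first expands the bracket downward in powers of two until
 * rfield.anu[ipLo] <= umin holds, and only then narrows [ipLo,ipHi] down to adjacent
 * indices by ordinary bisection)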
01700 * on the first iteration ipLo equals 0 and the first while loop will be skipped; 01701 * after that umin is monotonically decreasing, and ipHi is retained from the 01702 * previous iteration since it is a valid upper limit; ipLo will equal ipHi-1 */ 01703 long ipStep = 1; 01704 /* >>chng 03 feb 03 rjrw: hunt for lower bracket */ 01705 while( rfield.anu[ipLo] > (realnum)umin ) 01706 { 01707 ipHi = ipLo; 01708 ipLo -= ipStep; 01709 if( ipLo <= 0 ) 01710 { 01711 ipLo = 0; 01712 break; 01713 } 01714 ipStep *= 2; 01715 } 01716 /* now do regular bisection search */ 01717 while( ipHi-ipLo > 1 ) 01718 { 01719 long ipMd = (ipLo+ipHi)/2; 01720 if( rfield.anu[ipMd] > (realnum)umin ) 01721 ipHi = ipMd; 01722 else 01723 ipLo = ipMd; 01724 } 01725 bval_jk = Phi[ipLo] + (umin - rfield.anu[ipLo])*PhiDrv[ipLo]; 01726 } 01727 else 01728 { 01729 bval_jk = Phi[0]; 01730 } 01731 01732 /* these two quantities are needed to take double step from index -> index+2 */ 01733 trap12 = trap1; 01734 sum2 = sum; 01735 01736 /* bval_jk*gv.bin[nd]->cnv_CM3_pGR is the total excitation rate from j to k and 01737 * higher due to photon absorptions and particle collisions, it already implements 01738 * Eq. 2.17 of Guhathakurtha & Draine, in events/grain/s */ 01739 /* >>chng 00 mar 27, factored in hden (in Phi), by PvH */ 01740 /* >>chng 00 mar 29, add in contribution due to particle collisions, by PvH */ 01741 /* >>chng 01 mar 30, moved multiplication of bval_jk with gv.bin[nd]->cnv_CM3_pGR 01742 * out of loop, PvH */ 01743 trap2 = p[j]*bval_jk; 01744 /* Trapezoidal rule, multiplication with factor 0.5 is done outside loop */ 01745 sum += (trap1 + trap2)*delu[j]; 01746 trap1 = trap2; 01747 } 01748 01749 /* >>chng 00 mar 27, multiplied with delu, by PvH */ 01750 /* >>chng 00 apr 05, taken out Lambda[0], improves convergence at low end dramatically!, by PvH */ 01751 /* qprob[k] = sum*gv.bin[nd]->cnv_CM3_pGR*delu[k]/(Lambda[k] - Lambda[0]); */ 01752 /* this expression includes factor 0.5 from trapezoidal rule above */ 01753 /* p[k] = 0.5*(sum + (trap1 + p[k]*Phi[0])*delu[k])/Lambda[k] */ 01754 p[k] = (sum + trap1*delu[k])/(2.*Lambda[k] - Phi[0]*delu[k]); 01755 01756 // total failure -> force next step to be smaller 01757 if( p[k] <= 0. ) 01758 return 3.*QHEAT_TOL; 01759 } 01760 01761 /* this is estimate for p[k] using one double step of size "step" */ 01762 p2k = (sum2 + trap12*step)/(2.*Lambda[k] - Phi[0]*step); 01763 01764 // total failure -> force next step to be smaller 01765 if( p2k <= 0. ) 01766 return 3.*QHEAT_TOL; 01767 01768 RelErrPk = fabs(p2k-p[k])/p[k]; 01769 01770 /* this is radiative cooling due to the two probability bins we just added */ 01771 /* simple trapezoidal rule will not do here, RelErrCool would never converge */ 01772 *cooling = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k-1],p[k-1]*Lambda[k-1]); 01773 *cooling += log_integral(u1[k-1],p[k-1]*Lambda[k-1],u1[k],p[k]*Lambda[k]); 01774 01775 /* same as cooling, but now for double step of size "step" */ 01776 cooling2 = log_integral(u1[k-2],p[k-2]*Lambda[k-2],u1[k],p2k*Lambda[k]); 01777 01778 /* p[0] is not reliable, so ignore convergence test on cooling on first step */ 01779 RelErrCool = ( index > 0 ) ? 
fabs(cooling2-(*cooling))/(*cooling) : 0.; 01780 01781 /* printf( " TryDoubleStep p[k-1] %.4e p[k] %.4e p2k %.4e\n",p[k-1],p[k],p2k ); */ 01782 /* error scales as O(step^3), so this is relative accuracy of p[k] or cooling */ 01783 return MAX2(RelErrPk,RelErrCool)/3.; 01784 } 01785 01786 01787 /* calculate logarithmic integral from (xx1,yy1) to (xx2,yy2) */ 01788 STATIC double log_integral(double xx1, 01789 double yy1, 01790 double xx2, 01791 double yy2) 01792 { 01793 double eps, 01794 result, 01795 xx; 01796 01797 DEBUG_ENTRY( "log_integral()" ); 01798 01799 /* sanity checks */ 01800 ASSERT( xx1 > 0. && yy1 > 0. && xx2 > 0. && yy2 > 0. ); 01801 01802 xx = log(xx2/xx1); 01803 eps = log((xx2/xx1)*(yy2/yy1)); 01804 if( fabs(eps) < 1.e-4 ) 01805 { 01806 result = xx1*yy1*xx*((eps/6. + 0.5)*eps + 1.); 01807 } 01808 else 01809 { 01810 result = (xx2*yy2 - xx1*yy1)*xx/eps; 01811 } 01812 return result; 01813 } 01814 01815 01816 /* scan the probability distribution for valid range */ 01817 STATIC void ScanProbDistr(double u1[], /* u1[nbin] */ 01818 double dPdlnT[], /* dPdlnT[nbin] */ 01819 long nbin, 01820 double maxVal, 01821 long nmax, 01822 /*@out@*/long *nstart, 01823 /*@out@*/long *nstart2, 01824 /*@out@*/long *nend, 01825 long *nWideFail, 01826 QH_Code *ErrorCode) 01827 { 01828 bool lgSetLo, 01829 lgSetHi; 01830 long i; 01831 double deriv_max, 01832 minVal; 01833 const double MIN_FAC_LO = 1.e4; 01834 const double MIN_FAC_HI = 1.e4; 01835 01836 DEBUG_ENTRY( "ScanProbDistr()" ); 01837 01838 /* sanity checks */ 01839 ASSERT( nbin > 0 && nmax >= 0 && nmax < nbin && maxVal > 0. ); 01840 01841 /* sometimes the probability distribution will start falling before settling on 01842 * a rising slope. In such a case nstart will point to the first rising point, 01843 * while nstart2 will point to the point with the steepest derivative on the 01844 * rising slope. 
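 * (in the code below nstart is found by scanning downward from nmax for the overall
 * minimum of dPdlnT, and nstart2 by locating the steepest logarithmic rise of dPdlnT
 * between nstart and nmax).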
The code will use the distribution from nstart to nend as a 01845 * valid probability distribution, but the will use the points near nstart2 01846 * to extrapolate a new value for qtmin if needed */ 01847 minVal = maxVal; 01848 *nstart = nmax; 01849 for( i=nmax; i >= 0; --i ) 01850 { 01851 if( dPdlnT[i] < minVal ) 01852 { 01853 *nstart = i; 01854 minVal = dPdlnT[i]; 01855 } 01856 } 01857 deriv_max = 0.; 01858 *nstart2 = nmax; 01859 for( i=nmax; i > *nstart; --i ) 01860 { 01861 double deriv = log(dPdlnT[i]/dPdlnT[i-1])/log(u1[i]/u1[i-1]); 01862 if( deriv > deriv_max ) 01863 { 01864 *nstart2 = i-1; 01865 deriv_max = deriv; 01866 } 01867 } 01868 *nend = nbin-1; 01869 01870 /* now do quality control; these checks are more stringent than the ones in GetProbDistr_LowLimit */ 01871 lgSetLo = ( nmax >= *nend || maxVal/dPdlnT[*nend] < MIN_FAC_HI ); 01872 /* >>chng 03 jan 22, prevent problems if both dPdlnT and its derivative are continuously rising, 01873 * in which case both lgSetLo and lgSetHi are set and QH_WIDE_FAIL is triggered; 01874 * this can happen if qtmin is far too low; triggered by pahtest.in, PvH */ 01875 if( lgSetLo ) 01876 /* use relaxed test if lgSetLo is already set */ 01877 lgSetHi = ( nmax <= *nstart || maxVal/dPdlnT[*nstart] < MIN_FAC_LO ); 01878 else 01879 lgSetHi = ( nmax <= *nstart2 || maxVal/dPdlnT[*nstart2] < MIN_FAC_LO ); 01880 01881 if( lgSetLo && lgSetHi ) 01882 { 01883 ++(*nWideFail); 01884 01885 if( *nWideFail >= WIDE_FAIL_MAX ) 01886 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL); 01887 } 01888 01889 if( lgSetLo ) 01890 *ErrorCode = MAX2(*ErrorCode,QH_LOW_FAIL); 01891 01892 if( lgSetHi ) 01893 *ErrorCode = MAX2(*ErrorCode,QH_HIGH_FAIL); 01894 01895 /* there are insufficient bins to attempt rebinning */ 01896 if( *ErrorCode < QH_NO_REBIN && (*nend - *nstart) < NQMIN ) 01897 *ErrorCode = MAX2(*ErrorCode,QH_NBIN_FAIL); 01898 01899 if( trace.lgTrace && trace.lgDustBug ) 01900 { 01901 fprintf( ioQQQ, " ScanProbDistr nstart %ld nstart2 %ld nend %ld nmax %ld maxVal %.3e", 01902 *nstart,*nstart2,*nend,nmax,maxVal ); 01903 fprintf( ioQQQ, " dPdlnT[nstart] %.3e dPdlnT[nstart2] %.3e dPdlnT[nend] %.3e code %d\n", 01904 dPdlnT[*nstart],dPdlnT[*nstart2],dPdlnT[*nend],*ErrorCode ); 01905 } 01906 01907 if( *ErrorCode >= QH_NO_REBIN ) 01908 { 01909 *nstart = -1; 01910 *nstart2 = -1; 01911 *nend = -2; 01912 } 01913 return; 01914 } 01915 01916 01917 /* rebin the quantum heating results to speed up RT_diffuse */ 01918 STATIC long RebinQHeatResults(long nd, 01919 long nstart, 01920 long nend, 01921 double p[], 01922 double qtemp[], 01923 double qprob[], 01924 double dPdlnT[], 01925 double u1[], 01926 double delu[], 01927 double Lambda[], 01928 QH_Code *ErrorCode) 01929 { 01930 long i, 01931 newnbin; 01932 double fac, 01933 help, 01934 mul_fac, 01935 *new_delu/*[NQGRID]*/, 01936 *new_dPdlnT/*[NQGRID]*/, 01937 *new_Lambda/*[NQGRID]*/, 01938 *new_p/*[NQGRID]*/, 01939 *new_qprob/*[NQGRID]*/, 01940 *new_qtemp/*[NQGRID]*/, 01941 *new_u1/*[NQGRID]*/, 01942 PP1, 01943 PP2, 01944 RadCooling, 01945 T1, 01946 T2, 01947 Ucheck, 01948 uu1, 01949 uu2; 01950 01951 DEBUG_ENTRY( "RebinQHeatResults()" ); 01952 01953 /* sanity checks */ 01954 ASSERT( nd >= 0 && nd < gv.nBin ); 01955 /* >>chng 02 aug 07, changed oldnbin -> nstart..nend */ 01956 ASSERT( nstart >= 0 && nstart < nend && nend < NQGRID ); 01957 01958 /* leading entries may have become very small or zero -> skip */ 01959 for( i=nstart; i <= nend && dPdlnT[i] < PROB_CUTOFF_LO; i++ ) {} 01960 01961 /* >>chng 04 oct 17, add fail-safe to keep lint happy, 
but this should never happen... */ 01962 if( i >= NQGRID ) 01963 { 01964 *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL); 01965 return 0; 01966 } 01967 01968 /* >>chng 02 aug 15, use malloc to prevent stack overflows */ 01969 new_delu = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01970 new_dPdlnT = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01971 new_Lambda = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01972 new_p = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01973 new_qprob = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01974 new_qtemp = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01975 new_u1 = (double*)MALLOC((size_t)(NQGRID*sizeof(double))); 01976 01977 newnbin = 0; 01978 01979 T1 = qtemp[i]; 01980 PP1 = p[i]; 01981 uu1 = u1[i]; 01982 01983 /* >>chng 04 feb 01, change 2.*NQMIN -> 1.5*NQMIN, PvH */ 01984 help = pow(qtemp[nend]/qtemp[i],1./(1.5*NQMIN)); 01985 mul_fac = MIN2(QT_RATIO,help); 01986 01987 Ucheck = u1[i]; 01988 RadCooling = 0.; 01989 01990 while( i < nend ) 01991 { 01992 bool lgBoundErr; 01993 bool lgDone= false; 01994 double s0 = 0.; 01995 double s1 = 0.; 01996 double wid = 0.; 01997 double xx,y; 01998 01999 T2 = T1*mul_fac; 02000 02001 do 02002 { 02003 double p1,p2,L1,L2,frac,slope; 02004 if( qtemp[i] <= T1 && T1 <= qtemp[i+1] ) 02005 { 02006 /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */ 02007 /* uu1 = ufunct(T1,nd); */ 02008 frac = log(T1/qtemp[i]); 02009 slope = log(p[i+1]/p[i])/log(qtemp[i+1]/qtemp[i]); 02010 p1 = p[i]*exp(frac*slope); 02011 slope = log(Lambda[i+1]/Lambda[i])/log(qtemp[i+1]/qtemp[i]); 02012 L1 = Lambda[i]*exp(frac*slope); 02013 } 02014 else 02015 { 02016 /* >>chng 01 nov 15, copy uu2 into uu1 instead, PvH */ 02017 /* uu1 = u1[i]; */ 02018 p1 = p[i]; 02019 L1 = Lambda[i]; 02020 } 02021 if( qtemp[i] <= T2 && T2 <= qtemp[i+1] ) 02022 { 02023 /* >>chng 02 apr 30, make sure this doesn't point beyond valid range, PvH */ 02024 help = ufunct(T2,nd,&lgBoundErr); 02025 uu2 = MIN2(help,u1[i+1]); 02026 ASSERT( !lgBoundErr ); /* this should never be triggered */ 02027 frac = log(T2/qtemp[i]); 02028 slope = log(p[i+1]/p[i])/log(qtemp[i+1]/qtemp[i]); 02029 p2 = p[i]*exp(frac*slope); 02030 slope = log(Lambda[i+1]/Lambda[i])/log(qtemp[i+1]/qtemp[i]); 02031 L2 = Lambda[i]*exp(frac*slope); 02032 lgDone = true; 02033 } 02034 else 02035 { 02036 uu2 = u1[i+1]; 02037 p2 = p[i+1]; 02038 L2 = Lambda[i+1]; 02039 /* >>chng 01 nov 15, this caps the range in p(U) integrated in one bin 02040 * it helps avoid spurious QH_BOUND_FAIL's when flank is very steep, PvH */ 02041 if( MAX2(p2,PP1)/MIN2(p2,PP1) > sqrt(SAFETY) ) 02042 { 02043 lgDone = true; 02044 T2 = qtemp[i+1]; 02045 } 02046 ++i; 02047 } 02048 PP2 = p2; 02049 wid += uu2 - uu1; 02050 /* sanity check */ 02051 ASSERT( wid >= 0. ); 02052 s0 += log_integral(uu1,p1,uu2,p2); 02053 s1 += log_integral(uu1,p1*L1,uu2,p2*L2); 02054 uu1 = uu2; 02055 02056 } while( i < nend && ! lgDone ); 02057 02058 /* >>chng 01 nov 14, if T2 == qtemp[oldnbin-1], the code will try another iteration 02059 * break here to avoid zero divide, the assert on Ucheck tests if we are really finished */ 02060 /* >>chng 01 dec 04, change ( s0 == 0. ) to ( s0 <= 0. ), PvH */ 02061 if( s0 <= 0. ) 02062 { 02063 ASSERT( wid == 0. 
); 02064 break; 02065 } 02066 02067 new_qprob[newnbin] = s0; 02068 new_Lambda[newnbin] = s1/s0; 02069 xx = log(new_Lambda[newnbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH); 02070 splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgBoundErr); 02071 ASSERT( !lgBoundErr ); /* this should never be triggered */ 02072 new_qtemp[newnbin] = exp(y); 02073 new_u1[newnbin] = ufunct(new_qtemp[newnbin],nd,&lgBoundErr); 02074 ASSERT( !lgBoundErr ); /* this should never be triggered */ 02075 new_delu[newnbin] = wid; 02076 new_p[newnbin] = new_qprob[newnbin]/new_delu[newnbin]; 02077 new_dPdlnT[newnbin] = new_p[newnbin]*new_qtemp[newnbin]*uderiv(new_qtemp[newnbin],nd); 02078 02079 Ucheck += wid; 02080 RadCooling += new_qprob[newnbin]*new_Lambda[newnbin]; 02081 02082 T1 = T2; 02083 PP1 = PP2; 02084 ++newnbin; 02085 } 02086 02087 /* >>chng 01 jul 13, add fail-safe */ 02088 if( newnbin < NQMIN ) 02089 { 02090 *ErrorCode = MAX2(*ErrorCode,QH_REBIN_FAIL); 02091 02092 free(new_delu); 02093 free(new_dPdlnT); 02094 free(new_Lambda); 02095 free(new_p); 02096 free(new_qprob); 02097 free(new_qtemp); 02098 free(new_u1); 02099 return 0; 02100 } 02101 02102 fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat; 02103 02104 if( trace.lgTrace && trace.lgDustBug ) 02105 { 02106 fprintf( ioQQQ, " RebinQHeatResults found tol1 %.4e tol2 %.4e\n", 02107 fabs(u1[nend]/Ucheck-1.), fabs(fac-1.) ); 02108 } 02109 02110 /* do quality control */ 02111 /* >>chng 02 apr 30, tighten up check, PvH */ 02112 ASSERT( fabs(u1[nend]/Ucheck-1.) < 10.*sqrt((double)(nend-nstart+newnbin))*DBL_EPSILON ); 02113 02114 if( fabs(fac-1.) > CONSERV_TOL ) 02115 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL); 02116 02117 for( i=0; i < newnbin; i++ ) 02118 { 02119 /* renormalize the distribution to assure energy conservation */ 02120 p[i] = new_p[i]/fac; 02121 qtemp[i] = new_qtemp[i]; 02122 qprob[i] = new_qprob[i]/fac; 02123 dPdlnT[i] = new_dPdlnT[i]/fac; 02124 u1[i] = new_u1[i]; 02125 delu[i] = new_delu[i]; 02126 Lambda[i] = new_Lambda[i]; 02127 02128 /* sanity checks */ 02129 ASSERT( qtemp[i] > 0. && qprob[i] > 0. 
); 02130 02131 /* printf(" rk %ld T[k] %.6e U[k] %.6e p[k] %.6e dPdlnT[k] %.6e\n",i,qtemp[i],u1[i],p[i],dPdlnT[i]); */ 02132 } 02133 02134 free(new_delu); 02135 free(new_dPdlnT); 02136 free(new_Lambda); 02137 free(new_p); 02138 free(new_qprob); 02139 free(new_qtemp); 02140 free(new_u1); 02141 return newnbin; 02142 } 02143 02144 02145 /* calculate approximate probability distribution in high intensity limit */ 02146 STATIC void GetProbDistr_HighLimit(long nd, 02147 double TolFac, 02148 double Umax, 02149 double fwhm, 02150 /*@out@*/double qtemp[], 02151 /*@out@*/double qprob[], 02152 /*@out@*/double dPdlnT[], 02153 /*@out@*/double *tol, 02154 /*@out@*/long *qnbin, 02155 /*@out@*/double *new_tmin, 02156 /*@out@*/QH_Code *ErrorCode) 02157 { 02158 bool lgBoundErr, 02159 lgErr; 02160 long i, 02161 nbin; 02162 double c1, 02163 c2, 02164 delu[NQGRID], 02165 fac, 02166 fac1, 02167 fac2, 02168 help1, 02169 help2, 02170 L1, 02171 L2, 02172 Lambda[NQGRID], 02173 mul_fac, 02174 p[NQGRID], 02175 p1, 02176 p2, 02177 RadCooling, 02178 sum, 02179 T1, 02180 T2, 02181 Tlo, 02182 Thi, 02183 Ulo, 02184 Uhi, 02185 uu1, 02186 uu2, 02187 xx, 02188 y; 02189 02190 DEBUG_ENTRY( "GetProbDistr_HighLimit()" ); 02191 02192 if( trace.lgTrace && trace.lgDustBug ) 02193 { 02194 fprintf( ioQQQ, " GetProbDistr_HighLimit called for grain %s\n", gv.bin[nd]->chDstLab ); 02195 } 02196 02197 c1 = sqrt(4.*LN_TWO/PI)/fwhm*exp(-POW2(fwhm/Umax)/(16.*LN_TWO)); 02198 c2 = 4.*LN_TWO*POW2(Umax/fwhm); 02199 02200 fac1 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_LO)/(4.*LN_TWO)); 02201 /* >>chng 03 nov 10, safeguard against underflow, PvH */ 02202 help1 = Umax*exp(-fac1); 02203 help2 = exp(gv.bin[nd]->DustEnth[0]); 02204 Ulo = MAX2(help1,help2); 02205 /* >>chng 03 jan 28, ignore lgBoundErr on lower boundary, low-T grains have negigible emission, PvH */ 02206 Tlo = inv_ufunct(Ulo,nd,&lgBoundErr); 02207 02208 fac2 = fwhm/Umax*sqrt(-log(PROB_CUTOFF_HI)/(4.*LN_TWO)); 02209 /* >>chng 03 nov 10, safeguard against overflow, PvH */ 02210 if( fac2 > log(DBL_MAX/10.) 
) 02211 { 02212 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL); 02213 return; 02214 } 02215 Uhi = Umax*exp(fac2); 02216 Thi = inv_ufunct(Uhi,nd,&lgBoundErr); 02217 02218 nbin = 0; 02219 02220 T1 = Tlo; 02221 uu1 = ufunct(T1,nd,&lgErr); 02222 lgBoundErr = lgBoundErr || lgErr; 02223 help1 = log(uu1/Umax); 02224 p1 = c1*exp(-c2*POW2(help1)); 02225 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T1),&y,&lgErr); 02226 lgBoundErr = lgBoundErr || lgErr; 02227 L1 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD; 02228 02229 /* >>chng 03 nov 10, safeguard against underflow, PvH */ 02230 if( uu1*p1*L1 < 1.e5*DBL_MIN ) 02231 { 02232 *ErrorCode = MAX2(*ErrorCode,QH_WIDE_FAIL); 02233 return; 02234 } 02235 02236 /* >>chng 04 feb 01, change 2.*NQMIN -> 1.2*NQMIN, PvH */ 02237 help1 = pow(Thi/Tlo,1./(1.2*NQMIN)); 02238 mul_fac = MIN2(QT_RATIO,help1); 02239 02240 sum = 0.; 02241 RadCooling = 0.; 02242 02243 do 02244 { 02245 double s0,s1,wid; 02246 02247 T2 = T1*mul_fac; 02248 uu2 = ufunct(T2,nd,&lgErr); 02249 lgBoundErr = lgBoundErr || lgErr; 02250 help1 = log(uu2/Umax); 02251 p2 = c1*exp(-c2*POW2(help1)); 02252 splint_safe(gv.dsttmp,gv.bin[nd]->dstems,gv.bin[nd]->dstslp2,NDEMS,log(T2),&y,&lgErr); 02253 lgBoundErr = lgBoundErr || lgErr; 02254 L2 = exp(y)*gv.bin[nd]->cnv_H_pGR/EN1RYD; 02255 02256 wid = uu2 - uu1; 02257 s0 = log_integral(uu1,p1,uu2,p2); 02258 s1 = log_integral(uu1,p1*L1,uu2,p2*L2); 02259 02260 qprob[nbin] = s0; 02261 Lambda[nbin] = s1/s0; 02262 xx = log(Lambda[nbin]*EN1RYD*gv.bin[nd]->cnv_GR_pH); 02263 splint_safe(gv.bin[nd]->dstems,gv.dsttmp,gv.bin[nd]->dstslp,NDEMS,xx,&y,&lgErr); 02264 lgBoundErr = lgBoundErr || lgErr; 02265 qtemp[nbin] = exp(y); 02266 delu[nbin] = wid; 02267 p[nbin] = qprob[nbin]/delu[nbin]; 02268 dPdlnT[nbin] = p[nbin]*qtemp[nbin]*uderiv(qtemp[nbin],nd); 02269 02270 sum += qprob[nbin]; 02271 RadCooling += qprob[nbin]*Lambda[nbin]; 02272 02273 T1 = T2; 02274 uu1 = uu2; 02275 p1 = p2; 02276 L1 = L2; 02277 02278 ++nbin; 02279 02280 } while( T2 < Thi && nbin < NQGRID ); 02281 02282 fac = RadCooling*EN1RYD*gv.bin[nd]->cnv_GR_pCM3/gv.bin[nd]->GrainHeat; 02283 02284 for( i=0; i < nbin; ++i ) 02285 { 02286 qprob[i] /= fac; 02287 dPdlnT[i] /= fac; 02288 } 02289 02290 *tol = sum/fac; 02291 *qnbin = nbin; 02292 *new_tmin = qtemp[0]; 02293 *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC); 02294 02295 /* do quality control */ 02296 if( TolFac > STRICT ) 02297 *ErrorCode = MAX2(*ErrorCode,QH_ANALYTIC_RELAX); 02298 02299 if( lgBoundErr ) 02300 *ErrorCode = MAX2(*ErrorCode,QH_THIGH_FAIL); 02301 02302 if( fabs(sum/fac-1.) > PROB_TOL ) 02303 *ErrorCode = MAX2(*ErrorCode,QH_CONV_FAIL); 02304 02305 if( dPdlnT[0] > SAFETY*PROB_CUTOFF_LO || dPdlnT[nbin-1] > SAFETY*PROB_CUTOFF_HI ) 02306 *ErrorCode = MAX2(*ErrorCode,QH_BOUND_FAIL); 02307 02308 if( trace.lgTrace && trace.lgDustBug ) 02309 { 02310 fprintf( ioQQQ, " GetProbDistr_HighLimit found tol1 %.4e tol2 %.4e\n", 02311 fabs(sum-1.), fabs(sum/fac-1.) 
); 02312 fprintf( ioQQQ, " zone %ld %s nbin %ld total prob %.4e new_tmin %.4e\n", 02313 nzone,gv.bin[nd]->chDstLab,nbin,sum/fac,*new_tmin ); 02314 } 02315 return; 02316 } 02317 02318 02319 /* calculate derivative of the enthalpy function dU/dT (aka the specific heat) at a given temperature, in Ryd/K */ 02320 STATIC double uderiv(double temp, 02321 long int nd) 02322 { 02323 enth_type ecase; 02324 long int i, 02325 j; 02326 double N_C, 02327 N_H; 02328 double deriv = 0., 02329 hok[3] = {1275., 1670., 4359.}, 02330 numer, 02331 dnumer, 02332 denom, 02333 ddenom, 02334 x; 02335 02336 02337 DEBUG_ENTRY( "uderiv()" ); 02338 02339 if( temp <= 0. ) 02340 { 02341 fprintf( ioQQQ, " uderiv called with non-positive temperature: %.6e\n" , temp ); 02342 cdEXIT(EXIT_FAILURE); 02343 } 02344 ASSERT( nd >= 0 && nd < gv.nBin ); 02345 02346 ecase = gv.which_enth[gv.bin[nd]->matType]; 02347 switch( ecase ) 02348 { 02349 case ENTH_CAR: 02350 numer = (4.15e-22/EN1RYD)*pow(temp,3.3); 02351 dnumer = (3.3*4.15e-22/EN1RYD)*pow(temp,2.3); 02352 denom = 1. + 6.51e-03*temp + 1.5e-06*temp*temp + 8.3e-07*pow(temp,2.3); 02353 ddenom = 6.51e-03 + 2.*1.5e-06*temp + 2.3*8.3e-07*pow(temp,1.3); 02354 /* dU/dT for pah/graphitic grains in Ryd/K, derived from: 02355 * >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */ 02356 deriv = (dnumer*denom - numer*ddenom)/POW2(denom); 02357 break; 02358 case ENTH_CAR2: 02359 /* dU/dT for graphite grains in Ryd/K, using eq 9 of */ 02360 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */ 02361 deriv = (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD; 02362 break; 02363 case ENTH_SIL: 02364 /* dU/dT for silicate grains (and grey grains) in Ryd/K */ 02365 /* limits of tlim set above, 0 and DBL_MAX, so always OK */ 02366 /* >>refer grain physics Guhathakurta & Draine, 1989, ApJ, 345, 230 */ 02367 for( j = 0; j < 4; j++ ) 02368 { 02369 if( temp > tlim[j] && temp <= tlim[j+1] ) 02370 { 02371 deriv = cval[j]*pow(temp,ppower[j]); 02372 break; 02373 } 02374 } 02375 break; 02376 case ENTH_SIL2: 02377 /* dU/dT for silicate grains in Ryd/K, using eq 11 of */ 02378 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */ 02379 deriv = (2.*DebyeDeriv(temp/500.,2) + DebyeDeriv(temp/1500.,3))*BOLTZMANN/EN1RYD; 02380 break; 02381 case ENTH_PAH: 02382 /* dU/dT for PAH grains in Ryd/K, using eq A.4 of */ 02383 /* >>refer grain physics Dwek E., Arendt R.G., Fixsen D.J. et al., 1997, ApJ, 475, 565 */ 02384 /* this expression is only valid upto 2000K */ 02385 x = log10(MIN2(temp,2000.)); 02386 deriv = pow(10.,-21.26+3.1688*x-0.401894*POW2(x))/EN1RYD; 02387 break; 02388 case ENTH_PAH2: 02389 /* dU/dT for PAH grains in Ryd/K, approximately using eq 33 of */ 02390 /* >>refer grain physics Draine B.T., and Li A., 2001, ApJ, 551, 807 */ 02391 /* N_C and N_H should actually be nint(N_C) and nint(N_H), 02392 * but this can lead to FP overflow for very large grains */ 02393 N_C = NO_ATOMS(nd); 02394 if( N_C <= 25. ) 02395 { 02396 N_H = 0.5*N_C; 02397 } 02398 else if( N_C <= 100. ) 02399 { 02400 N_H = 2.5*sqrt(N_C); 02401 } 02402 else 02403 { 02404 N_H = 0.25*N_C; 02405 } 02406 deriv = 0.; 02407 for( i=0; i < 3; i++ ) 02408 { 02409 double help1 = hok[i]/temp; 02410 if( help1 < 300. ) 02411 { 02412 double help2 = exp(help1); 02413 double help3 = ( help1 < 1.e-7 ) ? 
help1*(1.+0.5*help1) : help2-1.; 02414 deriv += N_H/(N_C-2.)*POW2(help1)*help2/POW2(help3)*BOLTZMANN/EN1RYD; 02415 } 02416 } 02417 deriv += (DebyeDeriv(temp/863.,2) + 2.*DebyeDeriv(temp/2504.,2))*BOLTZMANN/EN1RYD; 02418 break; 02419 default: 02420 fprintf( ioQQQ, " uderiv called with unknown type for enthalpy function: %d\n", ecase ); 02421 cdEXIT(EXIT_FAILURE); 02422 } 02423 02424 /* >>chng 00 mar 23, use formula 3.1 of Guhathakurtha & Draine, by PvH */ 02425 /* >>chng 03 jan 17, use MAX2(..,1) to prevent crash for extremely small grains, PvH */ 02426 deriv *= MAX2(NO_ATOMS(nd)-2.,1.); 02427 02428 if( deriv <= 0. ) 02429 { 02430 fprintf( ioQQQ, " uderiv finds non-positive derivative: %.6e, what's up?\n" , deriv ); 02431 cdEXIT(EXIT_FAILURE); 02432 } 02433 return( deriv ); 02434 } 02435 02436 02437 /* calculate the enthalpy of a grain at a given temperature, in Ryd */ 02438 STATIC double ufunct(double temp, 02439 long int nd, 02440 /*@out@*/ bool *lgBoundErr) 02441 { 02442 double enthalpy, 02443 y; 02444 02445 DEBUG_ENTRY( "ufunct()" ); 02446 02447 if( temp <= 0. ) 02448 { 02449 fprintf( ioQQQ, " ufunct called with non-positive temperature: %.6e\n" , temp ); 02450 cdEXIT(EXIT_FAILURE); 02451 } 02452 ASSERT( nd >= 0 && nd < gv.nBin ); 02453 02454 /* >>chng 02 apr 22, interpolate in DustEnth array to get enthalpy, by PvH */ 02455 splint_safe(gv.dsttmp,gv.bin[nd]->DustEnth,gv.bin[nd]->EnthSlp,NDEMS,log(temp),&y,lgBoundErr); 02456 enthalpy = exp(y); 02457 02458 ASSERT( enthalpy > 0. ); 02459 return( enthalpy ); 02460 } 02461 02462 02463 /* this is the inverse of ufunct: determine grain temperature as a function of enthalpy */ 02464 STATIC double inv_ufunct(double enthalpy, 02465 long int nd, 02466 /*@out@*/ bool *lgBoundErr) 02467 { 02468 double temp, 02469 y; 02470 02471 DEBUG_ENTRY( "inv_ufunct()" ); 02472 02473 if( enthalpy <= 0. ) 02474 { 02475 fprintf( ioQQQ, " inv_ufunct called with non-positive enthalpy: %.6e\n" , enthalpy ); 02476 cdEXIT(EXIT_FAILURE); 02477 } 02478 ASSERT( nd >= 0 && nd < gv.nBin ); 02479 02480 /* >>chng 02 apr 22, interpolate in DustEnth array to get temperature, by PvH */ 02481 splint_safe(gv.bin[nd]->DustEnth,gv.dsttmp,gv.bin[nd]->EnthSlp2,NDEMS,log(enthalpy),&y,lgBoundErr); 02482 temp = exp(y); 02483 02484 ASSERT( temp > 0. 
); 02485 return( temp ); 02486 } 02487 02488 02489 /* initialize interpolation arrys for grain enthalpy */ 02490 void InitEnthalpy(void) 02491 { 02492 double C_V1, 02493 C_V2, 02494 tdust1, 02495 tdust2; 02496 long int i, 02497 j, 02498 nd; 02499 02500 DEBUG_ENTRY( "InitEnthalpy()" ); 02501 02502 for( nd=0; nd < gv.nBin; nd++ ) 02503 { 02504 tdust2 = GRAIN_TMIN; 02505 C_V2 = uderiv(tdust2,nd); 02506 /* at low temps, C_V = C*T^3 -> U = C*T^4/4 = C_V*T/4 */ 02507 gv.bin[nd]->DustEnth[0] = C_V2*tdust2/4.; 02508 tdust1 = tdust2; 02509 C_V1 = C_V2; 02510 02511 for( i=1; i < NDEMS; i++ ) 02512 { 02513 double tmid,Cmid; 02514 tdust2 = exp(gv.dsttmp[i]); 02515 C_V2 = uderiv(tdust2,nd); 02516 tmid = sqrt(tdust1*tdust2); 02517 /* this ensures accuracy for silicate enthalpy */ 02518 for( j=1; j < 4; j++ ) 02519 { 02520 if( tdust1 < tlim[j] && tlim[j] < tdust2 ) 02521 { 02522 tmid = tlim[j]; 02523 break; 02524 } 02525 } 02526 Cmid = uderiv(tmid,nd); 02527 gv.bin[nd]->DustEnth[i] = gv.bin[nd]->DustEnth[i-1] + 02528 log_integral(tdust1,C_V1,tmid,Cmid) + 02529 log_integral(tmid,Cmid,tdust2,C_V2); 02530 tdust1 = tdust2; 02531 C_V1 = C_V2; 02532 } 02533 } 02534 02535 /* conversion for logarithmic interpolation */ 02536 for( nd=0; nd < gv.nBin; nd++ ) 02537 { 02538 for( i=0; i < NDEMS; i++ ) 02539 { 02540 gv.bin[nd]->DustEnth[i] = log(gv.bin[nd]->DustEnth[i]); 02541 } 02542 } 02543 02544 /* now find slopes from spline fit */ 02545 for( nd=0; nd < gv.nBin; nd++ ) 02546 { 02547 /* set up coefficients for splint */ 02548 spline(gv.dsttmp,gv.bin[nd]->DustEnth,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp); 02549 spline(gv.bin[nd]->DustEnth,gv.dsttmp,NDEMS,2e31,2e31,gv.bin[nd]->EnthSlp2); 02550 } 02551 return; 02552 } 02553 02554 02555 /* helper function for calculating specific heat, uses Debye theory */ 02556 STATIC double DebyeDeriv(double x, 02557 long n) 02558 { 02559 long i, 02560 nn; 02561 double res, 02562 *xx, 02563 *rr, 02564 *aa, 02565 *ww; 02566 02567 DEBUG_ENTRY( "DebyeDeriv()" ); 02568 02569 ASSERT( x > 0. ); 02570 ASSERT( n == 2 || n == 3 ); 02571 02572 if( x < 0.001 ) 02573 { 02574 /* for general n this is Gamma(n+2)*zeta(n+1)*powi(x,n) */ 02575 if( n == 2 ) 02576 { 02577 res = 6.*1.202056903159594*POW2(x); 02578 } 02579 else if( n == 3 ) 02580 { 02581 res = 24.*1.082323233711138*POW3(x); 02582 } 02583 else 02584 /* added to keep lint happy - note that assert above confimred that n is 2 or 3, 02585 * but lint flagged possible flow without defn of res */ 02586 TotalInsanity(); 02587 } 02588 else 02589 { 02590 nn = 4*MAX2(4,2*(long)(0.05/x)); 02591 xx = (double *)MALLOC(sizeof(double)*(unsigned)nn); 02592 rr = (double *)MALLOC(sizeof(double)*(unsigned)nn); 02593 aa = (double *)MALLOC(sizeof(double)*(unsigned)nn); 02594 ww = (double *)MALLOC(sizeof(double)*(unsigned)nn); 02595 gauss_legendre(nn,xx,aa); 02596 gauss_init(nn,0.,1.,xx,aa,rr,ww); 02597 02598 res = 0.; 02599 for( i=0; i < nn; i++ ) 02600 { 02601 double help1 = rr[i]/x; 02602 if( help1 < 300. ) 02603 { 02604 double help2 = exp(help1); 02605 double help3 = ( help1 < 1.e-7 ) ? help1*(1.+0.5*help1) : help2-1.; 02606 res += ww[i]*powi(rr[i],n+1)*help2/POW2(help3); 02607 } 02608 } 02609 res /= POW2(x); 02610 02611 free(ww); 02612 free(aa); 02613 free(rr); 02614 free(xx); 02615 } 02616 return (double)n*res; 02617 }
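/* --------------------------------------------------------------------------------
 * Illustrative sketch, not part of Cloudy: the quadrature used by log_integral()
 * above assumes that y(x) behaves as a power law y = yy1*(x/xx1)^q between the two
 * grid points, which integrates to (xx2*yy2 - xx1*yy1)*ln(xx2/xx1)/eps with
 * eps = ln((xx2*yy2)/(xx1*yy1)) = (q+1)*ln(xx2/xx1); for |eps| << 1 the series
 * expansion of (exp(eps)-1)/eps is used instead. The self-contained example below
 * checks that rule against the analytic integral of an exact power law; the names
 * demo_log_integral and demo_log_integral.c are ours, chosen only for this sketch.
 * It is disabled here and meant to be compiled on its own, e.g. after copying it
 * into demo_log_integral.c */
#if 0
#include <math.h>
#include <stdio.h>

/* same rule as log_integral() above, repeated so that the demo is self-contained */
static double demo_log_integral(double xx1, double yy1, double xx2, double yy2)
{
	double xx = log(xx2/xx1);
	double eps = log((xx2/xx1)*(yy2/yy1));
	if( fabs(eps) < 1.e-4 )
		/* series expansion of xx1*yy1*xx*(exp(eps)-1)/eps */
		return xx1*yy1*xx*((eps/6. + 0.5)*eps + 1.);
	else
		return (xx2*yy2 - xx1*yy1)*xx/eps;
}

int main(void)
{
	/* integrate y(x) = 2*x^(-1.5) from x=1 to x=2; the integrand is an exact
	 * power law, so the quadrature should reproduce the analytic result */
	double q = -1.5;
	double xx1 = 1., xx2 = 2.;
	double yy1 = 2.*pow(xx1,q), yy2 = 2.*pow(xx2,q);
	double exact = 2.*(pow(xx2,q+1.) - pow(xx1,q+1.))/(q+1.);
	printf( "power law: quadrature %.6e exact %.6e\n",
		demo_log_integral(xx1,yy1,xx2,yy2), exact );
	/* a nearly flat integrand over a tiny interval exercises the small-eps branch;
	 * the exact integral of y = 1 from 1 to 1.00001 is 1.e-5 */
	printf( "flat case: quadrature %.6e exact %.6e\n",
		demo_log_integral(1.,1.,1.00001,1.), 1.e-5 );
	return 0;
}
#endif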