Browse Source

Improved Fox-Glynn for small numbers + int overflow bugfix (Vojta).

git-svn-id: https://www.prismmodelchecker.org/svn/prism/prism/trunk@1926 bbc10eb1-c90d-0410-af57-cb519fbb1720
master
Dave Parker 16 years ago
parent
commit
b6b993f030
  1. 4
      prism/include/prism.h
  2. 257
      prism/src/explicit/FoxGlynn.java
  3. 2
      prism/src/hybrid/PH_StochBoundedUntil.cc
  4. 2
      prism/src/hybrid/PH_StochCumulReward.cc
  5. 2
      prism/src/hybrid/PH_StochTransient.cc
  6. 2
      prism/src/mtbdd/PM_StochBoundedUntil.cc
  7. 2
      prism/src/mtbdd/PM_StochCumulReward.cc
  8. 2
      prism/src/mtbdd/PM_StochTransient.cc
  9. 258
      prism/src/prism/prism.cc
  10. 3
      prism/src/sparse/PS_StochBoundedUntil.cc
  11. 3
      prism/src/sparse/PS_StochCumulReward.cc
  12. 3
      prism/src/sparse/PS_StochTransient.cc

4
prism/include/prism.h

@ -36,8 +36,8 @@
// Fox-Glynn weights struct
typedef struct FoxGlynnWeights
{
int left;
int right;
long left;
long right;
double total_weight;
double *weights;
} FoxGlynnWeights;

257
prism/src/explicit/FoxGlynn.java

@ -3,7 +3,7 @@
// Copyright (c) 2002-
// Authors:
// * Dave Parker <david.parker@comlab.ox.ac.uk> (University of Oxford, formerly University of Birmingham)
// * Joachim Meyer-Kayser <Joachim.Meyer-Kayser@informatik.uni-erlangen.de> (University of Erlangen)
// * Vojtech Forejt <forejt@fi.muni.cz> (Masaryk University)
//
//------------------------------------------------------------------------------
//
@ -71,150 +71,141 @@ public final class FoxGlynn
private final void run() throws PrismException
{
int m = (int) Math.floor(q_tmax);
double q = find(m); // get q, left and right
//Log.entry("Weighter: Right: " + right);
//Log.entry("Weighter: Left : " + left);
weights = new double[right - left + 1];
weights[m - left] = q;
//Log.logArray("Weights before calculation: ",weights);
// down
for (int j = m; j > left; j--) {
weights[j - 1 - left] = (j / q_tmax) * weights[j - left];
if (q_tmax == 0.0) {
throw new PrismException("Overflow: TA parameter qtmax = time * maxExitRate = 0.");
}
//up
if (q_tmax < 400) {
if (right > 600) {
throw new PrismException("Overflow: right truncation point > 600.");
else if (q_tmax < 400)
{ //here naive approach should have better performance than Fox Glynn
final double expcoef = Math.exp(-q_tmax); //the "e^-lambda" part of p.m.f. of Poisson dist.
int k; //denotes that we work with event "k steps occur"
double lastval; //(probability that exactly k events occur)/expcoef
double accum; //(probability that 0 to k events occur)/expcoef
double desval = (1-(accuracy/2.0)) / expcoef; //value that we want to accumulate in accum before we stop
java.util.Vector<Double> w = new java.util.Vector<Double>(); //stores weights computed so far.
//k=0 is simple
lastval = 1;
accum = lastval;
w.add(lastval * expcoef);
//add further steps until you have accumulated enough
k = 1;
do {
lastval *= q_tmax / k; // invariant: lastval = q_tmax^k / k!
accum += lastval;
w.add(lastval * expcoef);
k++;
} while (accum < desval);
//store all data
this.left=0;
this.right=k-1;
this.weights = new double[k];
for(int i = 0; i < w.size(); i++)
{
this.weights[i] = w.get(i);
}
for (int j = m; j < right;) {
q = q_tmax / (j + 1);
if (weights[j - left] > underflow / q) {
weights[j + 1 - left] = q * weights[j - left];
j++;
} else {
right = j;
//Log.entry("Weighter: Right is now set to " + right);
computeTotalWeight();
return;
}
}
} else {
for (int j = m; j < right; j++) {
weights[j + 1 - left] = (q_tmax / (j + 1)) * weights[j - left];
}
//we return actual weights, so no reweighting should be done
this.totalWeight = 1.0;
}
computeTotalWeight();
}
private final void computeTotalWeight()
{
int l = left;
int r = right;
totalWeight = 0.0;
while (l < r) {
if (weights[l - left] <= weights[r - left]) {
totalWeight += weights[l - left];
++l;
} else {
totalWeight += weights[r - left];
--r;
else
{ //use actual Fox Glynn for q_tmax>400
if (accuracy < 1e-10) {
throw new PrismException("Overflow: Accuracy is smaller than Fox Glynn can handle (must be at least 1e-10).");
}
}
totalWeight += weights[l - left];
}
private final double find(double m) throws PrismException
{
double k;
if (q_tmax == 0.0)
throw new PrismException("Overflow: TA parameter qtmax = time * maxExitRate = 0.");
if (q_tmax < 25.0)
left = 0;
final double factor = 1e+10; //factor from the paper, it has no real explanation there
final int m = (int) q_tmax; //mode
//run FINDER to get left, right and weight[m]
{
final double sqrtpi = 1.7724538509055160; //square root of PI
final double sqrt2 = 1.4142135623730950; //square root of 2
final double sqrtq = Math.sqrt(q_tmax);
final double aq = (1.0 + 1.0/q_tmax) * Math.exp(0.0625) * sqrt2; //a_\lambda from the paper
final double bq = (1.0 + 1.0/q_tmax) * Math.exp(0.125/q_tmax); //b_\lambda from the paper
//use Corollary 1 to find right truncation point
final double lower_k_1 = 1.0 / (2.0*sqrt2*q_tmax); //lower bound on k from Corollary 1
final double upper_k_1 = sqrtq / (2.0*sqrt2); //upper bound on k from Corollary 1
double k;
//justification for increment is in the paper:
//"increase k through the positive integers greater than 3"
for(k=lower_k_1; k <= upper_k_1;
k=(k==lower_k_1)? k+4 : k+1 )
{
double dkl = 1.0/(1 - Math.exp(-(2.0/9.0)*(k*sqrt2*sqrtq+1.5))); //d(k,\lambda) from the paper
double res = aq*dkl*Math.exp(-k*k/2.0)/(k*sqrt2*sqrtpi); //right hand side of the equation in Corollary 1
if (res <= accuracy/2.0)
{
break;
}
}
if (q_tmax < 400.0) {
// Find right using Corollary 1 with q_tmax=400
double sqrt2 = Math.sqrt(2.0);
double sqrtl = 20;
double a = 1.0025 * Math.exp(0.0625) * sqrt2;
double b = 1.0025 * Math.exp(0.125 / 400); //Math.exp (0.0003125)
double startk = 1.0 / (2.0 * sqrt2 * 400);
double stopk = sqrtl / (2 * sqrt2);
if (k>upper_k_1)
k=upper_k_1;
for (k = startk; k <= stopk; k += 3.0) {
double d = 1.0 / (1 - Math.exp((-2.0 / 9.0) * (k * sqrt2 * sqrtl + 1.5)));
double f = a * d * Math.exp(-0.5 * k * k) / (k * Math.sqrt(2.0 * Math.PI));
this.right = (int) Math.ceil(m+k*sqrt2*sqrtq + 1.5);
if (f <= accuracy / 2.0)
break;
}
//use Corollary 2 to find left truncation point
//NOTE: the original implementation used some upper bound on k,
// however, I didn't find it in the paper and I think it is not needed
final double lower_k_2 = 1.0/(sqrt2*sqrtq); //lower bound on k from Corollary 2
if (k > stopk)
k = stopk;
right = (int) Math.ceil(m + k * sqrt2 * sqrtl + 1.5);
}
if (q_tmax >= 400.0) {
// Find right using Corollary 1 using actual q_tmax
double sqrt2 = Math.sqrt(2.0);
double sqrtl = Math.sqrt(q_tmax);
double a = (1.0 + 1.0 / q_tmax) * Math.exp(0.0625) * sqrt2;
double b = (1.0 + 1.0 / q_tmax) * Math.exp(0.125 / q_tmax);
double startk = 1.0 / (2.0 * sqrt2 * q_tmax);
double stopk = sqrtl / (2 * sqrt2);
for (k = startk; k <= stopk; k += 3.0) {
double d = 1.0 / (1 - Math.exp((-2.0 / 9.0) * (k * sqrt2 * sqrtl + 1.5)));
double f = a * d * Math.exp(-0.5 * k * k) / (k * Math.sqrt(2.0 * Math.PI));
if (f <= accuracy / 2.0)
break;
double res;
k=lower_k_2;
do
{
res = bq*Math.exp(-k*k/2.0)/(k*sqrt2*sqrtpi); //right hand side of the equation in Corollary 2
k++;
}
while (res > accuracy/2.0);
this.left = (int) (m - k*sqrtq - 1.5);
//According to the paper, we should check for underflow of the lower bound.
//However, it seems that this cannot happen for any reasonable values,
//and the original implementation did not check it either.
double wm = overflow / (factor*(this.right - this.left));
this.weights = new double[this.right-this.left+1];
this.weights[m-this.left] = wm;
}
if (k > stopk)
k = stopk;
right = (int) Math.ceil(m + k * sqrt2 * sqrtl + 1.5);
}
if (q_tmax >= 25.0) {
// Find left using Corollary 2 using actual q_tmax
double sqrt2 = Math.sqrt(2.0);
double sqrtl = Math.sqrt(q_tmax);
double a = (1.0 + 1.0 / q_tmax) * Math.exp(0.0625) * sqrt2;
double b = (1.0 + 1.0 / q_tmax) * Math.exp(0.125 / q_tmax);
double startk = 1.0 / (sqrt2 * sqrtl);
double stopk = (m - 1.5) / (sqrt2 * sqrtl);
for (k = startk; k <= stopk; k += 3.0) {
if (b * Math.exp(-0.5 * k * k) / (k * Math.sqrt(2.0 * Math.PI)) <= accuracy / 2.0)
break;
//end of FINDER
//compute weights
//(at this point this.left, this.right and this.weight[m] is known)
//Down from m
for(int j=m; j>this.left; j--)
this.weights[j-1-this.left] = (j/q_tmax)*this.weights[j-this.left];
//Up from m
for(int j=m; j<this.right; j++)
this.weights[j+1-this.left] = (q_tmax/(j+1))*this.weights[j-this.left];
//Compute totalWeight (i.e. W in the paper)
//instead of summing from left to right, start from smallest
//and go to highest weights to prevent roundoff
this.totalWeight = 0.0;
int s = this.left;
int t = this.right;
while (s<t)
{
if(this.weights[s - this.left] <= this.weights[t - this.left])
{
this.totalWeight += this.weights[s-this.left];
s++;
}
else
{
this.totalWeight += this.weights[t-this.left];
t--;
}
}
if (k > stopk)
k = stopk;
left = (int) Math.floor(m - k * sqrtl - 1.5);
}
if (left < 0) {
left = 0;
System.out.println("Weighter: negative left truncation point found. Ignored.");
this.totalWeight += this.weights[s-this.left];
}
return overflow / (Math.pow(10.0, 10.0) * (right - left));
}
public static void test()

2
prism/src/hybrid/PH_StochBoundedUntil.cc

@ -99,7 +99,7 @@ jlong __jlongpointer mu // probs for multiplying
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, iters, num_iters;
long i, iters, num_iters;
double x, kb, kbt, max_diag, weight, term_crit_param_unif;
// exception handling around whole function

2
prism/src/hybrid/PH_StochCumulReward.cc

@ -98,7 +98,7 @@ jdouble time // time bound
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, iters, num_iters;
long i, iters, num_iters;
double max_diag, weight, kb, kbt, term_crit_param_unif;
// exception handling around whole function

2
prism/src/hybrid/PH_StochTransient.cc

@ -94,7 +94,7 @@ jdouble time // time bound
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, iters, num_iters;
long i, iters, num_iters;
double kb, kbt, max_diag, weight, term_crit_param_unif;
// exception handling around whole function

2
prism/src/mtbdd/PM_StochBoundedUntil.cc

@ -72,7 +72,7 @@ jlong __jlongpointer mu // probs for multiplying
long start1, start2, start3, stop;
double time_taken, time_for_setup, time_for_iters;
// misc
int i, iters, num_iters;
long i, iters, num_iters;
double x, max_diag, weight, unif, term_crit_param_unif;
bool done, combine;

2
prism/src/mtbdd/PM_StochCumulReward.cc

@ -71,7 +71,7 @@ jdouble time // time bound
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, iters, num_iters;
long i, iters, num_iters;
double max_diag, weight, unif, term_crit_param_unif;
// start clocks

2
prism/src/mtbdd/PM_StochTransient.cc

@ -68,7 +68,7 @@ jdouble time // time
long start1, start2, start3, stop;
double time_taken, time_for_setup, time_for_iters;
// misc
int i, iters, num_iters;
long i, iters, num_iters;
double max_diag, weight, unif, term_crit_param_unif;
bool done, combine;

258
prism/src/prism/prism.cc

@ -3,7 +3,7 @@
// Copyright (c) 2002-
// Authors:
// * Dave Parker <david.parker@comlab.ox.ac.uk> (University of Oxford, formerly University of Birmingham)
// * Joachim Meyer-Kayser <Joachim.Meyer-Kayser@informatik.uni-erlangen.de> (University of Erlangen)
// * Vojtech Forejt <forejt@fi.muni.cz> (Masaryk University)
//
//------------------------------------------------------------------------------
//
@ -30,6 +30,7 @@
#include <stdio.h>
#include <math.h>
#include <new>
#include <vector>
//------------------------------------------------------------------------------
@ -78,150 +79,155 @@ void release_string_array_from_java(JNIEnv *env, jstring *strings_jstrings, cons
//------------------------------------------------------------------------------
// Compute poisson probabilities for uniformisation (Fox-Glynn method)
// NB: This function was written by Joachim Meyer-Kayser (converted from Java)
// Compute poisson probabilities for uniformisation. If q_tmax<400, then a naive implementation
// is used, otherwise the Fox-Glynn method is used.
// Note that Fox-Glynn method requires accuracy to be at least 1e-10.
EXPORT FoxGlynnWeights fox_glynn(double q_tmax, double underflow, double overflow, double accuracy)
{
int m;
double q;
FoxGlynnWeights fgw;
m = (int)floor(q_tmax);
{
double m2 = m;
double k;
if (q_tmax == 0.0) {
//FIXME: what should happen now?
printf("Overflow: TA parameter qtmax = time * maxExitRate = 0.");
fgw.right = -1;
}
else if (q_tmax < 400)
{ //here naive approach should have better performance than Fox Glynn
const double expcoef = exp(-q_tmax); //the "e^-lambda" part of p.m.f. of Poisson dist.
int k; //denotes that we work with event "k steps occur"
double lastval; //(probability that exactly k events occur)/expcoef
double accum; //(probability that 0 to k events occur)/expcoef
double desval = (1-(accuracy/2.0)) / expcoef; //value that we want to accumulate in accum before we stop
std::vector<double> w; //stores weights computed so far.
if (q_tmax == 0.0) {
printf("Overflow: TA parameter qtmax = time * maxExitRate = 0.");
}
if (q_tmax < 25.0) {
fgw.left = 0;
//k=0 is simple
lastval = 1;
accum = lastval;
w.push_back(lastval * expcoef);
//add further steps until you have accumulated enough
k = 1;
do {
lastval *= q_tmax / k; // invariant: lastval = q_tmax^k / k!
accum += lastval;
w.push_back(lastval * expcoef);
k++;
} while (accum < desval);
//store all data to fgw
fgw.left=0;
fgw.right=k-1;
fgw.weights = new double[k];
for(int i = 0; i < w.size(); i++)
{
fgw.weights[i] = w[i];
}
if (q_tmax < 400.0) {
// Find right using Corollary 1 with q_tmax=400
double sqrt2 = sqrt(2.0);
double sqrtl = 20;
double a = 1.0025 * exp (0.0625) * sqrt2;
double b = 1.0025 * exp (0.125/400); //exp (0.0003125)
double startk = 1.0/(2.0 * sqrt2 * 400);
double stopk = sqrtl/(2*sqrt2);
for (k = startk; k <= stopk; k += 3.0) {
double d = 1.0/(1 - exp ((-2.0/9.0)*(k*sqrt2*sqrtl + 1.5)));
double f = a * d * exp (-0.5*k*k) / (k * sqrt (2.0 * 3.1415926));
if (f <= accuracy/2.0)
break;
}
if (k > stopk)
k = stopk;
fgw.right = (int) ceil(m2 + k*sqrt2*sqrtl + 1.5);
//we return actual weights, so no reweighting should be done
fgw.total_weight = 1.0;
}
else
{ //use actual Fox Glynn for q_tmax>400
if (accuracy < 1e-10) {
//FIXME: what should happen now?
printf("Overflow: Accuracy is smaller than Fox Glynn can handle (must be at least 1e-10).");
fgw.right = -1;
}
if (q_tmax >= 400.0) {
// Find right using Corollary 1 using actual q_tmax
double sqrt2 = sqrt (2.0);
double sqrtl = sqrt (q_tmax);
double a = (1.0 + 1.0/q_tmax) * exp (0.0625) * sqrt2;
double b = (1.0 + 1.0/q_tmax) * exp (0.125/q_tmax);
double startk = 1.0/(2.0 * sqrt2 * q_tmax);
double stopk = sqrtl/(2*sqrt2);
for (k = startk; k <= stopk; k += 3.0) {
double d = 1.0/(1 - exp ((-2.0/9.0)*(k*sqrt2*sqrtl + 1.5)));
double f = a * d * exp (-0.5*k*k) / (k * sqrt (2.0 * 3.1415926));
if (f <= accuracy/2.0)
const double factor = 1e+10;
const long m = (long) q_tmax; //mode
//run FINDER to get left, right and weight[m]
{
const double sqrtpi = 1.7724538509055160; //square root of PI
const double sqrt2 = 1.4142135623730950; //square root of 2
const double sqrtq = sqrt(q_tmax);
const double aq = (1.0 + 1.0/q_tmax) * exp(0.0625) * sqrt2; //a_\lambda from the paper
const double bq = (1.0 + 1.0/q_tmax) * exp(0.125/q_tmax); //b_\lambda from the paper
//use Corollary 1 to find right truncation point
const double lower_k_1 = 1.0 / (2.0*sqrt2*q_tmax); //lower bound on k from Corollary 1
const double upper_k_1 = sqrtq / (2.0*sqrt2); //upper bound on k from Corollary 1
double k;
//justification for increment is in the paper:
//"increase k through the positive integers greater than 3"
for(k=lower_k_1; k <= upper_k_1;
k=(k==lower_k_1)? k+4 : k+1 )
{
double dkl = 1.0/(1 - exp(-(2.0/9.0)*(k*sqrt2*sqrtq+1.5))); //d(k,\lambda) from the paper
double res = aq*dkl*exp(-k*k/2.0)/(k*sqrt2*sqrtpi); //right hand side of the equation in Corollary 1
if (res <= accuracy/2.0)
{
break;
}
}
if (k>upper_k_1)
k=upper_k_1;
const double right_d = ceil(m+k*sqrt2*sqrtq + 1.5);
fgw.right = (long) right_d;
//use Corollary 2 to find left truncation point
//NOTE: the original implementation used some upper bound on k,
// however, I didn't find it in the paper and I think it is not needed
const double lower_k_2 = 1.0/(sqrt2*sqrtq); //lower bound on k from Corollary 2
double res;
k=lower_k_2;
do
{
res = bq*exp(-k*k/2.0)/(k*sqrt2*sqrtpi); //right hand side of the equation in Corollary 2
k++;
}
while (res > accuracy/2.0);
if (k > stopk)
k = stopk;
fgw.right = (int) ceil(m2 + k*sqrt2*sqrtl + 1.5);
}
if (q_tmax >= 25.0) {
// Find left using Corollary 2 using actual q_tmax
double sqrt2 = sqrt (2.0);
double sqrtl = sqrt (q_tmax);
double a = (1.0 + 1.0/q_tmax) * exp (0.0625) * sqrt2;
double b = (1.0 + 1.0/q_tmax) * exp (0.125/q_tmax);
double startk = 1.0/(sqrt2*sqrtl);
double stopk = (m2 - 1.5)/(sqrt2*sqrtl);
fgw.left = (long) (m - k*sqrtq - 1.5);
for (k = startk; k <= stopk; k += 3.0) {
if (b * exp(-0.5*k*k)/(k * sqrt (2.0 * 3.1415926)) <= accuracy/2.0)
break;
}
//According to the paper, we should check for underflow of the lower bound.
//However, it seems that this cannot happen for any reasonable values,
//and the original implementation did not check it either.
if (k > stopk)
k = stopk;
fgw.left = (int) floor(m2 - k*sqrtl - 1.5);
}
if (fgw.left < 0) {
fgw.left = 0;
//printf("Weighter: negative left truncation point found. Ignored.\n");
double wm = overflow / (factor*(fgw.right - fgw.left));
fgw.weights = new double[fgw.right-fgw.left+1];
fgw.weights[m-fgw.left] = wm;
}
//end of FINDER
//compute weights
//(at this point fgw.left, fgw.right and fgw.weight[m] is known)
q = overflow / (pow(10.0, 10.0) * (fgw.right - fgw.left));
}
fgw.weights = new double[fgw.right-fgw.left+1];
fgw.weights[m-fgw.left] = q;
// down
for (int j=m; j>fgw.left; j--) {
fgw.weights[j-1-fgw.left] = (j/q_tmax) * fgw.weights[j-fgw.left];
}
//up
if (q_tmax < 400) {
if (fgw.right > 600) {
printf("Overflow: right truncation point > 600.");
}
for (int j=m; j<fgw.right; ) {
q = q_tmax / (j+1);
if (fgw.weights[j-fgw.left] > underflow/q) {
fgw.weights[j+1-fgw.left] = q * fgw.weights[j-fgw.left];
j++;
}
else {
fgw.right = j;
}
}
}
else {
for (int j=m; j<fgw.right; j++) {
fgw.weights[j+1-fgw.left] = (q_tmax/(j+1)) * fgw.weights[j-fgw.left];
}
}
{
int l = fgw.left;
int r = fgw.right;
//Down from m
for(long j=m; j>fgw.left; j--)
fgw.weights[j-1-fgw.left] = (j/q_tmax)*fgw.weights[j-fgw.left];
//Up from m
for(long j=m; j<fgw.right; j++)
fgw.weights[j+1-fgw.left] = (q_tmax/(j+1))*fgw.weights[j-fgw.left];
//Compute total_weight (i.e. W in the paper)
//instead of summing from left to right, start from smallest
//and go to highest weights to prevent roundoff
fgw.total_weight = 0.0;
while (l < r) {
if (fgw.weights[l-fgw.left] <= fgw.weights[r-fgw.left]) {
fgw.total_weight += fgw.weights[l-fgw.left];
++l;
long s = fgw.left;
long t = fgw.right;
while (s<t)
{
if(fgw.weights[s - fgw.left] <= fgw.weights[t - fgw.left])
{
fgw.total_weight += fgw.weights[s-fgw.left];
s++;
}
else {
fgw.total_weight += fgw.weights[r-fgw.left];
--r;
else
{
fgw.total_weight += fgw.weights[t-fgw.left];
t--;
}
}
fgw.total_weight += fgw.weights[l-fgw.left];
fgw.total_weight += fgw.weights[s-fgw.left];
}
return fgw;
}

3
prism/src/sparse/PS_StochBoundedUntil.cc

@ -85,7 +85,8 @@ jlong __jlongpointer mu // probs for multiplying
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, j, l, h, iters, num_iters;
int j, l, h;
long i, iters, num_iters;
double d, x, max_diag, weight, kb, kbt, unif, term_crit_param_unif;
// exception handling around whole function

3
prism/src/sparse/PS_StochCumulReward.cc

@ -83,7 +83,8 @@ jdouble time // time bound
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, j, l, h, iters, num_iters;
int j, l, h;
long i, iters, num_iters;
double d, max_diag, weight, kb, kbt, unif, term_crit_param_unif;
// exception handling around whole function

3
prism/src/sparse/PS_StochTransient.cc

@ -79,7 +79,8 @@ jdouble time // time bound
double time_taken, time_for_setup, time_for_iters;
// misc
bool done;
int i, j, l, h, iters, num_iters;
int j, l, h;
long i, iters, num_iters;
double d, max_diag, weight, kb, kbt, unif, term_crit_param_unif;
// exception handling around whole function

Loading…
Cancel
Save