Browse Source

Adversary generation for rewards in explicit engine (from prism-games).

git-svn-id: https://www.prismmodelchecker.org/svn/prism/prism/trunk@3641 bbc10eb1-c90d-0410-af57-cb519fbb1720
master
Dave Parker 14 years ago
parent
commit
0bb97718c1
  1. 18
      prism/src/explicit/MDPSimple.java
  2. 18
      prism/src/explicit/MDPSparse.java

18
prism/src/explicit/MDPSimple.java

@@ -891,13 +891,11 @@ public class MDPSimple extends ModelExplicit implements MDP, ModelSimple
@Override
public double mvMultRewMinMaxSingle(int s, double vect[], MDPRewards mdpRewards, boolean min, int adv[])
{
int j, k;
int j, k, advCh = -1;
double d, prob, minmax;
boolean first;
List<Distribution> step;
// TODO: implement adv. gen.
minmax = 0;
first = true;
j = -1;
@@ -912,10 +910,22 @@
d += prob * vect[k];
}
// Check whether we have exceeded min/max so far
if (first || (min && d < minmax) || (!min && d > minmax))
if (first || (min && d < minmax) || (!min && d > minmax)) {
minmax = d;
// If adversary generation is enabled, remember optimal choice
if (adv != null)
advCh = j;
}
first = false;
}
// If adversary generation is enabled, store optimal choice
if (adv != null & !first) {
// Only remember strictly better choices (required for max)
if (adv[s] == -1 || (min && minmax < vect[s]) || (!min && minmax > vect[s])) {
adv[s] = advCh;
}
}
// Add state reward (doesn't affect min/max)
minmax += mdpRewards.getStateReward(s);
return minmax;

18
prism/src/explicit/MDPSparse.java

@@ -961,12 +961,10 @@ public class MDPSparse extends ModelExplicit implements MDP
@Override
public double mvMultRewMinMaxSingle(int s, double vect[], MDPRewards mdpRewards, boolean min, int adv[])
{
int j, k, l1, h1, l2, h2;
int j, k, l1, h1, l2, h2, advCh = -1;
double d, minmax;
boolean first;
// TODO: implement adv. gen.
minmax = 0;
first = true;
l1 = rowStarts[s];
@@ -980,10 +978,22 @@
d += nonZeros[k] * vect[cols[k]];
}
// Check whether we have exceeded min/max so far
if (first || (min && d < minmax) || (!min && d > minmax))
if (first || (min && d < minmax) || (!min && d > minmax)) {
minmax = d;
// If adversary generation is enabled, remember optimal choice
if (adv != null)
advCh = j - l1;
}
first = false;
}
// If adversary generation is enabled, store optimal choice
if (adv != null & !first) {
// Only remember strictly better choices (required for max)
if (adv[s] == -1 || (min && minmax < vect[s]) || (!min && minmax > vect[s])) {
adv[s] = advCh;
}
}
// Add state reward (doesn't affect min/max)
minmax += mdpRewards.getStateReward(s);
return minmax;

Loading…
Cancel
Save