diff --git a/prism/src/explicit/MDPSimple.java b/prism/src/explicit/MDPSimple.java
index 60db7605..48be9e09 100644
--- a/prism/src/explicit/MDPSimple.java
+++ b/prism/src/explicit/MDPSimple.java
@@ -891,13 +891,11 @@ public class MDPSimple extends ModelExplicit implements MDP, ModelSimple
 	@Override
 	public double mvMultRewMinMaxSingle(int s, double vect[], MDPRewards mdpRewards, boolean min, int adv[])
 	{
-		int j, k;
+		int j, k, advCh = -1;
 		double d, prob, minmax;
 		boolean first;
 		List step;
 
-		// TODO: implement adv. gen.
-
 		minmax = 0;
 		first = true;
 		j = -1;
@@ -912,10 +910,22 @@ public class MDPSimple extends ModelExplicit implements MDP, ModelSimple
 				d += prob * vect[k];
 			}
 			// Check whether we have exceeded min/max so far
-			if (first || (min && d < minmax) || (!min && d > minmax))
+			if (first || (min && d < minmax) || (!min && d > minmax)) {
 				minmax = d;
+				// If adversary generation is enabled, remember optimal choice
+				if (adv != null)
+					advCh = j;
+			}
 			first = false;
 		}
+		// If adversary generation is enabled, store optimal choice
+		if (adv != null & !first) {
+			// Only remember strictly better choices (required for max)
+			if (adv[s] == -1 || (min && minmax < vect[s]) || (!min && minmax > vect[s])) {
+				adv[s] = advCh;
+			}
+		}
+
 		// Add state reward (doesn't affect min/max)
 		minmax += mdpRewards.getStateReward(s);
 		return minmax;
diff --git a/prism/src/explicit/MDPSparse.java b/prism/src/explicit/MDPSparse.java
index 6b2ce2a5..46c5d121 100644
--- a/prism/src/explicit/MDPSparse.java
+++ b/prism/src/explicit/MDPSparse.java
@@ -961,12 +961,10 @@ public class MDPSparse extends ModelExplicit implements MDP
 	@Override
 	public double mvMultRewMinMaxSingle(int s, double vect[], MDPRewards mdpRewards, boolean min, int adv[])
 	{
-		int j, k, l1, h1, l2, h2;
+		int j, k, l1, h1, l2, h2, advCh = -1;
 		double d, minmax;
 		boolean first;
 
-		// TODO: implement adv. gen.
-
 		minmax = 0;
 		first = true;
 		l1 = rowStarts[s];
@@ -980,10 +978,22 @@ public class MDPSparse extends ModelExplicit implements MDP
 				d += nonZeros[k] * vect[cols[k]];
 			}
 			// Check whether we have exceeded min/max so far
-			if (first || (min && d < minmax) || (!min && d > minmax))
+			if (first || (min && d < minmax) || (!min && d > minmax)) {
 				minmax = d;
+				// If adversary generation is enabled, remember optimal choice
+				if (adv != null)
+					advCh = j - l1;
+			}
 			first = false;
 		}
+		// If adversary generation is enabled, store optimal choice
+		if (adv != null & !first) {
+			// Only remember strictly better choices (required for max)
+			if (adv[s] == -1 || (min && minmax < vect[s]) || (!min && minmax > vect[s])) {
+				adv[s] = advCh;
+			}
+		}
+
		// Add state reward (doesn't affect min/max)
 		minmax += mdpRewards.getStateReward(s);
 		return minmax;
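For context, the sketch below shows how a value-iteration loop might drive the adversary-generation path added by this patch: adv is initialised to -1 for every state (so the adv[s] == -1 guard fires on the first improvement), each sweep calls the patched mvMultRewMinMaxSingle, and on convergence adv[s] holds the index of the optimal choice in state s. This is a minimal illustration under stated assumptions, not PRISM's actual solver: the class AdvGenSketch, the method solveWithAdversary, and the tolerance/iteration cap are made up for the example; only mvMultRewMinMaxSingle and getNumStates come from the patched interfaces, and the imports assume PRISM's explicit and explicit.rewards packages.

import java.util.Arrays;

import explicit.MDP;
import explicit.rewards.MDPRewards;

public class AdvGenSketch
{
	/**
	 * Hypothetical driver: Jacobi-style value iteration over expected
	 * rewards that also extracts a memoryless adversary via adv[].
	 */
	public static int[] solveWithAdversary(MDP mdp, MDPRewards rewards, boolean min)
	{
		int n = mdp.getNumStates();
		double[] vect = new double[n];
		double[] next = new double[n];
		// -1 marks "no choice recorded yet"; the patched code only
		// overwrites adv[s] on a strict improvement over vect[s]
		int[] adv = new int[n];
		Arrays.fill(adv, -1);
		double epsilon = 1e-6; // illustrative tolerance
		for (int iters = 0; iters < 10000; iters++) { // illustrative cap
			boolean done = true;
			for (int s = 0; s < n; s++) {
				// Each call returns the min/max value for state s and,
				// since adv != null, may record the optimal choice index
				next[s] = mdp.mvMultRewMinMaxSingle(s, vect, rewards, min, adv);
				if (Math.abs(next[s] - vect[s]) > epsilon)
					done = false;
			}
			// Swap vectors for the next sweep
			double[] tmp = vect;
			vect = next;
			next = tmp;
			if (done)
				break;
		}
		// On exit, adv[s] is the state-local index of the optimal choice
		// in s: j for MDPSimple, j - l1 for MDPSparse
		return adv;
	}
}

Note the guard adv[s] == -1 || (min && minmax < vect[s]) || (!min && minmax > vect[s]) in the patch: a choice is only recorded when it strictly improves on the current value. As the in-code comment says, this matters for max: a choice that merely ties the current value could otherwise be stored and yield an adversary that loops without making progress.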