
More updates to explicit library:

* removed some old rewards code from explicit models
* (and temporarily disabled a few things in PrismSTPGAbstractRefine accordingly)
* added method addActionLabelledChoice to MDPSimple (see the usage sketch below)



git-svn-id: https://www.prismmodelchecker.org/svn/prism/prism/trunk@3325 bbc10eb1-c90d-0410-af57-cb519fbb1720
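For reference, a minimal sketch of how the new method might be used (assuming an MDPSimple(int numStates) constructor and Distribution.add(int, double) as in later versions of the explicit library; the states and action label here are made up):

import explicit.Distribution;
import explicit.MDPSimple;

// Build a 2-state MDP by hand
MDPSimple mdp = new MDPSimple(2);
// From state 0: move to state 1 with probability 0.3, stay with 0.7
Distribution distr = new Distribution();
distr.add(1, 0.3);
distr.add(0, 0.7);
// Add the choice with action label "a"; returns the index of the
// (existing or newly added) choice, or -1 on error
int i = mdp.addActionLabelledChoice(0, distr, "a");
// Re-adding the same action/distribution pair returns the same index
// (duplicates are suppressed unless the allowDupes flag is set)
int j = mdp.addActionLabelledChoice(0, distr, "a");
assert i == j;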
Dave Parker, 15 years ago
commit edab23b581 (branch: master)
Changed files:

1. prism/src/explicit/DTMC.java (5 changed lines)
2. prism/src/explicit/DTMCEmbeddedSimple.java (7 changed lines)
3. prism/src/explicit/DTMCFromMDPMemorylessAdversary.java (7 changed lines)
4. prism/src/explicit/DTMCSimple.java (58 changed lines)
5. prism/src/explicit/DTMCUniformisedSimple.java (7 changed lines)
6. prism/src/explicit/MDPSimple.java (151 changed lines)
7. prism/src/explicit/MDPSparse.java (14 changed lines)
8. prism/src/explicit/PrismSTPGAbstractRefine.java (4 changed lines)
9. prism/src/explicit/STPG.java (5 changed lines)
10. prism/src/explicit/STPGAbstrSimple.java (61 changed lines)
11. prism/src/explicit/STPGModelChecker.java (3 changed lines)

prism/src/explicit/DTMC.java (5 changed lines)

@@ -46,11 +46,6 @@ public interface DTMC extends Model
*/
public Iterator<Entry<Integer, Double>> getTransitionsIterator(int s);
/**
* Get the transition reward (if any) for the transitions in state s.
*/
public double getTransitionReward(int s);
/**
* Perform a single step of precomputation algorithm Prob0, i.e., for states i in {@code subset},
* set bit i of {@code result} iff there is a transition to a state in {@code u}.

prism/src/explicit/DTMCEmbeddedSimple.java (7 changed lines)

@@ -30,7 +30,6 @@ import java.util.*;
import java.util.Map.Entry;
import explicit.rewards.MCRewards;
import parser.State;
import parser.Values;
import prism.ModelType;
@@ -210,12 +209,6 @@ public class DTMCEmbeddedSimple implements DTMC
throw new RuntimeException("Not implemented yet");
}
public double getTransitionReward(int s)
{
// TODO
return 0;
}
public void prob0step(BitSet subset, BitSet u, BitSet result)
{
int i;

prism/src/explicit/DTMCFromMDPMemorylessAdversary.java (7 changed lines)

@@ -30,7 +30,6 @@ import java.util.*;
import java.util.Map.Entry;
import explicit.rewards.MCRewards;
import parser.State;
import parser.Values;
import prism.ModelType;
@@ -196,12 +195,6 @@ public class DTMCFromMDPMemorylessAdversary implements DTMC
throw new RuntimeException("Not implemented yet");
}
public double getTransitionReward(int s)
{
// TODO
return 0;
}
public void prob0step(BitSet subset, BitSet u, BitSet result)
{
// TODO

prism/src/explicit/DTMCSimple.java (58 changed lines)

@@ -43,12 +43,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
// Transition matrix (distribution list)
protected List<Distribution> trans;
// Rewards
// (if transRewardsConstant non-null, use this for all transitions; otherwise, use transRewards list)
// (for transRewards, null in element s means no rewards for that state)
protected Double transRewardsConstant;
protected List<Double> transRewards;
// Other statistics
protected int numTransitions;
@@ -80,7 +74,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
for (int i = 0; i < numStates; i++) {
trans.set(i, new Distribution(dtmc.trans.get(i)));
}
// TODO: copy rewards
numTransitions = dtmc.numTransitions;
}
@@ -98,7 +91,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
for (int i = 0; i < numStates; i++) {
trans.set(permut[i], new Distribution(dtmc.trans.get(i), permut));
}
// TODO: permute rewards
numTransitions = dtmc.numTransitions;
}
@@ -112,7 +104,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
for (int i = 0; i < numStates; i++) {
trans.add(new Distribution());
}
clearAllRewards();
}
@Override
@@ -215,43 +206,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
}
}
/**
* Remove all rewards from the model
*/
public void clearAllRewards()
{
transRewards = null;
transRewardsConstant = null;
}
/**
* Set a constant reward for all transitions
*/
public void setConstantTransitionReward(double r)
{
// This replaces any other reward definitions
transRewards = null;
// Store as a Double (because we use null to check for its existence)
transRewardsConstant = new Double(r);
}
/**
* Set the reward for (all) transitions in state s to r.
*/
public void setTransitionReward(int s, double r)
{
// This would replace any constant reward definition, if it existed
transRewardsConstant = null;
// If no rewards array created yet, create it
if (transRewards == null) {
transRewards = new ArrayList<Double>(numStates);
for (int j = 0; j < numStates; j++)
transRewards.add(0.0);
}
// Set reward
transRewards.set(s, r);
}
// Accessors (for ModelSimple)
@Override
@@ -322,7 +276,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
public void exportToPrismExplicit(String baseFilename) throws PrismException
{
exportToPrismExplicitTra(baseFilename + ".tra");
// TODO: Output transition rewards to .trew file, etc.
}
@Override
@@ -447,16 +400,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
return trans.get(s).iterator();
}
@Override
public double getTransitionReward(int s)
{
if (transRewardsConstant != null)
return transRewardsConstant;
if (transRewards == null)
return 0.0;
return transRewards.get(s);
}
@Override
public void prob0step(BitSet subset, BitSet u, BitSet result)
{
@@ -673,7 +616,6 @@ public class DTMCSimple extends ModelSimple implements DTMC
DTMCSimple dtmc = (DTMCSimple) o;
if (!trans.equals(dtmc.trans))
return false;
// TODO: rewards
if (numTransitions != dtmc.numTransitions)
return false;
return true;

prism/src/explicit/DTMCUniformisedSimple.java (7 changed lines)

@@ -30,7 +30,6 @@ import java.util.*;
import java.util.Map.Entry;
import explicit.rewards.MCRewards;
import parser.State;
import parser.Values;
import prism.ModelType;
@@ -219,12 +218,6 @@ public class DTMCUniformisedSimple implements DTMC
throw new RuntimeException("Not implemented yet");
}
public double getTransitionReward(int s)
{
// TODO
throw new Error("Not yet supported");
}
public void prob0step(BitSet subset, BitSet u, BitSet result)
{
// TODO

prism/src/explicit/MDPSimple.java (151 changed lines)

@@ -33,7 +33,6 @@ import java.io.*;
import explicit.rewards.MDPRewards;
import explicit.rewards.MDPRewardsSimple;
import prism.ModelType;
import prism.PrismException;
import prism.PrismUtils;
@@ -53,13 +52,6 @@ public class MDPSimple extends ModelSimple implements MDP
// (null in element s means no actions for that state)
protected List<List<Object>> actions;
// Rewards
// (if transRewardsConstant non-null, use this for all transitions; otherwise, use transRewards list)
// (for transRewards, null in element s means no rewards for that state)
//protected Double transRewardsConstant;
//protected List<List<Double>> transRewards;
protected List<Double> stateRewards;
// Flag: allow duplicates in distribution sets?
protected boolean allowDupes = false;
@@ -105,7 +97,6 @@ public class MDPSimple extends ModelSimple implements MDP
setAction(s, i, mdp.getAction(s, i));
}
}
// TODO: copy rewards
}
/**
@@ -118,7 +109,6 @@ public class MDPSimple extends ModelSimple implements MDP
for (int s = 0; s < numStates; s++) {
addChoice(s, new Distribution(dtmc.getTransitions(s)));
}
// TODO: copy rewards, etc.
}
/**
@@ -142,7 +132,6 @@ public class MDPSimple extends ModelSimple implements MDP
setAction(permut[s], i, mdp.getAction(s, i));
}
}
// TODO: permute rewards
}
// Mutators (for ModelSimple)
@@ -158,7 +147,6 @@ public class MDPSimple extends ModelSimple implements MDP
trans.add(new ArrayList<Distribution>());
}
actions = null;
//clearAllRewards();
}
@Override
@@ -346,9 +334,9 @@ public class MDPSimple extends ModelSimple implements MDP
// Mutators (other)
/**
* Add a choice (distribution 'distr') to state s (which must exist).
* Distribution is only actually added if it does not already exists for state s.
* (Assuming 'allowDupes' flag is not enabled.)
* Add a choice (distribution {@code distr}) to state {@code s} (which must exist).
* Distribution is only actually added if it does not already exist for state {@code s}.
* (Assuming {@code allowDupes} flag is not enabled.)
* Returns the index of the (existing or newly added) distribution.
* Returns -1 in case of error.
*/
@@ -359,17 +347,16 @@ public class MDPSimple extends ModelSimple implements MDP
if (s >= numStates || s < 0)
return -1;
// Add distribution (if new)
set = trans.get(s);
if (!allowDupes) {
int i = set.indexOf(distr);
int i = indexOfChoice(s, distr);
if (i != -1)
return i;
}
set = trans.get(s);
set.add(distr);
// Add null action if necessary
if (actions != null && actions.get(s) != null)
actions.get(s).add(null);
// Add zero reward if necessary
// Update stats
numDistrs++;
maxNumDistrs = Math.max(maxNumDistrs, set.size());
@@ -378,16 +365,39 @@ public class MDPSimple extends ModelSimple implements MDP
}
/**
* Remove all rewards from the model
* Add a choice (distribution {@code distr}) labelled with {@code action} to state {@code s} (which must exist).
* Action/distribution is only actually added if it does not already exist for state {@code s}.
* (Assuming {@code allowDupes} flag is not enabled.)
* Returns the index of the (existing or newly added) distribution.
* Returns -1 in case of error.
*/
/*public void clearAllRewards()
public int addActionLabelledChoice(int s, Distribution distr, Object action)
{
transRewards = null;
transRewardsConstant = null;
}*/
List<Distribution> set;
// Check state exists
if (s >= numStates || s < 0)
return -1;
// Add distribution/action (if new)
if (!allowDupes) {
int i = indexOfActionLabelledChoice(s, distr, action);
if (i != -1)
return i;
}
set = trans.get(s);
set.add(distr);
setAction(s, set.size() - 1, action);
// Update stats
numDistrs++;
maxNumDistrs = Math.max(maxNumDistrs, set.size());
numTransitions += distr.size();
return set.size() - 1;
}
/**
* Set the action label for choice i in some state s.
* This method does not know about duplicates (i.e. if setting an action causes
* two choices to be identical, one will not be removed).
* Use {@link #addActionLabelledChoice(int, Distribution, Object)} which is more reliable.
*/
public void setAction(int s, int i, Object o)
{
@@ -410,57 +420,6 @@ public class MDPSimple extends ModelSimple implements MDP
actions.get(s).set(i, o);
}
/**
* Set a constant reward for all transitions
*/
/*public void setConstantTransitionReward(double r)
{
// This replaces any other reward definitions
transRewards = null;
// Store as a Double (because we use null to check for its existence)
transRewardsConstant = new Double(r);
}*/
public void setStateReward(int s, double r)
{
if (stateRewards == null) {
stateRewards = new ArrayList<Double>(numStates);
for (int i = 0; i < numStates; i++) {
stateRewards.add(0.0);
}
}
stateRewards.set(s, r);
}
/**
* Set the reward for choice i in some state s to r.
*/
public void setTransitionReward(int s, int i, double r)
{
//this.trans.get(s).get(i).setReward(r);
/*// This would replace any constant reward definition, if it existed
transRewardsConstant = null;
// If no rewards array created yet, create it
if (transRewards == null) {
transRewards = new ArrayList<List<Double>>(numStates);
for (int j = 0; j < numStates; j++)
transRewards.add(null);
}
// If no rewards for state i yet, create list
if (transRewards.get(s) == null) {
int n = trans.get(s).size();
List<Double> list = new ArrayList<Double>(n);
for (int j = 0; j < n; j++) {
list.add(0.0);
}
transRewards.set(s, list);
}
// Set reward
transRewards.get(s).set(i, r);*/
}
// Accessors (for ModelSimple)
@Override
@@ -1078,6 +1037,49 @@ public class MDPSimple extends ModelSimple implements MDP
return maxNumDistrs;
}
/**
* Returns the index of the choice {@code distr} for state {@code s}, if it exists.
* If none, -1 is returned. If there are multiple (i.e. allowDupes is true), the first is returned.
*/
public int indexOfChoice(int s, Distribution distr)
{
return trans.get(s).indexOf(distr);
}
/**
* Returns the index of the {@code action}-labelled choice {@code distr} for state {@code s}, if it exists.
* If none, -1 is returned. If there are multiple (i.e. allowDupes is true), the first is returned.
*/
public int indexOfActionLabelledChoice(int s, Distribution distr, Object action)
{
List<Distribution> set = trans.get(s);
int i, n = set.size();
if (distr == null) {
for (i = 0; i < n; i++) {
if (set.get(i) == null) {
Object a = getAction(s, i);
if (action == null) {
if (a == null) return i;
} else {
if (action.equals(a)) return i;
}
}
}
} else {
for (i = 0; i < n; i++) {
if (distr.equals(set.get(i))) {
Object a = getAction(s, i);
if (action == null) {
if (a == null) return i;
} else {
if (action.equals(a)) return i;
}
}
}
}
return -1;
}
// Standard methods
@Override
@@ -1120,7 +1122,6 @@ public class MDPSimple extends ModelSimple implements MDP
if (!trans.equals(mdp.trans))
return false;
// TODO: compare actions (complicated: null = null,null,null,...)
// TODO: compare rewards (complicated: null = 0,0,0,0)
return true;
}
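Note the new lookup helpers: indexOfChoice compares distributions only, while indexOfActionLabelledChoice treats two choices as duplicates only if both the distribution and the action label match (with null-safe comparison on both fields). A small illustration, continuing the hypothetical model from the sketch above:

// Same distribution under two different labels gives two distinct choices
Distribution d = new Distribution();
d.add(1, 1.0);
int i = mdp.addActionLabelledChoice(0, d, "a"); // added
int j = mdp.addActionLabelledChoice(0, d, "b"); // also added, so i != j
// Lookup matches on both distribution and action (null-safe):
assert mdp.indexOfActionLabelledChoice(0, d, "a") == i;
assert mdp.indexOfActionLabelledChoice(0, d, null) == -1;
// indexOfChoice ignores labels and returns the first match:
assert mdp.indexOfChoice(0, d) == i;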

prism/src/explicit/MDPSparse.java (14 changed lines)

@@ -60,12 +60,6 @@ public class MDPSparse extends ModelSparse implements MDP
/** Array of action labels for choices (is of size numDistrs) */
protected Object actions[];
// Rewards
// (if transRewardsConstant non-null, use this for all transitions; otherwise, use transRewards list)
// (for transRewards, null in element s means no rewards for that state)
protected Double transRewardsConstant;
protected List<List<Double>> transRewards;
// Other statistics
protected int numDistrs;
protected int numTransitions;
@@ -152,8 +146,6 @@ public class MDPSparse extends ModelSparse implements MDP
choiceStarts[numDistrs] = numTransitions;
rowStarts[numStates] = numDistrs;
// TODO copy rewards
}
/**
@@ -214,8 +206,6 @@ public class MDPSparse extends ModelSparse implements MDP
}
choiceStarts[numDistrs] = numTransitions;
rowStarts[numStates] = numDistrs;
// TODO: copy rewards
}
/**
@@ -287,8 +277,6 @@ public class MDPSparse extends ModelSparse implements MDP
}
choiceStarts[numDistrs] = numTransitions;
rowStarts[numStates] = numDistrs;
// TODO: copy rewards
}
// Mutators (for ModelSparse)
@@ -299,7 +287,6 @@ public class MDPSparse extends ModelSparse implements MDP
super.initialise(numStates);
numDistrs = numTransitions = maxNumDistrs = 0;
actions = null;
//clearAllRewards();
}
@Override
@@ -1095,7 +1082,6 @@ public class MDPSparse extends ModelSparse implements MDP
if (!Utils.intArraysAreEqual(rowStarts, mdp.rowStarts))
return false;
// TODO: compare actions (complicated: null = null,null,null,...)
// TODO: compare rewards (complicated: null = 0,0,0,0)*/
return true;
}

prism/src/explicit/PrismSTPGAbstractRefine.java (4 changed lines)

@@ -175,7 +175,7 @@ public class PrismSTPGAbstractRefine extends QuantAbstractRefine
case DTMC:
distr = buildAbstractDistribution(c, (DTMCSimple) modelConcrete);
j = ((MDPSimple) abstraction).addChoice(a, distr);
((MDPSimple) abstraction).setTransitionReward(a, j, ((DTMC) modelConcrete).getTransitionReward(c));
//((MDPSimple) abstraction).setTransitionReward(a, j, ((DTMC) modelConcrete).getTransitionReward(c));
break;
case CTMC:
distr = buildAbstractDistribution(c, (CTMCSimple) modelConcrete);
@@ -329,7 +329,7 @@ public class PrismSTPGAbstractRefine extends QuantAbstractRefine
case DTMC:
distr = buildAbstractDistribution(c, (DTMCSimple) modelConcrete);
j = ((MDPSimple) abstraction).addChoice(a, distr);
((MDPSimple) abstraction).setTransitionReward(a, j, ((DTMC) modelConcrete).getTransitionReward(c));
//((MDPSimple) abstraction).setTransitionReward(a, j, ((DTMC) modelConcrete).getTransitionReward(c));
break;
case CTMC:
distr = buildAbstractDistribution(c, (CTMCSimple) modelConcrete);

prism/src/explicit/STPG.java (5 changed lines)

@@ -39,11 +39,6 @@ public interface STPG extends Model
*/
public Object getAction(int s, int i);
/**
* Get the transition reward (if any) for choice i of state s.
*/
//public double getTransitionReward(int s, int i);
/**
* Perform a single step of precomputation algorithm Prob0, i.e., for states i in {@code subset},
* set bit i of {@code result} iff, for all/some player 1 choices, for all/some player 2 choices,

prism/src/explicit/STPGAbstrSimple.java (61 changed lines)

@@ -31,7 +31,6 @@ import java.util.*;
import java.io.*;
import explicit.rewards.STPGRewards;
import prism.ModelType;
import prism.PrismException;
import prism.PrismUtils;
@@ -81,7 +80,7 @@ public class STPGAbstrSimple extends ModelSimple implements STPG
{
DistributionSet set;
int i;
// TODO: actions? rewards?
// TODO: actions?
initialise(m.getNumStates());
copyFrom(m);
for (i = 0; i < numStates; i++) {
@@ -103,7 +102,6 @@ public class STPGAbstrSimple extends ModelSimple implements STPG
for (int i = 0; i < numStates; i++) {
trans.add(new ArrayList<DistributionSet>());
}
//clearAllRewards();
}
@Override
@@ -266,52 +264,6 @@ public class STPGAbstrSimple extends ModelSimple implements STPG
return set.size() - 1;
}
/**
* Remove all rewards from the model
*/
/*public void clearAllRewards()
{
transRewards = null;
transRewardsConstant = null;
}*/
/**
* Set a constant reward for all transitions
*/
/*public void setConstantTransitionReward(double r)
{
// This replaces any other reward definitions
transRewards = null;
// Store as a Double (because we use null to check for its existence)
transRewardsConstant = new Double(r);
}*/
/**
* Set the reward for choice i in some state s to r.
*/
/*public void setTransitionReward(int s, int i, double r)
{
// This would replace any constant reward definition, if it existed
transRewardsConstant = null;
// If no rewards array created yet, create it
if (transRewards == null) {
transRewards = new ArrayList<List<Double>>(numStates);
for (int j = 0; j < numStates; j++)
transRewards.add(null);
}
// If no rewards for state i yet, create list
if (transRewards.get(s) == null) {
int n = trans.get(s).size();
List<Double> list = new ArrayList<Double>(n);
for (int j = 0; j < n; j++) {
list.add(0.0);
}
transRewards.set(s, list);
}
// Set reward
transRewards.get(s).set(i, r);
}*/
// Accessors (for ModelSimple)
@Override
@@ -514,17 +466,6 @@ public class STPGAbstrSimple extends ModelSimple implements STPG
return null;
}
/*@Override
public double getTransitionReward(int s, int i)
{
List<Double> list;
if (transRewardsConstant != null)
return transRewardsConstant;
if (transRewards == null || (list = transRewards.get(s)) == null)
return 0.0;
return list.get(i);
}*/
@Override
public void prob0step(BitSet subset, BitSet u, boolean forall1, boolean forall2, BitSet result)
{

prism/src/explicit/STPGModelChecker.java (3 changed lines)

@@ -781,6 +781,7 @@ public class STPGModelChecker extends ProbModelChecker
/**
* Compute expected reachability rewards.
* @param stpg The STPG
* @param rewards The rewards
* @param target Target states
* @param min1 Min or max rewards for player 1 (true=min, false=max)
* @param min2 Min or max rewards for player 2 (true=min, false=max)
@@ -794,6 +795,7 @@ public class STPGModelChecker extends ProbModelChecker
* Compute expected reachability rewards.
* i.e. compute the min/max reward accumulated to reach a state in {@code target}.
* @param stpg The STPG
* @param rewards The rewards
* @param target Target states
* @param min1 Min or max rewards for player 1 (true=min, false=max)
* @param min2 Min or max rewards for player 2 (true=min, false=max)
@@ -864,6 +866,7 @@ public class STPGModelChecker extends ProbModelChecker
/**
* Compute expected reachability rewards using value iteration.
* @param stpg The STPG
* @param rewards The rewards
* @param target Target states
* @param inf States for which reward is infinite
* @param min1 Min or max rewards for player 1 (true=min, false=max)
