rename hell pt. 3
commit 2b9ab7cd46
@@ -1,6 +1,9 @@
.idea
.ipynb_checkpoints
.DS_Store
.idea
notebooks
*.egg-info
__pycache__
Pipfile
Pipfile.lock
@@ -9,7 +12,10 @@ results
*.csv
*.txt
simulations/.ipynb_checkpoints
dist/SimCAD-0.1.tar.gz
simulations/validation/config3.py
dist/*.gz
cadCAD.egg-info

build
SimCAD.egg-info
cadCAD.egg-info
SimCAD.egg-info
@@ -9,7 +9,7 @@ Aided Design of economic systems. An economic system is treated as a state based
set of endogenous and exogenous state variables which are updated through mechanisms and environmental \
processes, respectively. Behavioral models, which may be deterministic or stochastic, provide the evolution of \
the system within the action space of the mechanisms. Mathematical formulations of these economic games \
treat agent utility as derived from state rather than direct from action, creating a rich dynamic modeling framework.
treat agent utility as derived from state rather than direct from action, creating a rich dynamic modeling framework.

Simulations may be run with a range of initial conditions and parameters for states, behaviors, mechanisms, \
and environmental processes to understand and visualize network behavior under various conditions. Support for \
@@ -19,6 +19,7 @@ A/B testing policies, monte carlo analysis and other common numerical methods is
```bash
pip install -r requirements.txt
python3 setup.py sdist bdist_wheel
pip3 install dist/*.whl
```

**2. Configure Simulation:**
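Illustrative sketch (not part of this commit): configuring a simulation means supplying policy functions and state-update functions with the signatures used throughout this diff; the names below are placeholders.

```python
# Placeholder policy: reads state `s` and proposes an input for the state updates.
def my_policy(_g, step, sL, s):
    return {'delta': 1}

# Placeholder state update: returns the (variable_name, new_value) tuple the engine expects.
def update_s1(_g, step, sL, s, _input):
    return 's1', s['s1'] + _input['delta']
```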
@@ -73,7 +74,7 @@ for raw_result, tensor_field in run2.main():
    print()
```

The above can be run in Jupyter.
The above can be run in Jupyter.
```bash
jupyter notebook
```
@@ -1,2 +0,0 @@
name = "SimCAD"
configs = []
@@ -1,97 +0,0 @@
from functools import reduce
from fn.op import foldr
import pandas as pd

from SimCAD.utils import key_filter
from SimCAD.configuration.utils.behaviorAggregation import dict_elemwise_sum


class Configuration:
    def __init__(self, sim_config, state_dict, seed, exogenous_states, env_processes, mechanisms, behavior_ops=[foldr(dict_elemwise_sum())]):
        self.sim_config = sim_config
        self.state_dict = state_dict
        self.seed = seed
        self.exogenous_states = exogenous_states
        self.env_processes = env_processes
        self.behavior_ops = behavior_ops
        self.mechanisms = mechanisms


class Identity:
    def __init__(self, behavior_id={'identity': 0}):
        self.beh_id_return_val = behavior_id

    def b_identity(self, step, sL, s):
        return self.beh_id_return_val

    def behavior_identity(self, k):
        return self.b_identity

    def no_state_identity(self, step, sL, s, _input):
        return None

    def state_identity(self, k):
        return lambda step, sL, s, _input: (k, s[k])

    def apply_identity_funcs(self, identity, df, cols):
        def fillna_with_id_func(identity, df, col):
            return df[[col]].fillna(value=identity(col))

        return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))


class Processor:
    def __init__(self, id=Identity()):
        self.id = id
        self.b_identity = id.b_identity
        self.behavior_identity = id.behavior_identity
        self.no_state_identity = id.no_state_identity
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs

    def create_matrix_field(self, mechanisms, key):
        if key == 'states':
            identity = self.state_identity
        elif key == 'behaviors':
            identity = self.behavior_identity
        df = pd.DataFrame(key_filter(mechanisms, key))
        col_list = self.apply_identity_funcs(identity, df, list(df.columns))
        if len(col_list) != 0:
            return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
        else:
            return pd.DataFrame({'empty': []})

    def generate_config(self, state_dict, mechanisms, exo_proc):

        def no_update_handler(bdf, sdf):
            if (bdf.empty == False) and (sdf.empty == True):
                bdf_values = bdf.values.tolist()
                sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(mechanisms))]
                return sdf_values, bdf_values
            elif (bdf.empty == True) and (sdf.empty == False):
                sdf_values = sdf.values.tolist()
                bdf_values = [[self.b_identity] * len(sdf_values) for m in range(len(mechanisms))]
                return sdf_values, bdf_values
            else:
                sdf_values = sdf.values.tolist()
                bdf_values = bdf.values.tolist()
                return sdf_values, bdf_values

        def only_ep_handler(state_dict):
            sdf_functions = [
                lambda step, sL, s, _input: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())
            ]
            sdf_values = [sdf_functions]
            bdf_values = [[self.b_identity] * len(sdf_values)]
            return sdf_values, bdf_values

        if len(mechanisms) != 0:
            bdf = self.create_matrix_field(mechanisms, 'behaviors')
            sdf = self.create_matrix_field(mechanisms, 'states')
            sdf_values, bdf_values = no_update_handler(bdf, sdf)
            zipped_list = list(zip(sdf_values, bdf_values))
        else:
            sdf_values, bdf_values = only_ep_handler(state_dict)
            zipped_list = list(zip(sdf_values, bdf_values))

        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
@@ -1,57 +0,0 @@
from datetime import datetime, timedelta
from decimal import Decimal
from fn.func import curried
import pandas as pd


class TensorFieldReport:
    def __init__(self, config_proc):
        self.config_proc = config_proc

    def create_tensor_field(self, mechanisms, exo_proc, keys=['behaviors', 'states']):
        dfs = [self.config_proc.create_matrix_field(mechanisms, k) for k in keys]
        df = pd.concat(dfs, axis=1)
        for es, i in zip(exo_proc, range(len(exo_proc))):
            df['es' + str(i + 1)] = es
        df['m'] = df.index + 1
        return df


def bound_norm_random(rng, low, high):
    res = rng.normal((high+low)/2,(high-low)/6)
    if (res<low or res>high):
        res = bound_norm_random(rng, low, high)
    return Decimal(res)


@curried
def proc_trigger(trigger_step, update_f, step):
    if step == trigger_step:
        return update_f
    else:
        return lambda x: x


t_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = t_delta):
    dt = datetime.strptime(dt_str, dt_format)
    t = dt + _timedelta
    return t.strftime(dt_format)


t_delta = timedelta(days=0, minutes=0, seconds=1)
def ep_time_step(s, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = t_delta):
    if s['mech_step'] == 0:
        return time_step(dt_str, fromat_str, _timedelta)
    else:
        return dt_str


def exo_update_per_ts(ep):
    @curried
    def ep_decorator(f, y, step, sL, s, _input):
        if s['mech_step'] + 1 == 1:
            return f(step, sL, s, _input)
        else:
            return (y, s[y])
    return {es: ep_decorator(f, es) for es, f in ep.items()}
@@ -1,91 +0,0 @@
from copy import deepcopy
from fn.op import foldr, call
from SimCAD.engine.utils import engine_exception

id_exception = engine_exception(KeyError, KeyError, None)


class Executor:
    def __init__(self, behavior_ops, behavior_update_exception=id_exception, state_update_exception=id_exception):
        self.behavior_ops = behavior_ops
        self.state_update_exception = state_update_exception
        self.behavior_update_exception = behavior_update_exception

    def get_behavior_input(self, step, sL, s, funcs):
        ops = self.behavior_ops[::-1]

        def get_col_results(step, sL, s, funcs):
            return list(map(lambda f: f(step, sL, s), funcs))

        return foldr(call, get_col_results(step, sL, s, funcs))(ops)

    def apply_env_proc(self, env_processes, state_dict, step):
        for state in state_dict.keys():
            if state in list(env_processes.keys()):
                env_state = env_processes[state]
                if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
                    state_dict[state] = env_state(step)(state_dict[state])
                else:
                    state_dict[state] = env_state(state_dict[state])

    def mech_step(self, m_step, sL, state_funcs, behavior_funcs, env_processes, t_step, run):
        last_in_obj = sL[-1]

        _input = self.state_update_exception(self.get_behavior_input(m_step, sL, last_in_obj, behavior_funcs))

        last_in_copy = dict([self.behavior_update_exception(f(m_step, sL, last_in_obj, _input)) for f in state_funcs])

        for k in last_in_obj:
            if k not in last_in_copy:
                last_in_copy[k] = last_in_obj[k]

        del last_in_obj

        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestamp'])

        last_in_copy["mech_step"], last_in_copy["time_step"], last_in_copy['run'] = m_step, t_step, run
        sL.append(last_in_copy)
        del last_in_copy

        return sL

    def mech_pipeline(self, states_list, configs, env_processes, t_step, run):
        m_step = 0
        states_list_copy = deepcopy(states_list)
        genesis_states = states_list_copy[-1]
        genesis_states['mech_step'], genesis_states['time_step'] = m_step, t_step
        states_list = [genesis_states]

        m_step += 1
        for config in configs:
            s_conf, b_conf = config[0], config[1]
            states_list = self.mech_step(m_step, states_list, s_conf, b_conf, env_processes, t_step, run)
            m_step += 1

        t_step += 1

        return states_list

    def block_pipeline(self, states_list, configs, env_processes, time_seq, run):
        time_seq = [x + 1 for x in time_seq]
        simulation_list = [states_list]
        for time_step in time_seq:
            pipe_run = self.mech_pipeline(simulation_list[-1], configs, env_processes, time_step, run)
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)

        return simulation_list

    def simulation(self, states_list, configs, env_processes, time_seq, runs):
        pipe_run = []
        for run in range(runs):
            run += 1
            states_list_copy = deepcopy(states_list)
            head, *tail = self.block_pipeline(states_list_copy, configs, env_processes, time_seq, run)
            genesis = head.pop()
            genesis['mech_step'], genesis['time_step'], genesis['run'] = 0, 0, run
            first_timestep_per_run = [genesis] + tail.pop(0)
            pipe_run += [first_timestep_per_run] + tail
            del states_list_copy

        return pipe_run
@@ -1,20 +0,0 @@
def pipe(x):
    return x


def print_pipe(x):
    print(x)
    return x


def flatten(l):
    return [item for sublist in l for item in sublist]


def key_filter(l, keyname):
    return [v[keyname] for k, v in l.items()]


def rename(new_name, f):
    f.__name__ = new_name
    return f
@@ -0,0 +1,2 @@
name = "cadCAD"
configs = []
@@ -0,0 +1,138 @@
from functools import reduce
from fn.op import foldr
import pandas as pd

from cadCAD import configs

from cadCAD.utils import key_filter
from cadCAD.configuration.utils import exo_update_per_ts
from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config


class Configuration(object):
    def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
                 exogenous_states={}, partial_state_update_blocks={}, policy_ops=[foldr(dict_elemwise_sum())], **kwargs):
        self.sim_config = sim_config
        self.initial_state = initial_state
        self.seeds = seeds
        self.env_processes = env_processes
        self.exogenous_states = exogenous_states
        self.partial_state_updates = partial_state_update_blocks
        self.policy_ops = policy_ops
        self.kwargs = kwargs

        sanitize_config(self)


def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={}, partial_state_update_blocks={}, _exo_update_per_ts=True):
    if _exo_update_per_ts is True:
        exogenous_states = exo_update_per_ts(raw_exogenous_states)
    else:
        exogenous_states = raw_exogenous_states

    if isinstance(sim_configs, list):
        for sim_config in sim_configs:
            config = Configuration(
                sim_config=sim_config,
                initial_state=initial_state,
                seeds=seeds,
                exogenous_states=exogenous_states,
                env_processes=env_processes,
                partial_state_update_blocks=partial_state_update_blocks
            )
            configs.append(config)
    elif isinstance(sim_configs, dict):
        config = Configuration(
            sim_config=sim_configs,
            initial_state=initial_state,
            seeds=seeds,
            exogenous_states=exogenous_states,
            env_processes=env_processes,
            partial_state_update_blocks=partial_state_update_blocks
        )
        configs.append(config)


class Identity:
    def __init__(self, policy_id={'identity': 0}):
        self.beh_id_return_val = policy_id

    def p_identity(self, var_dict, sub_step, sL, s):
        return self.beh_id_return_val

    def policy_identity(self, k):
        return self.p_identity

    def no_state_identity(self, var_dict, sub_step, sL, s, _input):
        return None

    def state_identity(self, k):
        return lambda var_dict, sub_step, sL, s, _input: (k, s[k])

    def apply_identity_funcs(self, identity, df, cols):
        def fillna_with_id_func(identity, df, col):
            return df[[col]].fillna(value=identity(col))

        return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))


class Processor:
    def __init__(self, id=Identity()):
        self.id = id
        self.p_identity = id.p_identity
        self.policy_identity = id.policy_identity
        self.no_state_identity = id.no_state_identity
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs

    def create_matrix_field(self, partial_state_updates, key):
        if key == 'variables':
            identity = self.state_identity
        elif key == 'policies':
            identity = self.policy_identity

        df = pd.DataFrame(key_filter(partial_state_updates, key))
        col_list = self.apply_identity_funcs(identity, df, list(df.columns))
        if len(col_list) != 0:
            return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
        else:
            return pd.DataFrame({'empty': []})

    def generate_config(self, initial_state, partial_state_updates, exo_proc):

        def no_update_handler(bdf, sdf):
            if (bdf.empty == False) and (sdf.empty == True):
                bdf_values = bdf.values.tolist()
                sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            elif (bdf.empty == True) and (sdf.empty == False):
                sdf_values = sdf.values.tolist()
                bdf_values = [[self.p_identity] * len(sdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            else:
                sdf_values = sdf.values.tolist()
                bdf_values = bdf.values.tolist()
                return sdf_values, bdf_values

        def only_ep_handler(state_dict):
            sdf_functions = [
                lambda var_dict, sub_step, sL, s, _input: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())
            ]
            sdf_values = [sdf_functions]
            bdf_values = [[self.p_identity] * len(sdf_values)]
            return sdf_values, bdf_values

        if len(partial_state_updates) != 0:
            # backwards compatibility # ToDo: Move this
            partial_state_updates = sanitize_partial_state_updates(partial_state_updates)

            bdf = self.create_matrix_field(partial_state_updates, 'policies')
            sdf = self.create_matrix_field(partial_state_updates, 'variables')
            sdf_values, bdf_values = no_update_handler(bdf, sdf)
            zipped_list = list(zip(sdf_values, bdf_values))
        else:
            sdf_values, bdf_values = only_ep_handler(initial_state)
            zipped_list = list(zip(sdf_values, bdf_values))

        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
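Illustrative sketch of `append_configs` (not part of this commit; the state and update names are placeholders): a dict for `sim_configs` registers a single `Configuration`, while a list registers one per entry, which is how parameter sweeps fan out into the module-level `configs` list.

```python
from cadCAD.configuration import append_configs

genesis_states = {'s1': 0}   # placeholder initial state
psubs = [{
    'policies': {},
    'variables': {'s1': lambda _g, step, sL, s, _input: ('s1', s['s1'] + 1)}
}]

append_configs(
    sim_configs={'N': 1, 'T': range(5), 'M': {}},   # a dict appends one config; a list appends several
    initial_state=genesis_states,
    partial_state_update_blocks=psubs
)
```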
@@ -0,0 +1,125 @@
from datetime import datetime, timedelta
from decimal import Decimal
from copy import deepcopy
from fn.func import curried
import pandas as pd

# Temporary
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates
from cadCAD.utils import dict_filter, contains_type


# ToDo: Fix - Returns empty when partial_state_update is missing in Configuration
class TensorFieldReport:
    def __init__(self, config_proc):
        self.config_proc = config_proc

    # ToDo: backwards compatibility
    def create_tensor_field(self, partial_state_updates, exo_proc, keys = ['policies', 'variables']):

        partial_state_updates = sanitize_partial_state_updates(partial_state_updates) # Temporary

        dfs = [self.config_proc.create_matrix_field(partial_state_updates, k) for k in keys]
        df = pd.concat(dfs, axis=1)
        for es, i in zip(exo_proc, range(len(exo_proc))):
            df['es' + str(i + 1)] = es
        df['m'] = df.index + 1
        return df


def state_update(y, x):
    return lambda var_dict, sub_step, sL, s, _input: (y, x)


def bound_norm_random(rng, low, high):
    res = rng.normal((high+low)/2, (high-low)/6)
    if res < low or res > high:
        res = bound_norm_random(rng, low, high)
    return Decimal(res)


@curried
def proc_trigger(trigger_time, update_f, time):
    if time == trigger_time:
        return update_f
    else:
        return lambda x: x


tstep_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = tstep_delta):
    dt = datetime.strptime(dt_str, dt_format)
    t = dt + _timedelta
    return t.strftime(dt_format)


ep_t_delta = timedelta(days=0, minutes=0, seconds=1)
def ep_time_step(s, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = ep_t_delta):
    if s['substep'] == 0:
        return time_step(dt_str, fromat_str, _timedelta)
    else:
        return dt_str

# mech_sweep_filter
def partial_state_sweep_filter(state_field, partial_state_updates):
    partial_state_dict = dict([(k, v[state_field]) for k, v in partial_state_updates.items()])
    return dict([
        (k, dict_filter(v, lambda v: isinstance(v, list))) for k, v in partial_state_dict.items()
        if contains_type(list(v.values()), list)
    ])


def state_sweep_filter(raw_exogenous_states):
    return dict([(k, v) for k, v in raw_exogenous_states.items() if isinstance(v, list)])

# sweep_mech_states
@curried
def sweep_partial_states(_type, in_config):
    configs = []
    # filtered_mech_states
    filtered_partial_states = partial_state_sweep_filter(_type, in_config.partial_state_updates)
    if len(filtered_partial_states) > 0:
        for partial_state, state_dict in filtered_partial_states.items():
            for state, state_funcs in state_dict.items():
                for f in state_funcs:
                    config = deepcopy(in_config)
                    config.partial_state_updates[partial_state][_type][state] = f
                    configs.append(config)
                    del config
    else:
        configs = [in_config]

    return configs


@curried
def sweep_states(state_type, states, in_config):
    configs = []
    filtered_states = state_sweep_filter(states)
    if len(filtered_states) > 0:
        for state, state_funcs in filtered_states.items():
            for f in state_funcs:
                config = deepcopy(in_config)
                exploded_states = deepcopy(states)
                exploded_states[state] = f
                if state_type == 'exogenous':
                    config.exogenous_states = exploded_states
                elif state_type == 'environmental':
                    config.env_processes = exploded_states
                configs.append(config)
                del config, exploded_states
    else:
        configs = [in_config]

    return configs


def exo_update_per_ts(ep):
    @curried
    def ep_decorator(f, y, var_dict, sub_step, sL, s, _input):
        if s['substep'] + 1 == 1:
            return f(var_dict, sub_step, sL, s, _input)
        else:
            return y, s[y]

    return {es: ep_decorator(f, es) for es, f in ep.items()}
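Illustrative sketch of two helpers defined above (not part of this commit; `env_b` is a placeholder): `proc_trigger` only applies its update function when the current time matches the trigger, and `exo_update_per_ts` restricts exogenous updates to the first substep of each timestep.

```python
from cadCAD.configuration.utils import proc_trigger

def env_b(x):
    return 10

trigger = proc_trigger(1, env_b)
assert trigger(1)(5) == 10   # at time 1 the environmental process fires
assert trigger(2)(5) == 5    # at any other time the value passes through unchanged
```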
@@ -0,0 +1,41 @@
from copy import deepcopy


def sanitize_config(config):
    # for backwards compatibility, we accept old arguments via **kwargs
    # TODO: raise specific deprecation warnings for key == 'state_dict', key == 'seed', key == 'mechanisms'
    for key, value in config.kwargs.items():
        if key == 'state_dict':
            config.initial_state = value
        elif key == 'seed':
            config.seeds = value
        elif key == 'mechanisms':
            config.partial_state_updates = value

    if config.initial_state == {}:
        raise Exception('The initial conditions of the system have not been set')


def sanitize_partial_state_updates(partial_state_updates):
    new_partial_state_updates = deepcopy(partial_state_updates)
    # for backwards compatibility we accept the old keys
    # ('behaviors' and 'states') and rename them
    def rename_keys(d):
        if 'behaviors' in d:
            d['policies'] = d.pop('behaviors')

        if 'states' in d:
            d['variables'] = d.pop('states')

    # Also for backwards compatibility, we accept partial state update blocks both as list or dict
    # No need for a deprecation warning as it's already raised by cadCAD.utils.key_filter
    if (type(new_partial_state_updates)==list):
        for v in new_partial_state_updates:
            rename_keys(v)
    elif (type(new_partial_state_updates)==dict):
        for k, v in new_partial_state_updates.items():
            rename_keys(v)

    del partial_state_updates
    return new_partial_state_updates
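Illustrative sketch of the backwards-compatibility shim above (not part of this commit): old-style blocks keyed with 'behaviors'/'states' come back keyed with 'policies'/'variables', and the input object is left untouched.

```python
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates

old_style = [{'behaviors': {}, 'states': {'s1': None}}]
new_style = sanitize_partial_state_updates(old_style)

assert 'policies' in new_style[0] and 'variables' in new_style[0]
assert 'behaviors' in old_style[0]   # the original block is not mutated
```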
@@ -0,0 +1,20 @@
from cadCAD.utils import flatten_tabulated_dict, tabulate_dict


def process_variables(d):
    return flatten_tabulated_dict(tabulate_dict(d))


def config_sim(d):
    if "M" in d:
        return [
            {
                "N": d["N"],
                "T": d["T"],
                "M": M
            }
            for M in process_variables(d["M"])
        ]
    else:
        d["M"] = [{}]
        return d
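Illustrative sketch of `config_sim` (not part of this commit; parameter names are placeholders): the lists under "M" are aligned position by position, producing one simulation config per position rather than a cartesian product.

```python
from cadCAD.configuration.utils.parameterSweep import config_sim

sweep = config_sim({
    "N": 2,
    "T": range(5),
    "M": {"alpha": [0.1, 0.2], "beta": [1, 2]}
})
# sweep == [
#     {"N": 2, "T": range(0, 5), "M": {"alpha": 0.1, "beta": 1}},
#     {"N": 2, "T": range(0, 5), "M": {"alpha": 0.2, "beta": 2}},
# ]
```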
@@ -2,17 +2,18 @@ from fn.op import foldr
from fn.func import curried


def get_base_value(datatype):
    if datatype is str:
def get_base_value(x):
    if isinstance(x, str):
        return ''
    elif datatype is int:
    elif isinstance(x, int):
        return 0
    elif datatype is list:
    elif isinstance(x, list):
        return []
    return 0
    else:
        return 0


def behavior_to_dict(v):
def policy_to_dict(v):
    return dict(list(zip(map(lambda n: 'b' + str(n + 1), list(range(len(v)))), v)))
@@ -32,7 +33,7 @@ def sum_dict_values():
    def dict_op(f, d1, d2):
        def set_base_value(target_dict, source_dict, key):
            if key not in target_dict:
                return get_base_value(type(source_dict[key]))
                return get_base_value(source_dict[key])
            else:
                return target_dict[key]
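Illustrative check of the two changes above (not part of this commit): `get_base_value` now dispatches on a value instead of a type, and the renamed `policy_to_dict` tags positional policy results as b1, b2, and so on.

```python
from cadCAD.configuration.utils.policyAggregation import get_base_value, policy_to_dict

assert get_base_value('') == '' and get_base_value(7) == 0 and get_base_value([1]) == []
assert policy_to_dict(['x', 42]) == {'b1': 'x', 'b2': 42}
```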
@@ -1,9 +1,9 @@
from pathos.multiprocessing import ProcessingPool as Pool

from SimCAD.utils import flatten
from SimCAD.configuration import Processor
from SimCAD.configuration.utils import TensorFieldReport
from SimCAD.engine.simulation import Executor as SimExecutor
from cadCAD.utils import flatten
from cadCAD.configuration import Processor
from cadCAD.configuration.utils import TensorFieldReport
from cadCAD.engine.simulation import Executor as SimExecutor


class ExecutionMode:
@@ -16,16 +16,16 @@ class ExecutionContext:
        self.name = context
        self.method = None

        def single_proc_exec(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns):
        def single_proc_exec(simulation_execs, var_dict, states_lists, configs_structs, env_processes_list, Ts, Ns):
            l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
            simulation, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
            result = simulation(states_list, config, env_processes, T, N)
            result = simulation(var_dict, states_list, config, env_processes, T, N)
            return flatten(result)

        def parallelize_simulations(fs, states_list, configs, env_processes, Ts, Ns):
            l = list(zip(fs, states_list, configs, env_processes, Ts, Ns))
        def parallelize_simulations(fs, var_dict_list, states_list, configs, env_processes, Ts, Ns):
            l = list(zip(fs, var_dict_list, states_list, configs, env_processes, Ts, Ns))
            with Pool(len(configs)) as p:
                results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5]), l)
                results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6]), l)
            return results

        if context == 'single_proc':
@@ -47,30 +47,33 @@ class Executor:
        create_tensor_field = TensorFieldReport(config_proc).create_tensor_field

        print(self.exec_context+": "+str(self.configs))
        states_lists, Ts, Ns, eps, configs_structs, env_processes_list, mechanisms, simulation_execs = \
            [], [], [], [], [], [], [], []
        var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, partial_state_updates, simulation_execs = \
            [], [], [], [], [], [], [], [], []
        config_idx = 0
        for x in self.configs:
            states_lists.append([x.state_dict])

            Ts.append(x.sim_config['T'])
            Ns.append(x.sim_config['N'])
            var_dict_list.append(x.sim_config['M'])
            states_lists.append([x.initial_state])
            eps.append(list(x.exogenous_states.values()))
            configs_structs.append(config_proc.generate_config(x.state_dict, x.mechanisms, eps[config_idx]))
            configs_structs.append(config_proc.generate_config(x.initial_state, x.partial_state_updates, eps[config_idx]))
            env_processes_list.append(x.env_processes)
            mechanisms.append(x.mechanisms)
            simulation_execs.append(SimExecutor(x.behavior_ops).simulation)
            partial_state_updates.append(x.partial_state_updates)
            simulation_execs.append(SimExecutor(x.policy_ops).simulation)

            config_idx += 1

        if self.exec_context == ExecutionMode.single_proc:
            tensor_field = create_tensor_field(mechanisms.pop(), eps.pop())
            result = self.exec_method(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
            # ToDO: Deprication Handler - "sanitize" in appropriate place
            tensor_field = create_tensor_field(partial_state_updates.pop(), eps.pop())
            result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
            return result, tensor_field
        elif self.exec_context == ExecutionMode.multi_proc:
            if len(self.configs) > 1:
                simulations = self.exec_method(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
                simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
                results = []
                for result, mechanism, ep in list(zip(simulations, mechanisms, eps)):
                    results.append((flatten(result), create_tensor_field(mechanism, ep)))
                for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
                    results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))

                return results
            return results
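Editorial note (hedged): the `var_dict_list` threaded through `exec_method` above carries each config's `sim_config['M']` dict; downstream it becomes the first argument of every policy and state-update function, named `_g` in the updated configs later in this diff. An illustrative sketch:

```python
# Illustrative only: a swept parameter from sim_config['M'] is read via `_g`.
def p1m1(_g, step, sL, s):
    return {'param1': _g.get('alpha', 1)}   # 'alpha' is a hypothetical swept parameter
```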
@@ -0,0 +1,103 @@
from copy import deepcopy
from fn.op import foldr, call

from cadCAD.engine.utils import engine_exception

id_exception = engine_exception(KeyError, KeyError, None)


class Executor:

    def __init__(self, policy_ops, policy_update_exception=id_exception, state_update_exception=id_exception):
        self.policy_ops = policy_ops # behavior_ops
        self.state_update_exception = state_update_exception
        self.policy_update_exception = policy_update_exception # behavior_update_exception

    # get_behavior_input
    def get_policy_input(self, var_dict, sub_step, sL, s, funcs):
        ops = self.policy_ops[::-1]

        def get_col_results(var_dict, sub_step, sL, s, funcs):
            return list(map(lambda f: f(var_dict, sub_step, sL, s), funcs))

        return foldr(call, get_col_results(var_dict, sub_step, sL, s, funcs))(ops)

    def apply_env_proc(self, env_processes, state_dict, sub_step):
        for state in state_dict.keys():
            if state in list(env_processes.keys()):
                env_state = env_processes[state]
                if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
                    state_dict[state] = env_state(sub_step)(state_dict[state])
                else:
                    state_dict[state] = env_state(state_dict[state])

    # mech_step
    def partial_state_update(self, var_dict, sub_step, sL, state_funcs, policy_funcs, env_processes, time_step, run):
        last_in_obj = sL[-1]

        _input = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sL, last_in_obj, policy_funcs))

        # ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
        last_in_copy = dict(
            [
                self.state_update_exception(f(var_dict, sub_step, sL, last_in_obj, _input)) for f in state_funcs
            ]
        )

        for k in last_in_obj:
            if k not in last_in_copy:
                last_in_copy[k] = last_in_obj[k]

        del last_in_obj

        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])

        last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
        sL.append(last_in_copy)
        del last_in_copy

        return sL


    # mech_pipeline
    def state_update_pipeline(self, var_dict, states_list, configs, env_processes, time_step, run):
        sub_step = 0
        states_list_copy = deepcopy(states_list)
        genesis_states = states_list_copy[-1]
        genesis_states['substep'], genesis_states['timestep'] = sub_step, time_step
        states_list = [genesis_states]

        sub_step += 1
        for config in configs:
            s_conf, p_conf = config[0], config[1]
            states_list = self.partial_state_update(var_dict, sub_step, states_list, s_conf, p_conf, env_processes, time_step, run)
            sub_step += 1

        time_step += 1

        return states_list

    def run_pipeline(self, var_dict, states_list, configs, env_processes, time_seq, run):
        time_seq = [x + 1 for x in time_seq]
        simulation_list = [states_list]
        for time_step in time_seq:
            pipe_run = self.state_update_pipeline(var_dict, simulation_list[-1], configs, env_processes, time_step, run)
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)

        return simulation_list

    # ToDo: Muiltithreaded Runs
    def simulation(self, var_dict, states_list, configs, env_processes, time_seq, runs):
        pipe_run = []
        for run in range(runs):
            run += 1
            states_list_copy = deepcopy(states_list)
            head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
            genesis = head.pop()
            genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
            first_timestep_per_run = [genesis] + tail.pop(0)
            pipe_run += [first_timestep_per_run] + tail
            del states_list_copy

        return pipe_run
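Illustrative sketch (not part of this commit) of the bookkeeping the new `Executor` adds to every state record; the key names come from the code above, the `s1` value is a placeholder.

```python
example_record = {
    's1': 1,          # a model state variable (illustrative)
    'substep': 2,     # index of the partial state update within the timestep (0 for genesis)
    'timestep': 3,    # outer simulation time step
    'run': 1          # Monte Carlo run, counted from 1
}
```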
@@ -24,6 +24,8 @@ def retrieve_state(l, offset):
    return l[last_index(l) + offset + 1]


# exception_function = f(sub_step, sL, sL[-2], _input)
# try_function = f(sub_step, sL, last_mut_obj, _input)
@curried
def engine_exception(ErrorType, error_message, exception_function, try_function):
    try:
@@ -31,3 +33,10 @@ def engine_exception(ErrorType, error_message, exception_function, try_function)
    except ErrorType:
        print(error_message)
        return exception_function


@curried
def fit_param(param, x):
    return x + param

# fit_param = lambda param: lambda x: x + param
@@ -0,0 +1,136 @@
from collections import defaultdict
from itertools import product
import warnings

def pipe(x):
    return x


def print_pipe(x):
    print(x)
    return x


def flattenDict(l):
    def tupalize(k, vs):
        l = []
        if isinstance(vs, list):
            for v in vs:
                l.append((k, v))
        else:
            l.append((k, vs))
        return l

    flat_list = [tupalize(k, vs) for k, vs in l.items()]
    flat_dict = [dict(items) for items in product(*flat_list)]
    return flat_dict


def flatten(l):
    if isinstance(l, list):
        return [item for sublist in l for item in sublist]
    elif isinstance(l, dict):
        return flattenDict(l)


def flatMap(f, collection):
    return flatten(list(map(f, collection)))


def dict_filter(dictionary, condition):
    return dict([(k, v) for k, v in dictionary.items() if condition(v)])


def get_max_dict_val_len(g):
    return len(max(g.values(), key=len))


def tabulate_dict(d):
    max_len = get_max_dict_val_len(d)
    _d = {}
    for k, vl in d.items():
        if len(vl) != max_len:
            _d[k] = vl + list([vl[-1]] * (max_len-1))
        else:
            _d[k] = vl

    return _d


def flatten_tabulated_dict(d):
    max_len = get_max_dict_val_len(d)
    dl = [{} for i in range(max_len)]

    for k, vl in d.items():
        for v, i in zip(vl, list(range(len(vl)))):
            dl[i][k] = v

    return dl


def contains_type(_collection, type):
    return any(isinstance(x, type) for x in _collection)


def drop_right(l, n):
    return l[:len(l) - n]

# backwards compatibility
# ToDo: Encapsulate in function
def key_filter(l, keyname):
    if (type(l) == list):
        return [v[keyname] for v in l]
    # Keeping support to dictionaries for backwards compatibility
    # Should be removed in the future
    warnings.warn(
        "The use of a dictionary to describe Partial State Update Blocks will be deprecated. Use a list instead.",
        FutureWarning)
    return [v[keyname] for k, v in l.items()]


def groupByKey(l):
    d = defaultdict(list)
    for key, value in l:
        d[key].append(value)
    return list(dict(d).items()).pop()


# @curried
def rename(new_name, f):
    f.__name__ = new_name
    return f


def curry_pot(f, *argv):
    sweep_ind = f.__name__[0:5] == 'sweep'
    arg_len = len(argv)
    if sweep_ind is True and arg_len == 4:
        return f(argv[0])(argv[1])(argv[2])(argv[3])
    elif sweep_ind is False and arg_len == 4:
        return f(argv[0], argv[1], argv[2], argv[3])
    elif sweep_ind is True and arg_len == 3:
        return f(argv[0])(argv[1])(argv[2])
    elif sweep_ind is False and arg_len == 3:
        return f(argv[0], argv[1], argv[2])
    else:
        raise TypeError('curry_pot() needs 3 or 4 positional arguments')

# def curry_pot(f, *argv):
#     sweep_ind = f.__name__[0:5] == 'sweep'
#     arg_len = len(argv)
#     if sweep_ind is True and arg_len == 4:
#         return f(argv[0])(argv[1])(argv[2])(argv[3])
#     elif sweep_ind is False and arg_len == 4:
#         return f(argv[0])(argv[1])(argv[2])(argv[3])
#     elif sweep_ind is True and arg_len == 3:
#         return f(argv[0])(argv[1])(argv[2])
#     elif sweep_ind is False and arg_len == 3:
#         return f(argv[0])(argv[1])(argv[2])
#     else:
#         raise TypeError('curry_pot() needs 3 or 4 positional arguments')

# def rename(newname):
#     def decorator(f):
#         f.__name__ = newname
#         return f
#     return decorator
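Illustrative sketch of the sweep helpers above (not part of this commit; equal-length lists keep the padding rule out of the picture).

```python
from cadCAD.utils import tabulate_dict, flatten_tabulated_dict

d = {"alpha": [1, 2], "beta": [10, 20]}
assert tabulate_dict(d) == d                      # already rectangular, nothing to pad
assert flatten_tabulated_dict(d) == [
    {"alpha": 1, "beta": 10},
    {"alpha": 2, "beta": 20},
]
```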
Binary file not shown.
setup.py
@@ -1,6 +1,6 @@
from setuptools import setup, find_packages

long_description = "SimCAD is a differential games based simulation software package for research, validation, and \
long_description = "cadCAD is a differential games based simulation software package for research, validation, and \
Computer Aided Design of economic systems. An economic system is treated as a state based model and defined through \
a set of endogenous and exogenous state variables which are updated through mechanisms and environmental processes, \
respectively. Behavioral models, which may be deterministic or stochastic, provide the evolution of the system \
@@ -10,14 +10,14 @@ long_description = "SimCAD is a differential games based simulation software pac
processes to understand and visualize network behavior under various conditions. Support for A/B testing policies, \
monte carlo analysis and other common numerical methods is provided."

setup(name='SimCAD',
      version='0.1',
      description="SimCAD: a differential games based simulation software package for research, validation, and \
setup(name='cadCAD',
      version='0.2',
      description="cadCAD: a differential games based simulation software package for research, validation, and \
      Computer Aided Design of economic systems",
      long_description = long_description,
      long_description=long_description,
      url='https://github.com/BlockScience/DiffyQ-SimCAD',
      author='Joshua E. Jodesty',
      author_email='joshua@block.science',
      # license='LICENSE',
      packages=find_packages() #['SimCAD']
      packages=find_packages()
      )
File diff suppressed because one or more lines are too long
@@ -1,29 +1,27 @@
import pandas as pd
from tabulate import tabulate

# The following imports NEED to be in the exact order
from SimCAD.engine import ExecutionMode, ExecutionContext, Executor
from validation import config1, config2
from SimCAD import configs
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import sweep_config, config1, config2, config4
from cadCAD import configs

exec_mode = ExecutionMode()

print("Simulation Execution 1")
print()
first_config = [configs[0]] # from config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
run1_raw_result, tensor_field = run1.main()
result = pd.DataFrame(run1_raw_result)
print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
# print("Simulation Execution 1")
# print()
# first_config = [configs[0]] # FOR non-sweep configs ONLY
# single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
# run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
# run1_raw_result, tensor_field = run1.main()
# result = pd.DataFrame(run1_raw_result)
# print()
# print("Tensor Field:")
# print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
# print("Output:")
# print(tabulate(result, headers='keys', tablefmt='psql'))
# print()

print("Simulation Execution 2: Pairwise Execution")
print()
print("Simulation Execution 2: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
for raw_result, tensor_field in run2.main():
@@ -33,4 +31,4 @@ for raw_result, tensor_field in run2.main():
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print("Output:")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
    print()
@@ -1,171 +0,0 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta

from SimCAD import configs
from SimCAD.configuration import Configuration
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
    ep_time_step

seed = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}

# Behaviors per Mechanism
# Different return types per mechanism ?? *** No ***
def b1m1(step, sL, s):
    return {'param1': 1}
def b2m1(step, sL, s):
    return {'param1': 1}

def b1m2(step, sL, s):
    return {'param1': 1, 'param2': 2}
def b2m2(step, sL, s):
    return {'param1': 1, 'param2': 4}

def b1m3(step, sL, s):
    return {'param1': 1, 'param2': np.array([10, 100])}
def b2m3(step, sL, s):
    return {'param1': 1, 'param2': np.array([20, 200])}

# deff not more than 2
# Internal States per Mechanism
def s1m1(step, sL, s, _input):
    y = 's1'
    x = s['s1'] + _input['param1']
    return (y, x)
def s2m1(step, sL, s, _input):
    y = 's2'
    x = s['s2'] + _input['param1']
    return (y, x)

def s1m2(step, sL, s, _input):
    y = 's1'
    x = s['s1'] + _input['param1']
    return (y, x)
def s2m2(step, sL, s, _input):
    y = 's2'
    x = s['s2'] + _input['param1']
    return (y, x)

def s1m3(step, sL, s, _input):
    y = 's1'
    x = s['s1'] + _input['param1']
    return (y, x)
def s2m3(step, sL, s, _input):
    y = 's2'
    x = s['s2'] + _input['param1']
    return (y, x)

# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3

def es3p1(step, sL, s, _input):
    y = 's3'
    x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

def es4p2(step, sL, s, _input):
    y = 's4'
    x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(step, sL, s, _input):
    y = 'timestamp'
    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)


# Environment States
def env_a(x):
    return 10
def env_b(x):
    return 10
# def what_ever(x):
#     return x + 1

# Genesis States
genesis_states = {
    's1': Decimal(0.0),
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
    'timestamp': '2018-10-01 15:16:24'
}

# remove `exo_update_per_ts` to update every ts
exogenous_states = exo_update_per_ts(
    {
        "s3": es3p1,
        "s4": es4p2,
        "timestamp": es5p2
    }
)

# make env proc trigger field agnostic

# ToDo: Bug - Can't use environments without proc_trigger. TypeError: 'int' object is not callable
# "/Users/jjodesty/Projects/DiffyQ-SimCAD/SimCAD/engine/simulation.py"
env_processes = {
    # "s3": env_a,
    # "s4": env_b
    "s3": proc_trigger('2018-10-01 15:16:25', env_a),
    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
}

# need at least 1 behaviour and 1 state function for the 1st mech with behaviors
# mechanisms = {}
mechanisms = {
    "m1": {
        "behaviors": {
            "b1": b1m1, # lambda step, sL, s: s['s1'] + 1,
            "b2": b2m1
        },
        "states": { # exclude only. TypeError: reduce() of empty sequence with no initial value
            "s1": s1m1,
            "s2": s2m1
        }
    },
    "m2": {
        "behaviors": {
            "b1": b1m2,
            "b2": b2m2
        },
        "states": {
            "s1": s1m2,
            "s2": s2m2
        }
    },
    "m3": {
        "behaviors": {
            "b1": b1m3,
            "b2": b2m3
        },
        "states": {
            "s1": s1m3,
            "s2": s2m3
        }
    }
}

sim_config = {
    "N": 2,
    "T": range(5)
}

configs.append(
    Configuration(
        sim_config=sim_config,
        state_dict=genesis_states,
        seed=seed,
        exogenous_states=exogenous_states,
        env_processes=env_processes,
        mechanisms=mechanisms
    )
)
@@ -1,180 +0,0 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta

from SimCAD import configs
from SimCAD.configuration import Configuration
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
    ep_time_step


seed = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}

# Behaviors per Mechanism
# Different return types per mechanism ?? *** No ***
def b1m1(step, sL, s):
    return {'param1': 1}
def b2m1(step, sL, s):
    return {'param2': 4}

def b1m2(step, sL, s):
    return {'param1': 'a', 'param2': 2}
def b2m2(step, sL, s):
    return {'param1': 'b', 'param2': 4}


def b1m3(step, sL, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}
def b2m3(step, sL, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


# Internal States per Mechanism
def s1m1(step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m1(step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)

def s1m2(step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m2(step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)

def s1m3(step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m3(step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)

# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3

def es3p1(step, sL, s, _input):
    y = 's3'
    x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

def es4p2(step, sL, s, _input):
    y = 's4'
    x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(step, sL, s, _input):
    y = 'timestamp'
    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)


# Environment States
def env_a(x):
    return 10
def env_b(x):
    return 10
# def what_ever(x):
#     return x + 1

# Genesis States
genesis_states = {
    's1': Decimal(0.0),
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
    'timestamp': '2018-10-01 15:16:24'
}

# remove `exo_update_per_ts` to update every ts
# why `exo_update_per_ts` here instead of `env_processes`
exogenous_states = exo_update_per_ts(
    {
        "s3": es3p1,
        "s4": es4p2,
        "timestamp": es5p2
    }
)

# make env proc trigger field agnostic
env_processes = {
    "s3": proc_trigger('2018-10-01 15:16:25', env_a),
    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
}

# lambdas
# genesis Sites should always be there
# [1, 2]
# behavior_ops = [ foldr(_ + _), lambda x: x + 0 ]


# [1, 2] = {'b1': ['a'], 'b2', [1]} =
# behavior_ops = [behavior_to_dict, print_fwd, sum_dict_values]
# behavior_ops = [foldr(dict_elemwise_sum())]
# behavior_ops = []

# need at least 1 behaviour and 1 state function for the 1st mech with behaviors
# mechanisms = {}
mechanisms = {
    "m1": {
        "behaviors": {
            "b1": b1m1, # lambda step, sL, s: s['s1'] + 1,
            # "b2": b2m1
        },
        "states": { # exclude only. TypeError: reduce() of empty sequence with no initial value
            "s1": s1m1,
            # "s2": s2m1
        }
    },
    "m2": {
        "behaviors": {
            "b1": b1m2,
            # "b2": b2m2
        },
        "states": {
            "s1": s1m2,
            # "s2": s2m2
        }
    },
    "m3": {
        "behaviors": {
            "b1": b1m3,
            "b2": b2m3
        },
        "states": {
            "s1": s1m3,
            "s2": s2m3
        }
    }
}

sim_config = {
    "N": 2,
    "T": range(5)
}

configs.append(
    Configuration(
        sim_config=sim_config,
        state_dict=genesis_states,
        seed=seed,
        exogenous_states=exogenous_states,
        env_processes=env_processes,
        mechanisms=mechanisms
    )
)
@@ -2,13 +2,12 @@ from decimal import Decimal
import numpy as np
from datetime import timedelta

from SimCAD import configs
from SimCAD.configuration import Configuration
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
    ep_time_step
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
from cadCAD.configuration.utils.parameterSweep import config_sim


seed = {
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
@@ -16,47 +15,47 @@ seed = {
}


# Behaviors per Mechanism
def b1m1(step, sL, s):
# Policies per Mechanism
def p1m1(_g, step, sL, s):
    return {'param1': 1}
def b2m1(step, sL, s):
def p2m1(_g, step, sL, s):
    return {'param2': 4}

def b1m2(step, sL, s):
def p1m2(_g, step, sL, s):
    return {'param1': 'a', 'param2': 2}
def b2m2(step, sL, s):
def p2m2(_g, step, sL, s):
    return {'param1': 'b', 'param2': 4}

def b1m3(step, sL, s):
def p1m3(_g, step, sL, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}
def b2m3(step, sL, s):
def p2m3(_g, step, sL, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


# Internal States per Mechanism
def s1m1(step, sL, s, _input):
def s1m1(_g, step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m1(step, sL, s, _input):
def s2m1(_g, step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)

def s1m2(step, sL, s, _input):
def s1m2(_g, step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m2(step, sL, s, _input):
def s2m2(_g, step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)

def s1m3(step, sL, s, _input):
def s1m3(_g, step, sL, s, _input):
    y = 's1'
    x = _input['param1']
    return (y, x)
def s2m3(step, sL, s, _input):
def s2m3(_g, step, sL, s, _input):
    y = 's2'
    x = _input['param2']
    return (y, x)
@@ -66,21 +65,21 @@ def s2m3(step, sL, s, _input):
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3

def es3p1(step, sL, s, _input):
def es3p1(_g, step, sL, s, _input):
    y = 's3'
    x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
    x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

def es4p2(step, sL, s, _input):
def es4p2(_g, step, sL, s, _input):
    y = 's4'
    x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
    x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(step, sL, s, _input):
    y = 'timestamp'
    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
def es5p2(_g, step, sL, s, _input):
    y = 'timestep'
    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)
@@ -99,53 +98,50 @@ genesis_states = {
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
    'timestamp': '2018-10-01 15:16:24'
    # 'timestep': '2018-10-01 15:16:24'
}


# remove `exo_update_per_ts` to update every ts
exogenous_states = exo_update_per_ts(
    {
raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
        "timestamp": es5p2
    }
)
    # "timestep": es5p2
}


env_processes = {
    "s3": env_a,
    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
    "s4": proc_trigger(1, env_b)
}


mechanisms = {
partial_state_update_block = {
    "m1": {
        "behaviors": {
            "b1": b1m1,
            "b2": b2m1
        "policies": {
            "b1": p1m1,
            "b2": p2m1
        },
        "states": {
        "variables": {
            "s1": s1m1,
            "s2": s2m1
        }
    },
    "m2": {
        "behaviors": {
            "b1": b1m2,
            "b2": b2m2
        "policies": {
            "b1": p1m2,
            "b2": p2m2
        },
        "states": {
        "variables": {
            "s1": s1m2,
            "s2": s2m2
        }
    },
    "m3": {
        "behaviors": {
            "b1": b1m3,
            "b2": b2m3
        "policies": {
            "b1": p1m3,
            "b2": p2m3
        },
        "states": {
        "variables": {
            "s1": s1m3,
            "s2": s2m3
        }
@ -153,19 +149,19 @@ mechanisms = {
|
|||
}
|
||||
|
||||
|
||||
sim_config = {
|
||||
"N": 2,
|
||||
"T": range(5)
|
||||
}
|
||||
|
||||
|
||||
configs.append(
|
||||
Configuration(
|
||||
sim_config=sim_config,
|
||||
state_dict=genesis_states,
|
||||
seed=seed,
|
||||
exogenous_states=exogenous_states,
|
||||
env_processes=env_processes,
|
||||
mechanisms=mechanisms
|
||||
)
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds=seeds,
|
||||
raw_exogenous_states=raw_exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
|
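The last hunk of this file swaps the registration API: instead of constructing a `Configuration` object and appending it to `configs`, the simulation settings go through `config_sim` and everything is registered via the keyword arguments of `append_configs`. A compact restatement of the new flow, assuming the `genesis_states`, `seeds`, `raw_exogenous_states`, `env_processes`, and `partial_state_update_block` objects defined earlier in the file:

```python
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils.parameterSweep import config_sim

sim_config = config_sim({
    "N": 2,         # number of simulation runs
    "T": range(5),  # timestep range per run
})

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    seeds=seeds,
    raw_exogenous_states=raw_exogenous_states,
    env_processes=env_processes,
    partial_state_update_blocks=partial_state_update_block,
)
```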
|
@ -2,13 +2,11 @@ from decimal import Decimal
|
|||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from SimCAD import configs
|
||||
from SimCAD.configuration import Configuration
|
||||
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
|
||||
ep_time_step
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
|
||||
from cadCAD.configuration.utils.parameterSweep import config_sim
|
||||
|
||||
|
||||
seed = {
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
|
|
@ -16,47 +14,47 @@ seed = {
|
|||
}
|
||||
|
||||
|
||||
# Behaviors per Mechanism
|
||||
def b1m1(step, sL, s):
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'param1': 1}
|
||||
def b2m1(step, sL, s):
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def b1m2(step, sL, s):
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'param1': 'a', 'param2': 2}
|
||||
def b2m2(step, sL, s):
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'param1': 'b', 'param2': 4}
|
||||
|
||||
def b1m3(step, sL, s):
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'param1': ['c'], 'param2': np.array([10, 100])}
|
||||
def b2m3(step, sL, s):
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'param1': ['d'], 'param2': np.array([20, 200])}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(step, sL, s, _input):
|
||||
def s1m1(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m1(step, sL, s, _input):
|
||||
def s2m1(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m2(step, sL, s, _input):
|
||||
def s1m2(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m2(step, sL, s, _input):
|
||||
def s2m2(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(step, sL, s, _input):
|
||||
def s1m3(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m3(step, sL, s, _input):
|
||||
def s2m3(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
|
@ -66,21 +64,21 @@ def s2m3(step, sL, s, _input):
|
|||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es3p1(step, sL, s, _input):
|
||||
def es3p1(_g, step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
def es4p2(step, sL, s, _input):
|
||||
def es4p2(_g, step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
def es5p2(_g, step, sL, s, _input):
|
||||
y = 'timestep'
|
||||
x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
|
|
@ -99,31 +97,28 @@ genesis_states = {
|
|||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
'timestamp': '2018-10-01 15:16:24'
|
||||
# 'timestep': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
|
||||
# remove `exo_update_per_ts` to update every ts
|
||||
exogenous_states = exo_update_per_ts(
|
||||
{
|
||||
raw_exogenous_states = {
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
"timestamp": es5p2
|
||||
}
|
||||
)
|
||||
# "timestep": es5p2
|
||||
}
|
||||
|
||||
|
||||
env_processes = {
|
||||
"s3": proc_trigger('2018-10-01 15:16:25', env_a),
|
||||
"s4": proc_trigger('2018-10-01 15:16:25', env_b)
|
||||
"s3": proc_trigger(1, env_a),
|
||||
"s4": proc_trigger(1, env_b)
|
||||
}
|
||||
|
||||
|
||||
mechanisms = {
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"behaviors": {
|
||||
"b1": b1m1,
|
||||
# "b2": b2m1
|
||||
"policies": {
|
||||
"b1": p1m1,
|
||||
# "b2": p2m1
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m1,
|
||||
|
|
@ -131,9 +126,9 @@ mechanisms = {
|
|||
}
|
||||
},
|
||||
"m2": {
|
||||
"behaviors": {
|
||||
"b1": b1m2,
|
||||
# "b2": b2m2
|
||||
"policies": {
|
||||
"b1": p1m2,
|
||||
# "b2": p2m2
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m2,
|
||||
|
|
@ -141,9 +136,9 @@ mechanisms = {
|
|||
}
|
||||
},
|
||||
"m3": {
|
||||
"behaviors": {
|
||||
"b1": b1m3,
|
||||
"b2": b2m3
|
||||
"policies": {
|
||||
"b1": p1m3,
|
||||
"b2": p2m3
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m3,
|
||||
|
|
@ -153,19 +148,19 @@ mechanisms = {
|
|||
}
|
||||
|
||||
|
||||
sim_config = {
|
||||
"N": 2,
|
||||
"T": range(5)
|
||||
}
|
||||
|
||||
|
||||
configs.append(
|
||||
Configuration(
|
||||
sim_config=sim_config,
|
||||
state_dict=genesis_states,
|
||||
seed=seed,
|
||||
exogenous_states=exogenous_states,
|
||||
env_processes=env_processes,
|
||||
mechanisms=mechanisms
|
||||
)
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds=seeds,
|
||||
raw_exogenous_states=raw_exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
|
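This file also changes what the environment-process trigger is keyed on: `proc_trigger('2018-10-01 15:16:25', env_a)` becomes `proc_trigger(1, env_a)`, i.e. the trigger now matches a timestep value rather than a timestamp string. `proc_trigger` itself comes from `cadCAD.configuration.utils`; the stand-in below only illustrates the behavior the diff implies and is not the library's implementation:

```python
# Illustrative stand-in (hypothetical, NOT cadCAD's proc_trigger): apply `f`
# only when the observed trigger field equals `when`; otherwise pass through.
def trigger_when(when, f):
    def wrapped(trigger_field, x):
        return f(x) if trigger_field == when else x
    return wrapped

env_b_at_step_1 = trigger_when(1, lambda x: 10)
print(env_b_at_step_1(1, 0))  # 10 -> fires at timestep 1
print(env_b_at_step_1(5, 0))  # 0  -> value passes through unchanged
```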
|
@ -0,0 +1,142 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
|
||||
from cadCAD.configuration.utils.parameterSweep import config_sim
|
||||
|
||||
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'param1': 1}
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'param1': 'a', 'param2': 2}
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'param1': 'b', 'param2': 4}
|
||||
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'param1': ['c'], 'param2': np.array([10, 100])}
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'param1': ['d'], 'param2': np.array([20, 200])}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m1(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m2(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m2(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m3(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m4(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = [1]
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es3p1(_g, step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
def es4p2(_g, step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(_g, step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
def env_a(x):
|
||||
return 5
|
||||
def env_b(x):
|
||||
return 10
|
||||
# def what_ever(x):
|
||||
# return x + 1
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
'timestamp': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
|
||||
raw_exogenous_states = {
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
"timestamp": es5p2
|
||||
}
|
||||
|
||||
|
||||
env_processes = {
|
||||
"s3": env_a,
|
||||
"s4": proc_trigger('2018-10-01 15:16:25', env_b)
|
||||
}
|
||||
|
||||
|
||||
partial_state_update_block = [
|
||||
]
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds={},
|
||||
raw_exogenous_states={},
|
||||
env_processes={},
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
|
|
@ -1,178 +0,0 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from SimCAD import configs
|
||||
from SimCAD.configuration import Configuration
|
||||
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
|
||||
ep_time_step
|
||||
|
||||
seed = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
# Behaviors per Mechanism
|
||||
# Different return types per mechanism ?? *** No ***
|
||||
def b1m1(step, sL, s):
|
||||
return {'param1': 1}
|
||||
def b2m1(step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def b1m2(step, sL, s):
|
||||
return {'param1': 'a', 'param2': 2}
|
||||
def b2m2(step, sL, s):
|
||||
return {'param1': 'b', 'param2': 4}
|
||||
|
||||
def b1m3(step, sL, s):
|
||||
return {'param1': ['c'], 'param2': np.array([10, 100])}
|
||||
def b2m3(step, sL, s):
|
||||
return {'param1': ['d'], 'param2': np.array([20, 200])}
|
||||
|
||||
# deff not more than 2
|
||||
# Internal States per Mechanism
|
||||
def s1m1(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1'] #+ [Coef1 x 5]
|
||||
return (y, x)
|
||||
def s2m1(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2'] #+ [Coef2 x 5]
|
||||
return (y, x)
|
||||
|
||||
def s1m2(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m2(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m3(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es3p1(step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
def es4p2(step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
def env_a(x):
|
||||
return 5
|
||||
def env_b(x):
|
||||
return 10
|
||||
# def what_ever(x):
|
||||
# return x + 1
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
'timestamp': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
# remove `exo_update_per_ts` to update every ts
|
||||
exogenous_states = exo_update_per_ts(
|
||||
{
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
"timestamp": es5p2
|
||||
}
|
||||
)
|
||||
|
||||
# ToDo: make env proc trigger field agnostic
|
||||
# ToDo: input json into function renaming __name__
|
||||
env_processes = {
|
||||
"s3": env_a,
|
||||
"s4": proc_trigger('2018-10-01 15:16:25', env_b)
|
||||
}
|
||||
|
||||
# lambdas
|
||||
# genesis Sites should always be there
|
||||
# [1, 2]
|
||||
# behavior_ops = [ foldr(_ + _), lambda x: x + 0 ]
|
||||
|
||||
# [1, 2] = {'b1': ['a'], 'b2', [1]} =
|
||||
# behavior_ops = [ behavior_to_dict, print_fwd, sum_dict_values ]
|
||||
# behavior_ops = [foldr(dict_elemwise_sum())]
|
||||
# behavior_ops = [foldr(lambda a, b: a + b)]
|
||||
|
||||
# need at least 1 behaviour and 1 state function for the 1st mech with behaviors
|
||||
# mechanisms = {}
|
||||
|
||||
mechanisms = {
|
||||
"m1": {
|
||||
"behaviors": {
|
||||
"b1": b1m1, # lambda step, sL, s: s['s1'] + 1,
|
||||
"b2": b2m1
|
||||
},
|
||||
"states": { # exclude only. TypeError: reduce() of empty sequence with no initial value
|
||||
"s1": s1m1,
|
||||
"s2": s2m1
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"behaviors": {
|
||||
"b1": b1m2,
|
||||
"b2": b2m2
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m2,
|
||||
"s2": s2m2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"behaviors": {
|
||||
"b1": b1m3,
|
||||
"b2": b2m3
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m3,
|
||||
"s2": s2m3
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sim_config = {
|
||||
"N": 2,
|
||||
"T": range(5)
|
||||
}
|
||||
|
||||
configs.append(
|
||||
Configuration(
|
||||
sim_config=sim_config,
|
||||
state_dict=genesis_states,
|
||||
seed=seed,
|
||||
exogenous_states=exogenous_states,
|
||||
env_processes=env_processes,
|
||||
mechanisms=mechanisms
|
||||
)
|
||||
)
|
||||
|
|
@ -1,180 +0,0 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from SimCAD import configs
|
||||
from SimCAD.configuration import Configuration
|
||||
from SimCAD.configuration.utils import exo_update_per_ts, proc_trigger, bound_norm_random, \
|
||||
ep_time_step
|
||||
|
||||
|
||||
seed = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
# Behaviors per Mechanism
|
||||
# Different return types per mechanism ?? *** No ***
|
||||
def b1m1(step, sL, s):
|
||||
return {'param1': 1}
|
||||
def b2m1(step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def b1m2(step, sL, s):
|
||||
return {'param1': 'a', 'param2': 2}
|
||||
def b2m2(step, sL, s):
|
||||
return {'param1': 'b', 'param2': 4}
|
||||
|
||||
|
||||
def b1m3(step, sL, s):
|
||||
return {'param1': ['c'], 'param2': np.array([10, 100])}
|
||||
def b2m3(step, sL, s):
|
||||
return {'param1': ['d'], 'param2': np.array([20, 200])}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m1(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m2(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m2(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m3(step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es3p1(step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
def es4p2(step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
def env_a(x):
|
||||
return 10
|
||||
def env_b(x):
|
||||
return 10
|
||||
# def what_ever(x):
|
||||
# return x + 1
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
'timestamp': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
# remove `exo_update_per_ts` to update every ts
|
||||
# why `exo_update_per_ts` here instead of `env_processes`
|
||||
exogenous_states = exo_update_per_ts(
|
||||
{
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
"timestamp": es5p2
|
||||
}
|
||||
)
|
||||
|
||||
# make env proc trigger field agnostic
|
||||
env_processes = {
|
||||
"s3": proc_trigger('2018-10-01 15:16:25', env_a),
|
||||
"s4": proc_trigger('2018-10-01 15:16:25', env_b)
|
||||
}
|
||||
|
||||
# lambdas
|
||||
# genesis Sites should always be there
|
||||
# [1, 2]
|
||||
# behavior_ops = [ foldr(_ + _), lambda x: x + 0 ]
|
||||
|
||||
|
||||
# [1, 2] = {'b1': ['a'], 'b2', [1]} =
|
||||
# behavior_ops = [behavior_to_dict, print_fwd, sum_dict_values]
|
||||
# behavior_ops = [foldr(dict_elemwise_sum())]
|
||||
# behavior_ops = []
|
||||
|
||||
# need at least 1 behaviour and 1 state function for the 1st mech with behaviors
|
||||
# mechanisms = {}
|
||||
mechanisms = {
|
||||
"m1": {
|
||||
"behaviors": {
|
||||
"b1": b1m1, # lambda step, sL, s: s['s1'] + 1,
|
||||
# "b2": b2m1
|
||||
},
|
||||
"states": { # exclude only. TypeError: reduce() of empty sequence with no initial value
|
||||
"s1": s1m1,
|
||||
# "s2": s2m1
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"behaviors": {
|
||||
"b1": b1m2,
|
||||
# "b2": b2m2
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m2,
|
||||
# "s2": s2m2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"behaviors": {
|
||||
"b1": b1m3,
|
||||
"b2": b2m3
|
||||
},
|
||||
"states": {
|
||||
"s1": s1m3,
|
||||
"s2": s2m3
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sim_config = {
|
||||
"N": 2,
|
||||
"T": range(5)
|
||||
}
|
||||
|
||||
configs.append(
|
||||
Configuration(
|
||||
sim_config=sim_config,
|
||||
state_dict=genesis_states,
|
||||
seed=seed,
|
||||
exogenous_states=exogenous_states,
|
||||
env_processes=env_processes,
|
||||
mechanisms=mechanisms
|
||||
)
|
||||
)
|
||||
|
|
@ -0,0 +1,196 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
import pprint
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import proc_trigger, ep_time_step
|
||||
from cadCAD.configuration.utils.parameterSweep import config_sim
|
||||
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
|
||||
g = {
|
||||
'alpha': [1],
|
||||
'beta': [2, 5],
|
||||
'gamma': [3, 4],
|
||||
'omega': [7]
|
||||
}
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'param1': 1}
|
||||
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'param1': 'a', 'param2': _g['beta']}
|
||||
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'param1': 'b', 'param2': 0}
|
||||
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'param1': np.array([10, 100])}
|
||||
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'param1': np.array([20, 200])}
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = 0
|
||||
return (y, x)
|
||||
|
||||
def s2m1(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _g['beta']
|
||||
return (y, x)
|
||||
|
||||
def s1m2(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s2m2(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = 0
|
||||
return (y, x)
|
||||
|
||||
def s2m3(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = 0
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
|
||||
def es3p1(_g, step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = _g['gamma']
|
||||
return (y, x)
|
||||
# @curried
|
||||
def es4p2(_g, step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = _g['gamma']
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(_g, step, sL, s, _input):
|
||||
y = 'timestep'
|
||||
x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
# @curried
|
||||
# def env_a(param, x):
|
||||
# return x + param
|
||||
def env_a(x):
|
||||
return x
|
||||
def env_b(x):
|
||||
return 10
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
# 'timestep': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
|
||||
# remove `exo_update_per_ts` to update every ts
|
||||
raw_exogenous_states = {
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
# "timestep": es5p2
|
||||
}
|
||||
|
||||
|
||||
# ToDo: make env proc trigger field agnostic
|
||||
# ToDo: input json into function renaming __name__
|
||||
triggered_env_b = proc_trigger(1, env_b)
|
||||
env_processes = {
|
||||
"s3": env_a, #sweep(beta, env_a),
|
||||
"s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
|
||||
}
|
||||
# parameterized_env_processes = parameterize_states(env_processes)
|
||||
#
|
||||
# pp.pprint(parameterized_env_processes)
|
||||
# exit()
|
||||
|
||||
# ToDo: The number of values entered in sweep should be the # of config objs created,
|
||||
# not dependent on the # of times the sweep is applied
|
||||
# sweep exo_state func and point to exo-state in every other function
|
||||
# param sweep on genesis states
|
||||
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"policies": {
|
||||
"b1": p1m1,
|
||||
"b2": p2m1
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m1,
|
||||
"s2": s2m1
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"policies": {
|
||||
"b1": p1m2,
|
||||
"b2": p2m2,
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m2,
|
||||
"s2": s2m2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"policies": {
|
||||
"b1": p1m3,
|
||||
"b2": p2m3
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m3,
|
||||
"s2": s2m3
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
"M": g
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds=seeds,
|
||||
raw_exogenous_states=raw_exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
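The `g` dict at the top of this new file is passed to `config_sim` under `"M"`, and its values are lists; policy and state update functions then read the swept values through `_g` (e.g. `_g['beta']`, `_g['gamma']`). Per the ToDo above, each value in a swept list is meant to correspond to one generated configuration. A small sketch, with hypothetical names, of how a swept parameter reaches a policy and a state update:

```python
# Hypothetical functions (not from the commit) showing how swept values
# arrive via `_g` once "M" is set in config_sim.
def p_beta(_g, step, sL, s):
    # one element of the swept list [2, 5] is visible here per generated config
    return {'param2': _g['beta']}

def s2_from_beta(_g, step, sL, s, _input):
    return ('s2', _input['param2'])
```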