clean staging
This commit is contained in: commit 1862416b86

@@ -12,7 +12,10 @@ results
*.csv
*.txt
simulations/.ipynb_checkpoints
simulations/validation/config3.py
dist/*.gz
cadCAD.egg-info

build
SimCAD.egg-info
cadCAD.egg-info
SimCAD.egg-info
@@ -9,7 +9,7 @@ Aided Design of economic systems. An economic system is treated as a state based
set of endogenous and exogenous state variables which are updated through mechanisms and environmental \
processes, respectively. Behavioral models, which may be deterministic or stochastic, provide the evolution of \
the system within the action space of the mechanisms. Mathematical formulations of these economic games \
treat agent utility as derived from state rather than directly from action, creating a rich dynamic modeling framework.

Simulations may be run with a range of initial conditions and parameters for states, behaviors, mechanisms, \
and environmental processes to understand and visualize network behavior under various conditions. Support for \

@@ -21,7 +21,7 @@ SimCAD is written in Python 3.
```bash
pip3 install -r requirements.txt
python3 setup.py sdist bdist_wheel
pip3 install dist/SimCAD-0.1-py3-none-any.whl
pip3 install dist/*.whl
```

**2. Configure Simulation:**

@@ -76,7 +76,7 @@ for raw_result, tensor_field in run2.main():
    print()
```

The above can be run in Jupyter.
```bash
jupyter notebook
```
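The README excerpt above references `run2.main()` without showing the surrounding setup. A minimal single-process run sketch, pieced together from the test script and engine changes later in this commit (the `config1` import and the `ExecutionContext`/`Executor` keyword names are taken from that script, not from the README itself), might look like this:

```python
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import config1  # importing a config module registers its Configuration
from cadCAD import configs

exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=[configs[0]])

# single_proc mode returns one (result, tensor_field) pair (see engine/__init__.py below)
raw_result, tensor_field = run.main()
print(tensor_field)       # pandas DataFrame describing update functions per substep
print(len(raw_result))    # simulation output records
```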
@@ -1,2 +0,0 @@
name = "SimCAD"
configs = []
@@ -1,131 +0,0 @@
from functools import reduce
from fn.op import foldr
import pandas as pd

from SimCAD import configs
from SimCAD.utils import key_filter
from SimCAD.configuration.utils.behaviorAggregation import dict_elemwise_sum
from SimCAD.configuration.utils import exo_update_per_ts


class Configuration(object):
    def __init__(self, sim_config=None, state_dict=None, seed=None, env_processes=None,
                 exogenous_states=None, mechanisms=None, behavior_ops=[foldr(dict_elemwise_sum())]):
        self.sim_config = sim_config
        self.state_dict = state_dict
        self.seed = seed
        self.env_processes = env_processes
        self.exogenous_states = exogenous_states
        self.mechanisms = mechanisms
        self.behavior_ops = behavior_ops


def append_configs(sim_configs, state_dict, seed, raw_exogenous_states, env_processes, mechanisms, _exo_update_per_ts=True):
    if _exo_update_per_ts is True:
        exogenous_states = exo_update_per_ts(raw_exogenous_states)
    else:
        exogenous_states = raw_exogenous_states

    if isinstance(sim_configs, list):
        for sim_config in sim_configs:
            configs.append(
                Configuration(
                    sim_config=sim_config,
                    state_dict=state_dict,
                    seed=seed,
                    exogenous_states=exogenous_states,
                    env_processes=env_processes,
                    mechanisms=mechanisms
                )
            )
    elif isinstance(sim_configs, dict):
        configs.append(
            Configuration(
                sim_config=sim_configs,
                state_dict=state_dict,
                seed=seed,
                exogenous_states=exogenous_states,
                env_processes=env_processes,
                mechanisms=mechanisms
            )
        )


class Identity:
    def __init__(self, behavior_id={'identity': 0}):
        self.beh_id_return_val = behavior_id

    def b_identity(self, var_dict, step, sL, s):
        return self.beh_id_return_val

    def behavior_identity(self, k):
        return self.b_identity

    def no_state_identity(self, var_dict, step, sL, s, _input):
        return None

    def state_identity(self, k):
        return lambda var_dict, step, sL, s, _input: (k, s[k])

    def apply_identity_funcs(self, identity, df, cols):
        def fillna_with_id_func(identity, df, col):
            return df[[col]].fillna(value=identity(col))

        return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))


class Processor:
    def __init__(self, id=Identity()):
        self.id = id
        self.b_identity = id.b_identity
        self.behavior_identity = id.behavior_identity
        self.no_state_identity = id.no_state_identity
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs

    def create_matrix_field(self, mechanisms, key):
        if key == 'states':
            identity = self.state_identity
        elif key == 'behaviors':
            identity = self.behavior_identity
        df = pd.DataFrame(key_filter(mechanisms, key))
        col_list = self.apply_identity_funcs(identity, df, list(df.columns))
        if len(col_list) != 0:
            return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
        else:
            return pd.DataFrame({'empty': []})

    def generate_config(self, state_dict, mechanisms, exo_proc):

        def no_update_handler(bdf, sdf):
            if (bdf.empty == False) and (sdf.empty == True):
                bdf_values = bdf.values.tolist()
                sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(mechanisms))]
                return sdf_values, bdf_values
            elif (bdf.empty == True) and (sdf.empty == False):
                sdf_values = sdf.values.tolist()
                bdf_values = [[self.b_identity] * len(sdf_values) for m in range(len(mechanisms))]
                return sdf_values, bdf_values
            else:
                sdf_values = sdf.values.tolist()
                bdf_values = bdf.values.tolist()
                return sdf_values, bdf_values

        def only_ep_handler(state_dict):
            sdf_functions = [
                lambda step, sL, s, _input: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())
            ]
            sdf_values = [sdf_functions]
            bdf_values = [[self.b_identity] * len(sdf_values)]
            return sdf_values, bdf_values

        if len(mechanisms) != 0:
            bdf = self.create_matrix_field(mechanisms, 'behaviors')
            sdf = self.create_matrix_field(mechanisms, 'states')
            sdf_values, bdf_values = no_update_handler(bdf, sdf)
            zipped_list = list(zip(sdf_values, bdf_values))
        else:
            sdf_values, bdf_values = only_ep_handler(state_dict)
            zipped_list = list(zip(sdf_values, bdf_values))

        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
@@ -1,104 +0,0 @@
from copy import deepcopy
from fn.op import foldr, call

from SimCAD.engine.utils import engine_exception

id_exception = engine_exception(KeyError, KeyError, None)


class Executor:
    def __init__(self, behavior_ops, behavior_update_exception=id_exception, state_update_exception=id_exception):
        self.behavior_ops = behavior_ops
        self.state_update_exception = state_update_exception
        self.behavior_update_exception = behavior_update_exception

    def get_behavior_input(self, var_dict, step, sL, s, funcs):
        ops = self.behavior_ops[::-1]

        def get_col_results(var_dict, step, sL, s, funcs):
            # return list(map(lambda f: curry_pot(f, step, sL, s), funcs))
            return list(map(lambda f: f(var_dict, step, sL, s), funcs))

        # print(get_col_results(step, sL, s, funcs))
        return foldr(call, get_col_results(var_dict, step, sL, s, funcs))(ops)

    def apply_env_proc(self, env_processes, state_dict, step):
        for state in state_dict.keys():
            if state in list(env_processes.keys()):
                env_state = env_processes[state]
                if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
                    state_dict[state] = env_state(step)(state_dict[state])
                else:
                    state_dict[state] = env_state(state_dict[state])

    def mech_step(self, var_dict, m_step, sL, state_funcs, behavior_funcs, env_processes, t_step, run):
        last_in_obj = sL[-1]

        _input = self.behavior_update_exception(self.get_behavior_input(var_dict, m_step, sL, last_in_obj, behavior_funcs))
        # print(_input)

        # ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
        last_in_copy = dict(
            [
                # self.state_update_exception(curry_pot(f, m_step, sL, last_in_obj, _input)) for f in state_funcs
                self.state_update_exception(f(var_dict, m_step, sL, last_in_obj, _input)) for f in state_funcs
            ]
        )

        for k in last_in_obj:
            if k not in last_in_copy:
                last_in_copy[k] = last_in_obj[k]

        del last_in_obj

        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestamp'])

        last_in_copy["mech_step"], last_in_copy["time_step"], last_in_copy['run'] = m_step, t_step, run
        sL.append(last_in_copy)
        del last_in_copy

        return sL

    def mech_pipeline(self, var_dict, states_list, configs, env_processes, t_step, run):
        m_step = 0
        states_list_copy = deepcopy(states_list)
        genesis_states = states_list_copy[-1]
        genesis_states['mech_step'], genesis_states['time_step'] = m_step, t_step
        states_list = [genesis_states]

        m_step += 1
        for config in configs:
            s_conf, b_conf = config[0], config[1]
            states_list = self.mech_step(var_dict, m_step, states_list, s_conf, b_conf, env_processes, t_step, run)
            m_step += 1

        t_step += 1

        return states_list

    # ToDo: Rename Run Pipeline
    def block_pipeline(self, var_dict, states_list, configs, env_processes, time_seq, run):
        time_seq = [x + 1 for x in time_seq]
        simulation_list = [states_list]
        for time_step in time_seq:
            pipe_run = self.mech_pipeline(var_dict, simulation_list[-1], configs, env_processes, time_step, run)
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)

        return simulation_list


    # ToDo: Muiltithreaded Runs
    def simulation(self, var_dict, states_list, configs, env_processes, time_seq, runs):
        pipe_run = []
        for run in range(runs):
            run += 1
            states_list_copy = deepcopy(states_list)
            head, *tail = self.block_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
            genesis = head.pop()
            genesis['mech_step'], genesis['time_step'], genesis['run'] = 0, 0, run
            first_timestep_per_run = [genesis] + tail.pop(0)
            pipe_run += [first_timestep_per_run] + tail
            del states_list_copy

        return pipe_run
@@ -0,0 +1,2 @@
name = "cadCAD"
configs = []
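The two-line module above replaces the deleted SimCAD/__init__.py and keeps the module-level `configs` list that the rest of the package treats as a shared registry: `append_configs` (later in this diff) appends Configuration objects to it and the engine Executor iterates over it. A tiny illustration, assuming the package is importable as `cadCAD`:

```python
import cadCAD

print(cadCAD.name)     # "cadCAD"
print(cadCAD.configs)  # [] until a config module calls append_configs
```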
@@ -0,0 +1,138 @@
from functools import reduce
from fn.op import foldr
import pandas as pd

from cadCAD import configs

from cadCAD.utils import key_filter
from cadCAD.configuration.utils import exo_update_per_ts
from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config


class Configuration(object):
    def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
                 exogenous_states={}, partial_state_update_blocks={}, policy_ops=[foldr(dict_elemwise_sum())], **kwargs):
        self.sim_config = sim_config
        self.initial_state = initial_state
        self.seeds = seeds
        self.env_processes = env_processes
        self.exogenous_states = exogenous_states
        self.partial_state_updates = partial_state_update_blocks
        self.policy_ops = policy_ops
        self.kwargs = kwargs

        sanitize_config(self)


def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={}, partial_state_update_blocks={}, _exo_update_per_ts=True):
    if _exo_update_per_ts is True:
        exogenous_states = exo_update_per_ts(raw_exogenous_states)
    else:
        exogenous_states = raw_exogenous_states

    if isinstance(sim_configs, list):
        for sim_config in sim_configs:
            config = Configuration(
                sim_config=sim_config,
                initial_state=initial_state,
                seeds=seeds,
                exogenous_states=exogenous_states,
                env_processes=env_processes,
                partial_state_update_blocks=partial_state_update_blocks
            )
            configs.append(config)
    elif isinstance(sim_configs, dict):
        config = Configuration(
            sim_config=sim_configs,
            initial_state=initial_state,
            seeds=seeds,
            exogenous_states=exogenous_states,
            env_processes=env_processes,
            partial_state_update_blocks=partial_state_update_blocks
        )
        configs.append(config)


class Identity:
    def __init__(self, policy_id={'identity': 0}):
        self.beh_id_return_val = policy_id

    def p_identity(self, var_dict, sub_step, sL, s):
        return self.beh_id_return_val

    def policy_identity(self, k):
        return self.p_identity

    def no_state_identity(self, var_dict, sub_step, sL, s, _input):
        return None

    def state_identity(self, k):
        return lambda var_dict, sub_step, sL, s, _input: (k, s[k])

    def apply_identity_funcs(self, identity, df, cols):
        def fillna_with_id_func(identity, df, col):
            return df[[col]].fillna(value=identity(col))

        return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))


class Processor:
    def __init__(self, id=Identity()):
        self.id = id
        self.p_identity = id.p_identity
        self.policy_identity = id.policy_identity
        self.no_state_identity = id.no_state_identity
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs

    def create_matrix_field(self, partial_state_updates, key):
        if key == 'variables':
            identity = self.state_identity
        elif key == 'policies':
            identity = self.policy_identity

        df = pd.DataFrame(key_filter(partial_state_updates, key))
        col_list = self.apply_identity_funcs(identity, df, list(df.columns))
        if len(col_list) != 0:
            return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
        else:
            return pd.DataFrame({'empty': []})

    def generate_config(self, initial_state, partial_state_updates, exo_proc):

        def no_update_handler(bdf, sdf):
            if (bdf.empty == False) and (sdf.empty == True):
                bdf_values = bdf.values.tolist()
                sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            elif (bdf.empty == True) and (sdf.empty == False):
                sdf_values = sdf.values.tolist()
                bdf_values = [[self.p_identity] * len(sdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            else:
                sdf_values = sdf.values.tolist()
                bdf_values = bdf.values.tolist()
                return sdf_values, bdf_values

        def only_ep_handler(state_dict):
            sdf_functions = [
                lambda var_dict, sub_step, sL, s, _input: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())
            ]
            sdf_values = [sdf_functions]
            bdf_values = [[self.p_identity] * len(sdf_values)]
            return sdf_values, bdf_values

        if len(partial_state_updates) != 0:
            # backwards compatibility # ToDo: Move this
            partial_state_updates = sanitize_partial_state_updates(partial_state_updates)

            bdf = self.create_matrix_field(partial_state_updates, 'policies')
            sdf = self.create_matrix_field(partial_state_updates, 'variables')
            sdf_values, bdf_values = no_update_handler(bdf, sdf)
            zipped_list = list(zip(sdf_values, bdf_values))
        else:
            sdf_values, bdf_values = only_ep_handler(initial_state)
            zipped_list = list(zip(sdf_values, bdf_values))

        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
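The new `append_configs` above renames the old keyword arguments (state_dict becomes initial_state, seed becomes seeds, mechanisms becomes partial_state_update_blocks) and registers a Configuration in the module-level `cadCAD.configs` list. A hedged, self-contained usage sketch; the `'T'/'N'/'M'` keys in `sim_configs` are an assumption based on what the engine reads later in this diff, and the toy policy/state functions are hypothetical:

```python
from cadCAD.configuration import append_configs

# toy policy and state-update functions using the new argument conventions
def p_noop(_g, sub_step, sL, s):
    return {'param1': 0}

def s_count(_g, sub_step, sL, s, _input):
    return 's1', s['s1'] + 1

append_configs(
    sim_configs={'T': range(5), 'N': 1, 'M': {}},   # assumed keys; the engine reads sim_config['T'/'N'/'M']
    initial_state={'s1': 0},                        # formerly state_dict=
    seeds={},                                       # formerly seed=
    raw_exogenous_states={},
    env_processes={},
    partial_state_update_blocks=[                   # formerly mechanisms=
        {'policies': {'p1': p_noop}, 'variables': {'s1': s_count}}
    ]
)
```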
@@ -4,15 +4,22 @@ from copy import deepcopy
from fn.func import curried
import pandas as pd

from SimCAD.utils import dict_filter, contains_type
# Temporary
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates
from cadCAD.utils import dict_filter, contains_type


# ToDo: Fix - Returns empty when partial_state_update is missing in Configuration
class TensorFieldReport:
    def __init__(self, config_proc):
        self.config_proc = config_proc

    def create_tensor_field(self, mechanisms, exo_proc, keys=['behaviors', 'states']):
        dfs = [self.config_proc.create_matrix_field(mechanisms, k) for k in keys]
    # ToDo: backwards compatibility
    def create_tensor_field(self, partial_state_updates, exo_proc, keys = ['policies', 'variables']):

        partial_state_updates = sanitize_partial_state_updates(partial_state_updates) # Temporary

        dfs = [self.config_proc.create_matrix_field(partial_state_updates, k) for k in keys]
        df = pd.concat(dfs, axis=1)
        for es, i in zip(exo_proc, range(len(exo_proc))):
            df['es' + str(i + 1)] = es

@@ -20,12 +27,8 @@ class TensorFieldReport:
        return df


# def s_update(y, x):
#     return lambda step, sL, s, _input: (y, x)
#
#
def state_update(y, x):
    return lambda step, sL, s, _input: (y, x)
    return lambda var_dict, sub_step, sL, s, _input: (y, x)


def bound_norm_random(rng, low, high):

@@ -36,15 +39,15 @@ def bound_norm_random(rng, low, high):


@curried
def proc_trigger(trigger_step, update_f, step):
    if step == trigger_step:
def proc_trigger(trigger_time, update_f, time):
    if time == trigger_time:
        return update_f
    else:
        return lambda x: x


step_t_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = step_t_delta):
tstep_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = tstep_delta):
    dt = datetime.strptime(dt_str, dt_format)
    t = dt + _timedelta
    return t.strftime(dt_format)

@@ -52,16 +55,16 @@ def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = step_t_delta):

ep_t_delta = timedelta(days=0, minutes=0, seconds=1)
def ep_time_step(s, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = ep_t_delta):
    if s['mech_step'] == 0:
    if s['substep'] == 0:
        return time_step(dt_str, fromat_str, _timedelta)
    else:
        return dt_str


def mech_sweep_filter(mech_field, mechanisms):
    mech_dict = dict([(k, v[mech_field]) for k, v in mechanisms.items()])
# mech_sweep_filter
def partial_state_sweep_filter(state_field, partial_state_updates):
    partial_state_dict = dict([(k, v[state_field]) for k, v in partial_state_updates.items()])
    return dict([
        (k, dict_filter(v, lambda v: isinstance(v, list))) for k, v in mech_dict.items()
        (k, dict_filter(v, lambda v: isinstance(v, list))) for k, v in partial_state_dict.items()
        if contains_type(list(v.values()), list)
    ])

@@ -69,16 +72,18 @@ def mech_sweep_filter(mech_field, mechanisms):
def state_sweep_filter(raw_exogenous_states):
    return dict([(k, v) for k, v in raw_exogenous_states.items() if isinstance(v, list)])

# sweep_mech_states
@curried
def sweep_mechs(_type, in_config):
def sweep_partial_states(_type, in_config):
    configs = []
    filtered_mech_states = mech_sweep_filter(_type, in_config.mechanisms)
    if len(filtered_mech_states) > 0:
        for mech, state_dict in filtered_mech_states.items():
    # filtered_mech_states
    filtered_partial_states = partial_state_sweep_filter(_type, in_config.partial_state_updates)
    if len(filtered_partial_states) > 0:
        for partial_state, state_dict in filtered_partial_states.items():
            for state, state_funcs in state_dict.items():
                for f in state_funcs:
                    config = deepcopy(in_config)
                    config.mechanisms[mech][_type][state] = f
                    config.partial_state_updates[partial_state][_type][state] = f
                    configs.append(config)
                    del config
    else:

@@ -111,9 +116,9 @@ def sweep_states(state_type, states, in_config):

def exo_update_per_ts(ep):
    @curried
    def ep_decorator(f, y, var_dict, step, sL, s, _input):
        if s['mech_step'] + 1 == 1:
            return f(var_dict, step, sL, s, _input) # curry_pot
    def ep_decorator(f, y, var_dict, sub_step, sL, s, _input):
        if s['substep'] + 1 == 1:
            return f(var_dict, sub_step, sL, s, _input)
        else:
            return y, s[y]
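The hunk above renames `proc_trigger`'s arguments from steps to times, and the updated configs later in this commit call it as `proc_trigger(1, env_b)`. A plain-Python illustration of the gating pattern; the real implementation uses `fn.func.curried`, this sketch only mimics the call shape:

```python
# proc_trigger-style gate: apply update_f only at the trigger timestep, identity elsewhere
def proc_trigger_sketch(trigger_time, update_f):
    def at(time):
        return update_f if time == trigger_time else (lambda x: x)
    return at

env_b = lambda x: x * 10            # hypothetical environmental update
gated = proc_trigger_sketch(1, env_b)

print(gated(1)(5))   # 50 -> update applied at the trigger timestep
print(gated(2)(5))   # 5  -> state passes through unchanged elsewhere
```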
@@ -0,0 +1,41 @@
from copy import deepcopy


def sanitize_config(config):
    # for backwards compatibility, we accept old arguments via **kwargs
    # TODO: raise specific deprecation warnings for key == 'state_dict', key == 'seed', key == 'mechanisms'
    for key, value in config.kwargs.items():
        if key == 'state_dict':
            config.initial_state = value
        elif key == 'seed':
            config.seeds = value
        elif key == 'mechanisms':
            config.partial_state_updates = value

    if config.initial_state == {}:
        raise Exception('The initial conditions of the system have not been set')


def sanitize_partial_state_updates(partial_state_updates):
    new_partial_state_updates = deepcopy(partial_state_updates)
    # for backwards compatibility we accept the old keys
    # ('behaviors' and 'states') and rename them
    def rename_keys(d):
        if 'behaviors' in d:
            d['policies'] = d.pop('behaviors')

        if 'states' in d:
            d['variables'] = d.pop('states')

    # Also for backwards compatibility, we accept partial state update blocks both as list or dict
    # No need for a deprecation warning as it's already raised by cadCAD.utils.key_filter
    if (type(new_partial_state_updates)==list):
        for v in new_partial_state_updates:
            rename_keys(v)
    elif (type(new_partial_state_updates)==dict):
        for k, v in new_partial_state_updates.items():
            rename_keys(v)

    del partial_state_updates
    return new_partial_state_updates
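A short usage sketch of the deprecation handler added above: a legacy block written with 'behaviors'/'states' keys comes back with the new 'policies'/'variables' keys. It assumes the module is importable under the path shown in the imports above.

```python
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates

legacy_block = {
    "m1": {
        "behaviors": {"b1": lambda _g, step, sL, s: {"param1": 1}},
        "states":    {"s1": lambda _g, step, sL, s, _input: ("s1", s["s1"])},
    }
}

migrated = sanitize_partial_state_updates(legacy_block)
print(list(migrated["m1"].keys()))   # ['policies', 'variables']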
@@ -1,4 +1,4 @@
from SimCAD.utils import flatten_tabulated_dict, tabulate_dict
from cadCAD.utils import flatten_tabulated_dict, tabulate_dict


def process_variables(d):

@@ -13,7 +13,7 @@ def get_base_value(x):
    return 0


def behavior_to_dict(v):
def policy_to_dict(v):
    return dict(list(zip(map(lambda n: 'b' + str(n + 1), list(range(len(v)))), v)))
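A worked example of the renamed `policy_to_dict` helper above: a list of policy functions is keyed as 'b1', 'b2', and so on. Pure illustration, no cadCAD import needed; the function body is copied verbatim from the hunk.

```python
def policy_to_dict(v):
    return dict(list(zip(map(lambda n: 'b' + str(n + 1), list(range(len(v)))), v)))

p1 = lambda _g, step, sL, s: {'param1': 1}
p2 = lambda _g, step, sL, s: {'param2': 4}

print(policy_to_dict([p1, p2]))  # {'b1': <p1>, 'b2': <p2>}
```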
@@ -1,9 +1,9 @@
from pathos.multiprocessing import ProcessingPool as Pool

from SimCAD.utils import flatten
from SimCAD.configuration import Processor
from SimCAD.configuration.utils import TensorFieldReport
from SimCAD.engine.simulation import Executor as SimExecutor
from cadCAD.utils import flatten
from cadCAD.configuration import Processor
from cadCAD.configuration.utils import TensorFieldReport
from cadCAD.engine.simulation import Executor as SimExecutor


class ExecutionMode:

@@ -47,7 +47,7 @@ class Executor:
        create_tensor_field = TensorFieldReport(config_proc).create_tensor_field

        print(self.exec_context+": "+str(self.configs))
        var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, mechanisms, simulation_execs = \
        var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, partial_state_updates, simulation_execs = \
            [], [], [], [], [], [], [], [], []
        config_idx = 0
        for x in self.configs:

@@ -55,24 +55,25 @@ class Executor:
            Ts.append(x.sim_config['T'])
            Ns.append(x.sim_config['N'])
            var_dict_list.append(x.sim_config['M'])
            states_lists.append([x.state_dict])
            states_lists.append([x.initial_state])
            eps.append(list(x.exogenous_states.values()))
            configs_structs.append(config_proc.generate_config(x.state_dict, x.mechanisms, eps[config_idx]))
            configs_structs.append(config_proc.generate_config(x.initial_state, x.partial_state_updates, eps[config_idx]))
            env_processes_list.append(x.env_processes)
            mechanisms.append(x.mechanisms)
            simulation_execs.append(SimExecutor(x.behavior_ops).simulation)
            partial_state_updates.append(x.partial_state_updates)
            simulation_execs.append(SimExecutor(x.policy_ops).simulation)

            config_idx += 1

        if self.exec_context == ExecutionMode.single_proc:
            tensor_field = create_tensor_field(mechanisms.pop(), eps.pop())
            # ToDO: Deprication Handler - "sanitize" in appropriate place
            tensor_field = create_tensor_field(partial_state_updates.pop(), eps.pop())
            result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
            return result, tensor_field
        elif self.exec_context == ExecutionMode.multi_proc:
            if len(self.configs) > 1:
                simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
                results = []
                for result, mechanism, ep in list(zip(simulations, mechanisms, eps)):
                    results.append((flatten(result), create_tensor_field(mechanism, ep)))
                for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
                    results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))

                return results
@@ -0,0 +1,103 @@
from copy import deepcopy
from fn.op import foldr, call

from cadCAD.engine.utils import engine_exception

id_exception = engine_exception(KeyError, KeyError, None)


class Executor:

    def __init__(self, policy_ops, policy_update_exception=id_exception, state_update_exception=id_exception):
        self.policy_ops = policy_ops # behavior_ops
        self.state_update_exception = state_update_exception
        self.policy_update_exception = policy_update_exception # behavior_update_exception

    # get_behavior_input
    def get_policy_input(self, var_dict, sub_step, sL, s, funcs):
        ops = self.policy_ops[::-1]

        def get_col_results(var_dict, sub_step, sL, s, funcs):
            return list(map(lambda f: f(var_dict, sub_step, sL, s), funcs))

        return foldr(call, get_col_results(var_dict, sub_step, sL, s, funcs))(ops)

    def apply_env_proc(self, env_processes, state_dict, sub_step):
        for state in state_dict.keys():
            if state in list(env_processes.keys()):
                env_state = env_processes[state]
                if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
                    state_dict[state] = env_state(sub_step)(state_dict[state])
                else:
                    state_dict[state] = env_state(state_dict[state])

    # mech_step
    def partial_state_update(self, var_dict, sub_step, sL, state_funcs, policy_funcs, env_processes, time_step, run):
        last_in_obj = sL[-1]

        _input = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sL, last_in_obj, policy_funcs))

        # ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
        last_in_copy = dict(
            [
                self.state_update_exception(f(var_dict, sub_step, sL, last_in_obj, _input)) for f in state_funcs
            ]
        )

        for k in last_in_obj:
            if k not in last_in_copy:
                last_in_copy[k] = last_in_obj[k]

        del last_in_obj

        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])

        last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
        sL.append(last_in_copy)
        del last_in_copy

        return sL


    # mech_pipeline
    def state_update_pipeline(self, var_dict, states_list, configs, env_processes, time_step, run):
        sub_step = 0
        states_list_copy = deepcopy(states_list)
        genesis_states = states_list_copy[-1]
        genesis_states['substep'], genesis_states['timestep'] = sub_step, time_step
        states_list = [genesis_states]

        sub_step += 1
        for config in configs:
            s_conf, p_conf = config[0], config[1]
            states_list = self.partial_state_update(var_dict, sub_step, states_list, s_conf, p_conf, env_processes, time_step, run)
            sub_step += 1

        time_step += 1

        return states_list

    def run_pipeline(self, var_dict, states_list, configs, env_processes, time_seq, run):
        time_seq = [x + 1 for x in time_seq]
        simulation_list = [states_list]
        for time_step in time_seq:
            pipe_run = self.state_update_pipeline(var_dict, simulation_list[-1], configs, env_processes, time_step, run)
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)

        return simulation_list

    # ToDo: Muiltithreaded Runs
    def simulation(self, var_dict, states_list, configs, env_processes, time_seq, runs):
        pipe_run = []
        for run in range(runs):
            run += 1
            states_list_copy = deepcopy(states_list)
            head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
            genesis = head.pop()
            genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
            first_timestep_per_run = [genesis] + tail.pop(0)
            pipe_run += [first_timestep_per_run] + tail
            del states_list_copy

        return pipe_run
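`get_policy_input` above folds the dicts returned by each policy function using the configured `policy_ops`, which default to `foldr(dict_elemwise_sum())`. A plain-Python illustration of what that aggregation accomplishes; `dict_elemwise_sum` itself lives in cadCAD.configuration.utils.policyAggregation and is not shown in this diff, so the key-wise sum below is an assumption about its behavior:

```python
from functools import reduce

# assumed behavior of dict_elemwise_sum: merge policy dicts, summing values per key
def dict_elemwise_sum_sketch(a, b):
    return {k: a.get(k, 0) + b.get(k, 0) for k in set(a) | set(b)}

policy_results = [{'param1': 1}, {'param2': 4}, {'param1': 2}]
aggregated = reduce(dict_elemwise_sum_sketch, policy_results)
print(aggregated)  # {'param1': 3, 'param2': 4}
```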
@@ -24,8 +24,8 @@ def retrieve_state(l, offset):
    return l[last_index(l) + offset + 1]


# exception_function = f(m_step, sL, sL[-2], _input)
# try_function = f(m_step, sL, last_mut_obj, _input)
# exception_function = f(sub_step, sL, sL[-2], _input)
# try_function = f(sub_step, sL, last_mut_obj, _input)
@curried
def engine_exception(ErrorType, error_message, exception_function, try_function):
    try:
@@ -1,6 +1,6 @@
from collections import defaultdict
from itertools import product

import warnings

def pipe(x):
    return x

@@ -75,8 +75,16 @@ def contains_type(_collection, type):
def drop_right(l, n):
    return l[:len(l) - n]


# backwards compatibility
# ToDo: Encapsulate in function
def key_filter(l, keyname):
    if (type(l) == list):
        return [v[keyname] for v in l]
    # Keeping support to dictionaries for backwards compatibility
    # Should be removed in the future
    warnings.warn(
        "The use of a dictionary to describe Partial State Update Blocks will be deprecated. Use a list instead.",
        FutureWarning)
    return [v[keyname] for k, v in l.items()]


@@ -125,4 +133,4 @@ def curry_pot(f, *argv):
# def decorator(f):
#     f.__name__ = newname
#     return f
# return decorator
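A usage sketch for the `key_filter` backwards-compatibility shim above: lists are now the canonical form for partial state update blocks, while dict input still works but emits a FutureWarning. Assumes cadCAD is installed; the block contents are placeholder strings.

```python
import warnings
from cadCAD.utils import key_filter

blocks_as_list = [{'policies': {'p1': 'P1'}, 'variables': {'s1': 'S1'}}]
print(key_filter(blocks_as_list, 'policies'))      # [{'p1': 'P1'}]

blocks_as_dict = {'m1': {'policies': {'p1': 'P1'}, 'variables': {'s1': 'S1'}}}
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(key_filter(blocks_as_dict, 'policies'))  # [{'p1': 'P1'}]
    print(caught[0].category.__name__)             # FutureWarning
```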
Binary file not shown.
Binary file not shown.
@@ -1,576 +0,0 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import _thread\n",
|
||||
"import time\n",
|
||||
"from fn.func import curried"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Define a function for the thread\n",
|
||||
"def f(threadName, delay):\n",
|
||||
" count = 0\n",
|
||||
" print(count)\n",
|
||||
" # while count < 5:\n",
|
||||
" # time.sleep(delay)\n",
|
||||
" # count += 1\n",
|
||||
" # print(count)\n",
|
||||
" \n",
|
||||
"def pipe(x):\n",
|
||||
" print(x)\n",
|
||||
" return x"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"00\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Create two threads as follows\n",
|
||||
"try:\n",
|
||||
" _thread.start_new_thread( f, (\"Thread-1\", 2, ) )\n",
|
||||
" _thread.start_new_thread( f, (\"Thread-2\", 4, ) )\n",
|
||||
"except:\n",
|
||||
" print (\"Error: unable to start thread\")\n",
|
||||
"\n",
|
||||
"while 1:\n",
|
||||
" pass\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[1, 2]\n",
|
||||
"('s2', <function fit_param.<locals>.<lambda> at 0x1099efae8>)\n",
|
||||
"('s2', <function fit_param.<locals>.<lambda> at 0x1099ef9d8>)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from SimCAD.engine.utils import sweep\n",
|
||||
"from SimCAD.utils import rename\n",
|
||||
"from SimCAD.configuration.utils import s_update\n",
|
||||
"\n",
|
||||
"# @curried\n",
|
||||
"def fit_param(param):\n",
|
||||
" return lambda x: x + param\n",
|
||||
"\n",
|
||||
"# xf = lambda param: lambda x: x + param\n",
|
||||
"\n",
|
||||
"def sweep(params, y, xf):\n",
|
||||
" op = [rename('sweep', s_update(y, xf(param))) for param in params]\n",
|
||||
" print(params)\n",
|
||||
" # print()\n",
|
||||
" return op\n",
|
||||
"\n",
|
||||
"for f in sweep([1,2], 's2', fit_param):\n",
|
||||
" print(f(1,2,3,4))\n",
|
||||
"# sweep([1,2], 's2', xf)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[1, 64, 2187, 65536]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# instantiate and configure the worker pool\n",
|
||||
"from pathos.threading import ThreadPool\n",
|
||||
"pool = ThreadPool(nodes=4)\n",
|
||||
"\n",
|
||||
"# do a blocking map on the chosen function\n",
|
||||
"print(pool.map(pow, [1,2,3,4], [5,6,7,8]))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "SyntaxError",
|
||||
"evalue": "invalid syntax (<ipython-input-2-6e999d313015>, line 3)",
|
||||
"traceback": [
|
||||
"\u001b[0;36m File \u001b[0;32m\"<ipython-input-2-6e999d313015>\"\u001b[0;36m, line \u001b[0;32m3\u001b[0m\n\u001b[0;31m [for f in fs]\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
|
||||
],
|
||||
"output_type": "error"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with Pool(len(configs)) as p:\n",
|
||||
" results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5]), l)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"def state_multithreading(self, fs, m_step, sL, last_in_obj, _input):\n",
|
||||
" if type(fs) == 'list':\n",
|
||||
" pool.map(f(m_step, sL, last_in_obj, _input), fs)\n",
|
||||
" else:\n",
|
||||
" f(m_step, sL, last_in_obj, _input)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[('s2', [11, 23])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from itertools import groupby\n",
|
||||
"l = [('s2', 11), ('s2', 23)]\n",
|
||||
"l.sort(key = lambda i : i[0])\n",
|
||||
"[(key, [i[1] for i in values]) for key, values in groupby(l, lambda i: i[0])]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def groupByKV(l):\n",
|
||||
" l.sort(key = lambda i : i[0])\n",
|
||||
" return [(key, [i[1] for i in values]) for key, values in groupby(l, lambda i: i[0])]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[('s2', [11, 23])]"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"groupByKV(l)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "SyntaxError",
|
||||
"evalue": "invalid syntax (<ipython-input-20-fada0ccd8d2a>, line 2)",
|
||||
"traceback": [
|
||||
"\u001b[0;36m File \u001b[0;32m\"<ipython-input-20-fada0ccd8d2a>\"\u001b[0;36m, line \u001b[0;32m2\u001b[0m\n\u001b[0;31m collect = lambda tuplist: reduce(lambda acc, (k,v): acc[k].append(v) or acc,tuplist, defaultdict(list))\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
|
||||
],
|
||||
"output_type": "error"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from collections import defaultdict \n",
|
||||
"collect = lambda tuplist: reduce(lambda acc, (k,v): acc[k].append(v) or acc,tuplist, defaultdict(list))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from collections import defaultdict\n",
|
||||
"d = defaultdict(list)\n",
|
||||
"for key, value in [('s2', 11), ('s2', 23)]:\n",
|
||||
" d[key].append(value)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {
|
||||
"collapsed": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Help on defaultdict object:\n",
|
||||
"\n",
|
||||
"class defaultdict(builtins.dict)\n",
|
||||
" | defaultdict(default_factory[, ...]) --> dict with default factory\n",
|
||||
" | \n",
|
||||
" | The default factory is called without arguments to produce\n",
|
||||
" | a new value when a key is not present, in __getitem__ only.\n",
|
||||
" | A defaultdict compares equal to a dict with the same items.\n",
|
||||
" | All remaining arguments are treated the same as if they were\n",
|
||||
" | passed to the dict constructor, including keyword arguments.\n",
|
||||
" | \n",
|
||||
" | Method resolution order:\n",
|
||||
" | defaultdict\n",
|
||||
" | builtins.dict\n",
|
||||
" | builtins.object\n",
|
||||
" | \n",
|
||||
" | Methods defined here:\n",
|
||||
" | \n",
|
||||
" | __copy__(...)\n",
|
||||
" | D.copy() -> a shallow copy of D.\n",
|
||||
" | \n",
|
||||
" | __getattribute__(self, name, /)\n",
|
||||
" | Return getattr(self, name).\n",
|
||||
" | \n",
|
||||
" | __init__(self, /, *args, **kwargs)\n",
|
||||
" | Initialize self. See help(type(self)) for accurate signature.\n",
|
||||
" | \n",
|
||||
" | __missing__(...)\n",
|
||||
" | __missing__(key) # Called by __getitem__ for missing key; pseudo-code:\n",
|
||||
" | if self.default_factory is None: raise KeyError((key,))\n",
|
||||
" | self[key] = value = self.default_factory()\n",
|
||||
" | return value\n",
|
||||
" | \n",
|
||||
" | __reduce__(...)\n",
|
||||
" | Return state information for pickling.\n",
|
||||
" | \n",
|
||||
" | __repr__(self, /)\n",
|
||||
" | Return repr(self).\n",
|
||||
" | \n",
|
||||
" | copy(...)\n",
|
||||
" | D.copy() -> a shallow copy of D.\n",
|
||||
" | \n",
|
||||
" | ----------------------------------------------------------------------\n",
|
||||
" | Data descriptors defined here:\n",
|
||||
" | \n",
|
||||
" | default_factory\n",
|
||||
" | Factory for default value called by __missing__().\n",
|
||||
" | \n",
|
||||
" | ----------------------------------------------------------------------\n",
|
||||
" | Methods inherited from builtins.dict:\n",
|
||||
" | \n",
|
||||
" | __contains__(self, key, /)\n",
|
||||
" | True if D has a key k, else False.\n",
|
||||
" | \n",
|
||||
" | __delitem__(self, key, /)\n",
|
||||
" | Delete self[key].\n",
|
||||
" | \n",
|
||||
" | __eq__(self, value, /)\n",
|
||||
" | Return self==value.\n",
|
||||
" | \n",
|
||||
" | __ge__(self, value, /)\n",
|
||||
" | Return self>=value.\n",
|
||||
" | \n",
|
||||
" | __getitem__(...)\n",
|
||||
" | x.__getitem__(y) <==> x[y]\n",
|
||||
" | \n",
|
||||
" | __gt__(self, value, /)\n",
|
||||
" | Return self>value.\n",
|
||||
" | \n",
|
||||
" | __iter__(self, /)\n",
|
||||
" | Implement iter(self).\n",
|
||||
" | \n",
|
||||
" | __le__(self, value, /)\n",
|
||||
" | Return self<=value.\n",
|
||||
" | \n",
|
||||
" | __len__(self, /)\n",
|
||||
" | Return len(self).\n",
|
||||
" | \n",
|
||||
" | __lt__(self, value, /)\n",
|
||||
" | Return self<value.\n",
|
||||
" | \n",
|
||||
" | __ne__(self, value, /)\n",
|
||||
" | Return self!=value.\n",
|
||||
" | \n",
|
||||
" | __new__(*args, **kwargs) from builtins.type\n",
|
||||
" | Create and return a new object. See help(type) for accurate signature.\n",
|
||||
" | \n",
|
||||
" | __setitem__(self, key, value, /)\n",
|
||||
" | Set self[key] to value.\n",
|
||||
" | \n",
|
||||
" | __sizeof__(...)\n",
|
||||
" | D.__sizeof__() -> size of D in memory, in bytes\n",
|
||||
" | \n",
|
||||
" | clear(...)\n",
|
||||
" | D.clear() -> None. Remove all items from D.\n",
|
||||
" | \n",
|
||||
" | fromkeys(iterable, value=None, /) from builtins.type\n",
|
||||
" | Returns a new dict with keys from iterable and values equal to value.\n",
|
||||
" | \n",
|
||||
" | get(...)\n",
|
||||
" | D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.\n",
|
||||
" | \n",
|
||||
" | items(...)\n",
|
||||
" | D.items() -> a set-like object providing a view on D's items\n",
|
||||
" | \n",
|
||||
" | keys(...)\n",
|
||||
" | D.keys() -> a set-like object providing a view on D's keys\n",
|
||||
" | \n",
|
||||
" | pop(...)\n",
|
||||
" | D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n",
|
||||
" | If key is not found, d is returned if given, otherwise KeyError is raised\n",
|
||||
" | \n",
|
||||
" | popitem(...)\n",
|
||||
" | D.popitem() -> (k, v), remove and return some (key, value) pair as a\n",
|
||||
" | 2-tuple; but raise KeyError if D is empty.\n",
|
||||
" | \n",
|
||||
" | setdefault(...)\n",
|
||||
" | D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D\n",
|
||||
" | \n",
|
||||
" | update(...)\n",
|
||||
" | D.update([E, ]**F) -> None. Update D from dict/iterable E and F.\n",
|
||||
" | If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]\n",
|
||||
" | If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v\n",
|
||||
" | In either case, this is followed by: for k in F: D[k] = F[k]\n",
|
||||
" | \n",
|
||||
" | values(...)\n",
|
||||
" | D.values() -> an object providing a view on D's values\n",
|
||||
" | \n",
|
||||
" | ----------------------------------------------------------------------\n",
|
||||
" | Data and other attributes inherited from builtins.dict:\n",
|
||||
" | \n",
|
||||
" | __hash__ = None\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"help(d)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'s2': [11, 23]}"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"dict(d)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 55,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def groupByKey(l):\n",
|
||||
" d = defaultdict(list)\n",
|
||||
" for key, value in l:\n",
|
||||
" d[key].append(value)\n",
|
||||
" return list(dict(d).items()).pop()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 56,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"('s2', [11, 23])"
|
||||
]
|
||||
},
|
||||
"execution_count": 56,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"r = groupByKey([('s2', 11), ('s2', 23)])\n",
|
||||
"r"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# xf = lambda param: 1.0 + param\n",
|
||||
"# def xf(y, param, s):\n",
|
||||
"# return s[y] + param\n",
|
||||
"\n",
|
||||
"# def fit_param(param):\n",
|
||||
"# y = 's2'\n",
|
||||
"# x = 1 + param\n",
|
||||
"# return lambda step, sL, s, _input: (y, x)\n",
|
||||
"#\n",
|
||||
"# def fit_param(param):\n",
|
||||
"# return lambda step, sL, s, _input: (\n",
|
||||
"# 's2',\n",
|
||||
"# s['s2'] + param\n",
|
||||
"# )\n",
|
||||
"#\n",
|
||||
"# s2m1 = sweep(\n",
|
||||
"# params = [Decimal(11.0), Decimal(22.0)],\n",
|
||||
"# sweep_f = fit_param\n",
|
||||
"# )"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from decimal import Decimal\n",
|
||||
"from itertools import product\n",
|
||||
"\n",
|
||||
"# def \n",
|
||||
"\n",
|
||||
"l = {\n",
|
||||
" 's1': 1, \n",
|
||||
" 's2': [Decimal('11'), Decimal('22')], \n",
|
||||
" 's3': [Decimal('12'), Decimal('23')], \n",
|
||||
" 's4': 10, \n",
|
||||
" 'timestamp': '2018-10-01 15:16:25', \n",
|
||||
" 'mech_step': 0, \n",
|
||||
" 'time_step': 1\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"def flattenDict(l):\n",
|
||||
" def tupalize(k, vs):\n",
|
||||
" l = []\n",
|
||||
" if isinstance(vs, list):\n",
|
||||
" for v in vs:\n",
|
||||
" l.append((k, v))\n",
|
||||
" else:\n",
|
||||
" l.append((k, vs))\n",
|
||||
" return l\n",
|
||||
"\n",
|
||||
" flat_list = [tupalize(k, vs) for k, vs in l.items()]\n",
|
||||
" flat_dict = [dict(items) for items in product(*flat_list)]\n",
|
||||
" return flat_dict"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[{'s1': 1,\n",
|
||||
" 's2': Decimal('11'),\n",
|
||||
" 's3': Decimal('12'),\n",
|
||||
" 's4': 10,\n",
|
||||
" 'timestamp': '2018-10-01 15:16:25',\n",
|
||||
" 'mech_step': 0,\n",
|
||||
" 'time_step': 1},\n",
|
||||
" {'s1': 1,\n",
|
||||
" 's2': Decimal('11'),\n",
|
||||
" 's3': Decimal('23'),\n",
|
||||
" 's4': 10,\n",
|
||||
" 'timestamp': '2018-10-01 15:16:25',\n",
|
||||
" 'mech_step': 0,\n",
|
||||
" 'time_step': 1},\n",
|
||||
" {'s1': 1,\n",
|
||||
" 's2': Decimal('22'),\n",
|
||||
" 's3': Decimal('12'),\n",
|
||||
" 's4': 10,\n",
|
||||
" 'timestamp': '2018-10-01 15:16:25',\n",
|
||||
" 'mech_step': 0,\n",
|
||||
" 'time_step': 1},\n",
|
||||
" {'s1': 1,\n",
|
||||
" 's2': Decimal('22'),\n",
|
||||
" 's3': Decimal('23'),\n",
|
||||
" 's4': 10,\n",
|
||||
" 'timestamp': '2018-10-01 15:16:25',\n",
|
||||
" 'mech_step': 0,\n",
|
||||
" 'time_step': 1}]"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"flattenDict(l)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 1
|
||||
}
|
||||
setup.py

@@ -1,6 +1,6 @@
from setuptools import setup, find_packages

long_description = "SimCAD is a differential games based simulation software package for research, validation, and \
long_description = "cadCAD is a differential games based simulation software package for research, validation, and \
Computer Aided Design of economic systems. An economic system is treated as a state based model and defined through \
a set of endogenous and exogenous state variables which are updated through mechanisms and environmental processes, \
respectively. Behavioral models, which may be deterministic or stochastic, provide the evolution of the system \

@@ -10,14 +10,14 @@ long_description = "SimCAD is a differential games based simulation software pac
processes to understand and visualize network behavior under various conditions. Support for A/B testing policies, \
monte carlo analysis and other common numerical methods is provided."

setup(name='SimCAD',
      version='0.1',
      description="SimCAD: a differential games based simulation software package for research, validation, and \
setup(name='cadCAD',
      version='0.2',
      description="cadCAD: a differential games based simulation software package for research, validation, and \
      Computer Aided Design of economic systems",
      long_description = long_description,
      long_description=long_description,
      url='https://github.com/BlockScience/DiffyQ-SimCAD',
      author='Joshua E. Jodesty',
      author_email='joshua@block.science',
      # license='LICENSE',
      packages=find_packages() #['SimCAD']
      packages=find_packages()
      )

File diff suppressed because one or more lines are too long
@@ -1,15 +1,15 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from SimCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import config1, config2 # sweep_config
from SimCAD import configs
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import sweep_config #, config1, config2
from cadCAD import configs

exec_mode = ExecutionMode()

# print("Simulation Execution 1")
# print()
# first_config = [configs[0]] # from config1
# first_config = [configs[0]] # FOR non-sweep configs ONLY
# single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
# run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
# run1_raw_result, tensor_field = run1.main()
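The updated test script above switches from config1/config2 to the parameter-sweep config. A hedged sketch of how a multi-config sweep run might be consumed, following the engine hunk earlier in this diff (multi_proc mode returns a list of (flattened result, tensor_field) pairs); treating each flattened record as a state dict for the DataFrame is an assumption:

```python
import pandas as pd
from tabulate import tabulate
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import sweep_config  # registers one Configuration per swept value
from cadCAD import configs

exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)

for raw_result, tensor_field in run.main():
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print(pd.DataFrame(raw_result).head())
```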
@@ -2,12 +2,12 @@ from decimal import Decimal
import numpy as np
from datetime import timedelta

from SimCAD.configuration import append_configs
from SimCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
from SimCAD.configuration.utils.parameterSweep import config_sim
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
from cadCAD.configuration.utils.parameterSweep import config_sim


seed = {
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),

@@ -15,20 +15,20 @@ seed = {
}


# Behaviors per Mechanism
def b1m1(_g, step, sL, s):
# Policies per Mechanism
def p1m1(_g, step, sL, s):
    return {'param1': 1}
def b2m1(_g, step, sL, s):
def p2m1(_g, step, sL, s):
    return {'param2': 4}

def b1m2(_g, step, sL, s):
def p1m2(_g, step, sL, s):
    return {'param1': 'a', 'param2': 2}
def b2m2(_g, step, sL, s):
def p2m2(_g, step, sL, s):
    return {'param1': 'b', 'param2': 4}

def b1m3(_g, step, sL, s):
def p1m3(_g, step, sL, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}
def b2m3(_g, step, sL, s):
def p2m3(_g, step, sL, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


@@ -67,19 +67,19 @@ proc_one_coef_B = 1.3

def es3p1(_g, step, sL, s, _input):
    y = 's3'
    x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
    x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

def es4p2(_g, step, sL, s, _input):
    y = 's4'
    x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
    x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
    return (y, x)

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
    y = 'timestamp'
    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    y = 'timestep'
    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)


@@ -98,50 +98,50 @@ genesis_states = {
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
    'timestamp': '2018-10-01 15:16:24'
    # 'timestep': '2018-10-01 15:16:24'
}


raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
    "timestamp": es5p2
    # "timestep": es5p2
}


env_processes = {
    "s3": env_a,
    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
    "s4": proc_trigger(1, env_b)
}


mechanisms = {
partial_state_update_block = {
    "m1": {
        "behaviors": {
            "b1": b1m1,
            "b2": b2m1
        "policies": {
            "b1": p1m1,
            "b2": p2m1
        },
        "states": {
        "variables": {
            "s1": s1m1,
            "s2": s2m1
        }
    },
    "m2": {
        "behaviors": {
            "b1": b1m2,
            "b2": b2m2
        "policies": {
            "b1": p1m2,
            "b2": p2m2
        },
        "states": {
        "variables": {
            "s1": s1m2,
            "s2": s2m2
        }
    },
    "m3": {
        "behaviors": {
            "b1": b1m3,
            "b2": b2m3
        "policies": {
            "b1": p1m3,
            "b2": p2m3
        },
        "states": {
        "variables": {
            "s1": s1m3,
            "s2": s2m3
        }

@@ -159,9 +159,9 @@ sim_config = config_sim(

append_configs(
    sim_configs=sim_config,
    state_dict=genesis_states,
    initial_state=genesis_states,
    seed=seed,
    seeds=seeds,
    raw_exogenous_states=raw_exogenous_states,
    env_processes=env_processes,
    mechanisms=mechanisms
    partial_state_update_blocks=partial_state_update_block
)
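The partial state update block above references s1m1, s2m1 and similar update functions whose definitions fall outside the visible hunks. A minimal sketch of what one presumably looks like under the new five-argument signature shown earlier in this diff (`var_dict`, substep, state history, current state, aggregated policy input); the update rule itself is hypothetical:

```python
def s1m1(_g, step, sL, s, _input):
    y = 's1'
    x = s['s1'] + _input.get('param1', 0)   # hypothetical update rule
    return (y, x)

print(s1m1({}, 1, [], {'s1': 0}, {'param1': 1}))  # ('s1', 1)
```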
@@ -2,11 +2,11 @@ from decimal import Decimal
 import numpy as np
 from datetime import timedelta
 
-from SimCAD.configuration import append_configs
-from SimCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
-from SimCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
+from cadCAD.configuration.utils.parameterSweep import config_sim
 
-seed = {
+seeds = {
     'z': np.random.RandomState(1),
     'a': np.random.RandomState(2),
     'b': np.random.RandomState(3),
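The renamed `seeds` dictionary holds one independently seeded NumPy generator per stochastic process, so repeated runs see identical draws. A quick check of that property with plain NumPy:

```python
import numpy as np

a1 = np.random.RandomState(2)
a2 = np.random.RandomState(2)

# Generators built from the same seed reproduce the same stream of draws.
assert a1.normal(0, 1) == a2.normal(0, 1)
assert (a1.uniform(size=3) == a2.uniform(size=3)).all()
```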
@@ -14,20 +14,20 @@ seed = {
 }
 
 
-# Behaviors per Mechanism
-def b1m1(_g, step, sL, s):
+# Policies per Mechanism
+def p1m1(_g, step, sL, s):
     return {'param1': 1}
-def b2m1(_g, step, sL, s):
+def p2m1(_g, step, sL, s):
     return {'param2': 4}
 
-def b1m2(_g, step, sL, s):
+def p1m2(_g, step, sL, s):
     return {'param1': 'a', 'param2': 2}
-def b2m2(_g, step, sL, s):
+def p2m2(_g, step, sL, s):
     return {'param1': 'b', 'param2': 4}
 
-def b1m3(_g, step, sL, s):
+def p1m3(_g, step, sL, s):
     return {'param1': ['c'], 'param2': np.array([10, 100])}
-def b2m3(_g, step, sL, s):
+def p2m3(_g, step, sL, s):
     return {'param1': ['d'], 'param2': np.array([20, 200])}
@@ -66,19 +66,19 @@ proc_one_coef_B = 1.3
 
 def es3p1(_g, step, sL, s, _input):
     y = 's3'
-    x = s['s3'] * bound_norm_random(seed['a'], proc_one_coef_A, proc_one_coef_B)
+    x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
     return (y, x)
 
 def es4p2(_g, step, sL, s, _input):
     y = 's4'
-    x = s['s4'] * bound_norm_random(seed['b'], proc_one_coef_A, proc_one_coef_B)
+    x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
     return (y, x)
 
 ts_format = '%Y-%m-%d %H:%M:%S'
 t_delta = timedelta(days=0, minutes=0, seconds=1)
 def es5p2(_g, step, sL, s, _input):
-    y = 'timestamp'
-    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
+    y = 'timestep'
+    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
     return (y, x)
@@ -97,28 +97,28 @@ genesis_states = {
     's2': Decimal(0.0),
     's3': Decimal(1.0),
     's4': Decimal(1.0),
-    'timestamp': '2018-10-01 15:16:24'
+    # 'timestep': '2018-10-01 15:16:24'
 }
 
 raw_exogenous_states = {
     "s3": es3p1,
     "s4": es4p2,
-    "timestamp": es5p2
+    # "timestep": es5p2
 }
 
 env_processes = {
-    "s3": proc_trigger('2018-10-01 15:16:25', env_a),
-    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
+    "s3": proc_trigger(1, env_a),
+    "s4": proc_trigger(1, env_b)
 }
 
-mechanisms = {
+partial_state_update_block = {
     "m1": {
-        "behaviors": {
-            "b1": b1m1,
-            # "b2": b2m1
+        "policies": {
+            "b1": p1m1,
+            # "b2": p2m1
         },
         "states": {
             "s1": s1m1,
@@ -126,9 +126,9 @@ mechanisms = {
         }
     },
     "m2": {
-        "behaviors": {
-            "b1": b1m2,
-            # "b2": b2m2
+        "policies": {
+            "b1": p1m2,
+            # "b2": p2m2
         },
         "states": {
             "s1": s1m2,
@@ -136,9 +136,9 @@ mechanisms = {
         }
     },
     "m3": {
-        "behaviors": {
-            "b1": b1m3,
-            "b2": b2m3
+        "policies": {
+            "b1": p1m3,
+            "b2": p2m3
         },
         "states": {
             "s1": s1m3,
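Across the three hunks above, each block keeps the same shape after the rename: a "policies" map from labels to policy functions and a "states" map from state-variable names to update functions. A skeletal block in that shape (the function names below are placeholders, not part of this commit):

```python
def policy_example(_g, step, sL, s):
    # Policies read the current state s and return a dict of signals.
    return {'param1': 1}

def update_s1(_g, step, sL, s, _input):
    # State updates receive the aggregated policy signals as _input and
    # return the (variable, new_value) pair.
    return ('s1', _input['param1'])

partial_state_update_block = {
    "m1": {
        "policies": {"b1": policy_example},
        "states": {"s1": update_s1}
    }
}
```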
@@ -158,9 +158,9 @@ sim_config = config_sim(
 
 append_configs(
     sim_configs=sim_config,
-    state_dict=genesis_states,
-    seed=seed,
+    initial_state=genesis_states,
+    seeds=seeds,
     raw_exogenous_states=raw_exogenous_states,
     env_processes=env_processes,
-    mechanisms=mechanisms
+    partial_state_update_blocks=partial_state_update_block
 )

@@ -0,0 +1,142 @@
+from decimal import Decimal
+import numpy as np
+from datetime import timedelta
+
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
+from cadCAD.configuration.utils.parameterSweep import config_sim
+
+
+seeds = {
+    'z': np.random.RandomState(1),
+    'a': np.random.RandomState(2),
+    'b': np.random.RandomState(3),
+    'c': np.random.RandomState(3)
+}
+
+
+# Policies per Mechanism
+def p1m1(_g, step, sL, s):
+    return {'param1': 1}
+def p2m1(_g, step, sL, s):
+    return {'param2': 4}
+
+def p1m2(_g, step, sL, s):
+    return {'param1': 'a', 'param2': 2}
+def p2m2(_g, step, sL, s):
+    return {'param1': 'b', 'param2': 4}
+
+def p1m3(_g, step, sL, s):
+    return {'param1': ['c'], 'param2': np.array([10, 100])}
+def p2m3(_g, step, sL, s):
+    return {'param1': ['d'], 'param2': np.array([20, 200])}
+
+
+# Internal States per Mechanism
+def s1m1(_g, step, sL, s, _input):
+    y = 's1'
+    x = _input['param1']
+    return (y, x)
+def s2m1(_g, step, sL, s, _input):
+    y = 's2'
+    x = _input['param2']
+    return (y, x)
+
+def s1m2(_g, step, sL, s, _input):
+    y = 's1'
+    x = _input['param1']
+    return (y, x)
+def s2m2(_g, step, sL, s, _input):
+    y = 's2'
+    x = _input['param2']
+    return (y, x)
+
+def s1m3(_g, step, sL, s, _input):
+    y = 's1'
+    x = _input['param1']
+    return (y, x)
+def s2m3(_g, step, sL, s, _input):
+    y = 's2'
+    x = _input['param2']
+    return (y, x)
+
+def s1m4(_g, step, sL, s, _input):
+    y = 's1'
+    x = [1]
+    return (y, x)
+
+
+# Exogenous States
+proc_one_coef_A = 0.7
+proc_one_coef_B = 1.3
+
+def es3p1(_g, step, sL, s, _input):
+    y = 's3'
+    x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
+    return (y, x)
+
+def es4p2(_g, step, sL, s, _input):
+    y = 's4'
+    x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
+    return (y, x)
+
+ts_format = '%Y-%m-%d %H:%M:%S'
+t_delta = timedelta(days=0, minutes=0, seconds=1)
+def es5p2(_g, step, sL, s, _input):
+    y = 'timestamp'
+    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
+    return (y, x)
+
+
+# Environment States
+def env_a(x):
+    return 5
+def env_b(x):
+    return 10
+# def what_ever(x):
+# return x + 1
+
+
+# Genesis States
+genesis_states = {
+    's1': Decimal(0.0),
+    's2': Decimal(0.0),
+    's3': Decimal(1.0),
+    's4': Decimal(1.0),
+    'timestamp': '2018-10-01 15:16:24'
+}
+
+
+raw_exogenous_states = {
+    "s3": es3p1,
+    "s4": es4p2,
+    "timestamp": es5p2
+}
+
+
+env_processes = {
+    "s3": env_a,
+    "s4": proc_trigger('2018-10-01 15:16:25', env_b)
+}
+
+
+partial_state_update_block = [
+]
+
+
+sim_config = config_sim(
+    {
+        "N": 2,
+        "T": range(5),
+    }
+)
+
+
+append_configs(
+    sim_configs=sim_config,
+    initial_state=genesis_states,
+    seeds={},
+    raw_exogenous_states={},
+    env_processes={},
+    partial_state_update_blocks=partial_state_update_block
+)

@@ -3,13 +3,13 @@ import numpy as np
 from datetime import timedelta
 import pprint
 
-from SimCAD.configuration import append_configs
-from SimCAD.configuration.utils import proc_trigger, ep_time_step
-from SimCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import proc_trigger, ep_time_step
+from cadCAD.configuration.utils.parameterSweep import config_sim
 
 pp = pprint.PrettyPrinter(indent=4)
 
-seed = {
+seeds = {
     'z': np.random.RandomState(1),
     'a': np.random.RandomState(2),
     'b': np.random.RandomState(3),
@@ -24,23 +24,23 @@ g = {
     'omega': [7]
 }
 
-# Behaviors per Mechanism
-def b1m1(_g, step, sL, s):
+# Policies per Mechanism
+def p1m1(_g, step, sL, s):
     return {'param1': 1}
 
-def b2m1(_g, step, sL, s):
+def p2m1(_g, step, sL, s):
     return {'param2': 4}
 
-def b1m2(_g, step, sL, s):
+def p1m2(_g, step, sL, s):
     return {'param1': 'a', 'param2': _g['beta']}
 
-def b2m2(_g, step, sL, s):
+def p2m2(_g, step, sL, s):
     return {'param1': 'b', 'param2': 0}
 
-def b1m3(_g, step, sL, s):
+def p1m3(_g, step, sL, s):
     return {'param1': np.array([10, 100])}
 
-def b2m3(_g, step, sL, s):
+def p2m3(_g, step, sL, s):
     return {'param1': np.array([20, 200])}
 
 # Internal States per Mechanism
@@ -93,8 +93,8 @@ def es4p2(_g, step, sL, s, _input):
 ts_format = '%Y-%m-%d %H:%M:%S'
 t_delta = timedelta(days=0, minutes=0, seconds=1)
 def es5p2(_g, step, sL, s, _input):
-    y = 'timestamp'
-    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
+    y = 'timestep'
+    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
     return (y, x)
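`ep_time_step` is not shown in this diff, but with the format string and one-second delta used here, advancing the stored timestamp is ordinary `datetime` arithmetic; a standard-library sketch of the same step:

```python
from datetime import datetime, timedelta

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)

def next_timestamp(dt_str):
    # Parse the current timestamp string, add one second, re-serialise.
    return (datetime.strptime(dt_str, ts_format) + t_delta).strftime(ts_format)

print(next_timestamp('2018-10-01 15:16:24'))  # -> 2018-10-01 15:16:25
```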
@@ -114,7 +114,7 @@ genesis_states = {
     's2': Decimal(0.0),
     's3': Decimal(1.0),
     's4': Decimal(1.0),
-    'timestamp': '2018-10-01 15:16:24'
+    # 'timestep': '2018-10-01 15:16:24'
 }
@@ -122,13 +122,13 @@ genesis_states = {
 raw_exogenous_states = {
     "s3": es3p1,
     "s4": es4p2,
-    "timestamp": es5p2
+    # "timestep": es5p2
 }
 
 
 # ToDo: make env proc trigger field agnostic
 # ToDo: input json into function renaming __name__
-triggered_env_b = proc_trigger('2018-10-01 15:16:25', env_b)
+triggered_env_b = proc_trigger(1, env_b)
 env_processes = {
     "s3": env_a, #sweep(beta, env_a),
     "s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
|
|||
# sweep exo_state func and point to exo-state in every other funtion
|
||||
# param sweep on genesis states
|
||||
|
||||
mechanisms = {
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"behaviors": {
|
||||
"b1": b1m1,
|
||||
"b2": b2m1
|
||||
"policies": {
|
||||
"b1": p1m1,
|
||||
"b2": p2m1
|
||||
},
|
||||
"states": {
|
||||
"variables": {
|
||||
"s1": s1m1,
|
||||
"s2": s2m1
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"behaviors": {
|
||||
"b1": b1m2,
|
||||
"b2": b2m2,
|
||||
"policies": {
|
||||
"b1": p1m2,
|
||||
"b2": p2m2,
|
||||
},
|
||||
"states": {
|
||||
"variables": {
|
||||
"s1": s1m2,
|
||||
"s2": s2m2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"behaviors": {
|
||||
"b1": b1m3,
|
||||
"b2": b2m3
|
||||
"policies": {
|
||||
"b1": p1m3,
|
||||
"b2": p2m3
|
||||
},
|
||||
"states": {
|
||||
"variables": {
|
||||
"s1": s1m3,
|
||||
"s2": s2m3
|
||||
}
|
||||
|
|
@@ -188,9 +188,9 @@ sim_config = config_sim(
 
 append_configs(
     sim_configs=sim_config,
-    state_dict=genesis_states,
-    seed=seed,
+    initial_state=genesis_states,
+    seeds=seeds,
     raw_exogenous_states=raw_exogenous_states,
     env_processes=env_processes,
-    mechanisms=mechanisms
+    partial_state_update_blocks=partial_state_update_block
 )