Merge pull request #51 from BlockScience/staging

ver. 0.2.4 - overwrite master
Joshua E. Jodesty 2019-05-31 17:42:54 -04:00 committed by GitHub
commit fe8d9a1eac
44 changed files with 6868 additions and 236 deletions

3
.gitignore vendored
View File

@ -1,4 +1,5 @@
.idea
jupyter notebook
.ipynb_checkpoints
.DS_Store
.idea
@ -15,7 +16,7 @@ simulations/.ipynb_checkpoints
simulations/validation/config3.py
dist/*.gz
cadCAD.egg-info
monkeytype.sqlite3
build
cadCAD.egg-info
SimCAD.egg-info

View File

@ -1,6 +1,5 @@
from typing import Dict, Callable, List, Tuple
from functools import reduce
from fn.op import foldr
import pandas as pd
from pandas.core.frame import DataFrame
@ -10,11 +9,14 @@ from cadCAD.configuration.utils import exo_update_per_ts
from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config
# policy_ops=[foldr(dict_elemwise_sum())]
# policy_ops=[reduce, lambda a, b: {**a, **b}]
class Configuration(object):
def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
exogenous_states={}, partial_state_update_blocks={}, policy_ops=[foldr(dict_elemwise_sum())],
exogenous_states={}, partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b],
**kwargs) -> None:
# print(exogenous_states)
self.sim_config = sim_config
self.initial_state = initial_state
self.seeds = seeds
@ -26,33 +28,26 @@ class Configuration(object):
sanitize_config(self)
# ToDo: Remove Seeds
def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={},
partial_state_update_blocks={}, _exo_update_per_ts: bool = True) -> None:
partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b], _exo_update_per_ts: bool = True) -> None:
if _exo_update_per_ts is True:
exogenous_states = exo_update_per_ts(raw_exogenous_states)
else:
exogenous_states = raw_exogenous_states
if isinstance(sim_configs, list):
for sim_config in sim_configs:
config = Configuration(
sim_config=sim_config,
initial_state=initial_state,
seeds=seeds,
exogenous_states=exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_blocks
)
configs.append(config)
elif isinstance(sim_configs, dict):
if isinstance(sim_configs, dict):
sim_configs = [sim_configs]
for sim_config in sim_configs:
config = Configuration(
sim_config=sim_configs,
sim_config=sim_config,
initial_state=initial_state,
seeds=seeds,
exogenous_states=exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_blocks
partial_state_update_blocks=partial_state_update_blocks,
policy_ops=policy_ops
)
configs.append(config)
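A minimal usage sketch of the updated append_configs entry point shown above. The policy, state-update, and block names are illustrative placeholders, not part of this commit; only the keyword arguments and defaults come from the signature in the diff.
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
def p(_g, step, sL, s):                      # policy: returns a dict of signals
    return {'param1': 1}
def update_s1(_g, step, sL, s, _input):      # state update: returns (key, new value)
    return 's1', s['s1'] + _input['param1']
append_configs(
    sim_configs=config_sim({"N": 1, "T": range(10)}),   # dict or list of dicts; a dict is wrapped in a list
    initial_state={'s1': 0},
    partial_state_update_blocks={'psub1': {'policies': {'p': p}, 'variables': {'s1': update_s1}}},
    policy_ops=[lambda a, b: a + b]                     # pairwise reduction of policy outputs per key
)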

View File

@ -1,7 +1,8 @@
from datetime import datetime, timedelta
from decimal import Decimal
from copy import deepcopy
from functools import reduce
from fn.func import curried
from funcy import curry
import pandas as pd
# Temporary
@ -35,27 +36,31 @@ def bound_norm_random(rng, low, high):
res = rng.normal((high+low)/2, (high-low)/6)
if res < low or res > high:
res = bound_norm_random(rng, low, high)
return Decimal(res)
# return Decimal(res)
return float(res)
@curried
def proc_trigger(trigger_time, update_f, time):
if time == trigger_time:
return update_f
def env_proc_trigger(timestep, f, time):
if time == timestep:
return f
else:
return lambda x: x
tstep_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = tstep_delta):
# print(dt_str)
dt = datetime.strptime(dt_str, dt_format)
t = dt + _timedelta
return t.strftime(dt_format)
# ToDo: Inject in first elem of last PSUB from Historical state
ep_t_delta = timedelta(days=0, minutes=0, seconds=1)
def ep_time_step(s, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = ep_t_delta):
if s['substep'] == 0:
def ep_time_step(s_condition, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = ep_t_delta):
# print(dt_str)
if s_condition:
return time_step(dt_str, fromat_str, _timedelta)
else:
return dt_str
@ -124,21 +129,92 @@ def exo_update_per_ts(ep):
return {es: ep_decorator(f, es) for es, f in ep.items()}
def trigger_condition(s, pre_conditions, cond_opp):
condition_bools = [s[field] in precondition_values for field, precondition_values in pre_conditions.items()]
return reduce(cond_opp, condition_bools)
# Param Sweep enabling middleware
def apply_state_condition(pre_conditions, cond_opp, y, f, _g, step, sL, s, _input):
if trigger_condition(s, pre_conditions, cond_opp):
return f(_g, step, sL, s, _input)
else:
return y, s[y]
def var_trigger(y, f, pre_conditions, cond_op):
return lambda _g, step, sL, s, _input: apply_state_condition(pre_conditions, cond_op, y, f, _g, step, sL, s, _input)
def var_substep_trigger(substeps):
def trigger(end_substep, y, f):
pre_conditions = {'substep': substeps}
cond_opp = lambda a, b: a and b
return var_trigger(y, f, pre_conditions, cond_opp)
return lambda y, f: curry(trigger)(substeps)(y)(f)
def env_trigger(end_substep):
def trigger(end_substep, trigger_field, trigger_vals, funct_list):
def env_update(state_dict, sweep_dict, target_value):
state_dict_copy = deepcopy(state_dict)
# Use substep to simulate current sysMetrics
if state_dict_copy['substep'] == end_substep:
state_dict_copy['timestep'] = state_dict_copy['timestep'] + 1
if state_dict_copy[trigger_field] in trigger_vals:
for g in funct_list:
target_value = g(sweep_dict, target_value)
del state_dict_copy
return target_value
return env_update
return lambda trigger_field, trigger_vals, funct_list: \
curry(trigger)(end_substep)(trigger_field)(trigger_vals)(funct_list)
# param sweep enabling middleware
def config_sim(d):
def process_variables(d):
return flatten_tabulated_dict(tabulate_dict(d))
if "M" in d:
return [
{
"N": d["N"],
"T": d["T"],
"M": M
}
for M in process_variables(d["M"])
]
return [{"N": d["N"], "T": d["T"], "M": M} for M in process_variables(d["M"])]
else:
d["M"] = [{}]
return d
return d
def psub_list(psu_block, psu_steps):
return [psu_block[psu] for psu in psu_steps]
def psub(policies, state_updates):
return {
'policies': policies,
'states': state_updates
}
def genereate_psubs(policy_grid, states_grid, policies, state_updates):
PSUBS = []
for policy_ids, state_list in zip(policy_grid, states_grid):
filtered_policies = {k: v for (k, v) in policies.items() if k in policy_ids}
filtered_state_updates = {k: v for (k, v) in state_updates.items() if k in state_list}
PSUBS.append(psub(filtered_policies, filtered_state_updates))
return PSUBS
def access_block(sH, y, psu_block_offset, exculsion_list=[]):
exculsion_list += [y]
def filter_history(key_list, sH):
filter = lambda key_list: \
lambda d: {k: v for k, v in d.items() if k not in key_list}
return list(map(filter(key_list), sH))
if psu_block_offset < -1:
if len(sH) >= abs(psu_block_offset):
return filter_history(exculsion_list, sH[psu_block_offset])
else:
return []
elif psu_block_offset < 0:
return filter_history(exculsion_list, sH[psu_block_offset])
else:
return []
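A hedged sketch of the parameter-sweep behaviour of config_sim defined above: with an "M" entry, one simulation config is produced per swept value, and single-element lists are held constant across them (inferred from the sweep_config regression test added in this commit; the parameter names below are illustrative).
from cadCAD.configuration.utils import config_sim
sim_configs = config_sim({
    "N": 2,
    "T": range(5),
    "M": {"alpha": [1], "beta": [2, 5]}     # 'beta' is swept; 'alpha' is held constant
})
# Expected shape (assumption):
# [{'N': 2, 'T': range(5), 'M': {'alpha': 1, 'beta': 2}},
#  {'N': 2, 'T': range(5), 'M': {'alpha': 1, 'beta': 5}}]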

View File

@ -1,7 +1,6 @@
from fn.op import foldr
from fn.func import curried
def get_base_value(x):
if isinstance(x, str):
return ''
@ -18,7 +17,7 @@ def policy_to_dict(v):
add = lambda a, b: a + b
# df_union = lambda a, b: ...
@curried
def foldr_dict_vals(f, d):
@ -41,6 +40,5 @@ def dict_op(f, d1, d2):
return {k: f(set_base_value(d1, d2, k), set_base_value(d2, d1, k)) for k in key_set}
def dict_elemwise_sum():
return dict_op(add)

View File

@ -0,0 +1,60 @@
from collections import namedtuple
from copy import deepcopy
from inspect import getmembers, ismethod
from pandas.core.frame import DataFrame
from cadCAD.utils import SilentDF
def val_switch(v):
if isinstance(v, DataFrame) is True:
return SilentDF(v)
else:
return v
class udcView(object):
def __init__(self, d, masked_members):
self.__dict__ = d
self.masked_members = masked_members
# returns dict to dataframe
# def __repr__(self):
def __repr__(self):
members = {}
variables = {
k: val_switch(v) for k, v in self.__dict__.items()
if str(type(v)) != "<class 'method'>" and k not in self.masked_members # and isinstance(v, DataFrame) is not True
}
members['methods'] = [k for k, v in self.__dict__.items() if str(type(v)) == "<class 'method'>"]
members.update(variables)
return f"{members}" #[1:-1]
class udcBroker(object):
def __init__(self, obj, function_filter=['__init__']):
d = {}
funcs = dict(getmembers(obj, ismethod))
filtered_functions = {k: v for k, v in funcs.items() if k not in function_filter}
d['obj'] = obj
d.update(deepcopy(vars(obj))) # somehow is enough
d.update(filtered_functions)
self.members_dict = d
def get_members(self):
return self.members_dict
def get_view(self, masked_members):
return udcView(self.members_dict, masked_members)
def get_namedtuple(self):
return namedtuple("Hydra", self.members_dict.keys())(*self.members_dict.values())
def UDO(udo, masked_members=['obj']):
return udcBroker(udo).get_view(masked_members)
def udoPipe(obj_view):
return UDO(obj_view.obj, obj_view.masked_members)
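A minimal sketch of the user-defined-object wrappers defined above: UDO exposes an object's attributes and bound methods through a udcView (masking 'obj' by default), and udoPipe re-wraps the underlying object so an in-place update can be handed to the next substep. The Counter class and the 'state_udo' key are illustrative, not from this commit.
from cadCAD.configuration.utils.userDefinedObject import UDO, udoPipe
class Counter(object):          # illustrative user-defined class
    def __init__(self, x):
        self.x = x
    def bump(self):
        self.x += 1
        return self
state_udo = UDO(udo=Counter(0))  # wrapped view; repr hides the masked 'obj' member
def update_udo(_g, step, sL, s, _input):
    s['state_udo'].bump()                          # mutate the wrapped object in place
    return 'state_udo', udoPipe(s['state_udo'])    # re-wrap so the update propagates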

View File

@ -27,7 +27,6 @@ def single_proc_exec(
Ts: List[range],
Ns: List[int]
):
l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
simulation_exec, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
result = simulation_exec(var_dict_list, states_list, config, env_processes, T, N)
@ -66,7 +65,6 @@ class Executor:
self.exec_method = exec_context.method
self.exec_context = exec_context.name
self.configs = configs
self.main = self.execute
def execute(self) -> Tuple[List[Dict[str, Any]], DataFrame]:
config_proc = Processor()
@ -76,6 +74,7 @@ class Executor:
var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, partial_state_updates, simulation_execs = \
[], [], [], [], [], [], [], [], []
config_idx = 0
print(self.configs)
for x in self.configs:
Ts.append(x.sim_config['T'])
@ -84,6 +83,7 @@ class Executor:
states_lists.append([x.initial_state])
eps.append(list(x.exogenous_states.values()))
configs_structs.append(config_proc.generate_config(x.initial_state, x.partial_state_updates, eps[config_idx]))
# print(env_processes_list)
env_processes_list.append(x.env_processes)
partial_state_updates.append(x.partial_state_updates)
simulation_execs.append(SimExecutor(x.policy_ops).simulation)
@ -98,12 +98,12 @@ class Executor:
result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
final_result = result, tensor_field
elif self.exec_context == ExecutionMode.multi_proc:
if len(self.configs) > 1:
simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
results = []
for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))
# if len(self.configs) > 1:
simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
results = []
for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))
final_result = results
final_result = results
return final_result

View File

@ -1,7 +1,7 @@
from typing import Any, Callable, Dict, List, Tuple
from pathos.pools import ThreadPool as TPool
from copy import deepcopy
from fn.op import foldr, call
from functools import reduce
from cadCAD.engine.utils import engine_exception
from cadCAD.utils import flatten
@ -11,55 +11,100 @@ id_exception: Callable = engine_exception(KeyError, KeyError, None)
class Executor:
def __init__(
self,
policy_ops: List[Callable],
policy_update_exception: Callable = id_exception,
state_update_exception: Callable = id_exception
) -> None:
self,
policy_ops: List[Callable],
policy_update_exception: Callable = id_exception,
state_update_exception: Callable = id_exception
) -> None:
# behavior_ops
self.policy_ops = policy_ops
self.state_update_exception = state_update_exception
self.policy_update_exception = policy_update_exception
# behavior_update_exception
# get_behavior_input # sL: State Window
def get_policy_input(
self,
var_dict: Dict[str, List[Any]],
sweep_dict: Dict[str, List[Any]],
sub_step: int,
sL: List[Dict[str, Any]],
s: Dict[str, Any],
funcs: List[Callable]
) -> Dict[str, Any]:
ops = self.policy_ops[::-1]
ops = self.policy_ops
def get_col_results(var_dict, sub_step, sL, s, funcs):
return list(map(lambda f: f(var_dict, sub_step, sL, s), funcs))
def get_col_results(sweep_dict, sub_step, sL, s, funcs):
return list(map(lambda f: f(sweep_dict, sub_step, sL, s), funcs))
return foldr(call, get_col_results(var_dict, sub_step, sL, s, funcs))(ops)
def compose(init_reduction_funct, funct_list, val_list):
result, i = None, 0
composition = lambda x: [reduce(init_reduction_funct, x)] + funct_list
for g in composition(val_list):
if i == 0:
result = g
i = 1
else:
result = g(result)
return result
col_results = get_col_results(sweep_dict, sub_step, sL, s, funcs)
key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results))))))
new_dict = {k: [] for k in key_set}
for d in col_results:
for k in d.keys():
new_dict[k].append(d[k])
ops_head, *ops_tail = ops
return {
k: compose(
init_reduction_funct=ops_head, # func executed on value list
funct_list=ops_tail,
val_list=val_list
) for k, val_list in new_dict.items()
}
# [f1] = ops
# return {k: reduce(f1, val_list) for k, val_list in new_dict.items()}
# return foldr(call, col_results)(ops)
def apply_env_proc(
self,
sweep_dict,
env_processes: Dict[str, Callable],
state_dict: Dict[str, Any],
sub_step: int
) -> None:
for state in state_dict.keys():
if state in list(env_processes.keys()):
env_state: Callable = env_processes[state]
if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
state_dict[state] = env_state(sub_step)(state_dict[state])
else:
state_dict[state] = env_state(state_dict[state])
) -> Dict[str, Any]:
def env_composition(target_field, state_dict, target_value):
function_type = type(lambda x: x)
env_update = env_processes[target_field]
if isinstance(env_update, list):
for f in env_update:
target_value = f(sweep_dict, target_value)
elif isinstance(env_update, function_type):
target_value = env_update(state_dict, sweep_dict, target_value)
else:
target_value = env_update
return target_value
filtered_state_dict = {k: v for k, v in state_dict.items() if k in env_processes.keys()}
env_proc_dict = {
target_field: env_composition(target_field, state_dict, target_value)
for target_field, target_value in filtered_state_dict.items()
}
for k, v in env_proc_dict.items():
state_dict[k] = v
return state_dict
# ToDo: Redefine as a function that applies the tensor field to a set of last conditions
# mech_step
def partial_state_update(
self,
var_dict: Dict[str, List[Any]],
sweep_dict: Dict[str, List[Any]],
sub_step: int,
sL: Any,
sH: Any,
state_funcs: List[Callable],
policy_funcs: List[Callable],
env_processes: Dict[str, Callable],
@ -68,24 +113,23 @@ class Executor:
) -> List[Dict[str, Any]]:
last_in_obj: Dict[str, Any] = deepcopy(sL[-1])
_input: Dict[str, Any] = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sL, last_in_obj, policy_funcs))
_input: Dict[str, Any] = self.policy_update_exception(self.get_policy_input(sweep_dict, sub_step, sH, last_in_obj, policy_funcs))
# ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
last_in_copy: Dict[str, Any] = dict(
[
self.state_update_exception(f(var_dict, sub_step, sL, last_in_obj, _input)) for f in state_funcs
]
)
# ToDo: Can be multithreaded ??
def generate_record(state_funcs):
for f in state_funcs:
yield self.state_update_exception(f(sweep_dict, sub_step, sH, last_in_obj, _input))
for k in last_in_obj:
if k not in last_in_copy:
last_in_copy[k] = last_in_obj[k]
del last_in_obj
self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])
def transfer_missing_fields(source, destination):
for k in source:
if k not in destination:
destination[k] = source[k]
del source # last_in_obj
return destination
last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs)))
last_in_copy: Dict[str, Any] = self.apply_env_proc(sweep_dict, env_processes, last_in_copy)
# ToDo: make 'substep' & 'timestep' reserved fields
last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
@ -97,8 +141,8 @@ class Executor:
# mech_pipeline - state_update_block
def state_update_pipeline(
self,
var_dict: Dict[str, List[Any]],
states_list: List[Dict[str, Any]],
sweep_dict: Dict[str, List[Any]],
simulation_list, #states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable],
time_step: int,
@ -106,18 +150,27 @@ class Executor:
) -> List[Dict[str, Any]]:
sub_step = 0
states_list_copy: List[Dict[str, Any]] = deepcopy(states_list)
states_list_copy: List[Dict[str, Any]] = deepcopy(simulation_list[-1])
# ToDo: Causes Substep repeats in sL:
genesis_states: Dict[str, Any] = states_list_copy[-1]
genesis_states['substep'], genesis_states['timestep'] = sub_step, time_step
if len(states_list_copy) == 1:
genesis_states['substep'] = sub_step
# genesis_states['timestep'] = 0
# else:
# genesis_states['timestep'] = time_step
del states_list_copy
states_list: List[Dict[str, Any]] = [genesis_states]
# ToDo: Was causing Substep repeats in sL, use for yield
sub_step += 1
for config in configs:
s_conf, p_conf = config[0], config[1]
states_list: List[Dict[str, Any]] = self.partial_state_update(
var_dict, sub_step, states_list, s_conf, p_conf, env_processes, time_step, run
)
for [s_conf, p_conf] in configs: # tensor field
states_list: List[Dict[str, Any]] = self.partial_state_update(
sweep_dict, sub_step, states_list, simulation_list, s_conf, p_conf, env_processes, time_step, run
)
sub_step += 1
time_step += 1
@ -127,7 +180,7 @@ class Executor:
# state_update_pipeline
def run_pipeline(
self,
var_dict: Dict[str, List[Any]],
sweep_dict: Dict[str, List[Any]],
states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable],
@ -136,19 +189,24 @@ class Executor:
) -> List[List[Dict[str, Any]]]:
time_seq: List[int] = [x + 1 for x in time_seq]
# ToDo: simulation_list should be a Tensor that is generated throughout the Executor
simulation_list: List[List[Dict[str, Any]]] = [states_list]
for time_step in time_seq:
pipe_run: List[Dict[str, Any]] = self.state_update_pipeline(
var_dict, simulation_list[-1], configs, env_processes, time_step, run
sweep_dict, simulation_list, configs, env_processes, time_step, run
)
_, *pipe_run = pipe_run
simulation_list.append(pipe_run)
return simulation_list
# ToDo: Below can be received from a tensor field
# configs: List[Tuple[List[Callable], List[Callable]]]
def simulation(
self,
var_dict: Dict[str, List[Any]],
sweep_dict: Dict[str, List[Any]],
states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable],
@ -156,20 +214,24 @@ class Executor:
runs: int
) -> List[List[Dict[str, Any]]]:
def execute_run(var_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
def execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
run += 1
states_list_copy: List[Dict[str, Any]] = deepcopy(states_list)
head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
def generate_init_sys_metrics(genesis_states_list):
for d in genesis_states_list:
d['run'], d['substep'], d['timestep'] = run, 0, 0
yield d
states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(deepcopy(states_list)))
first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(sweep_dict, states_list_copy, configs, env_processes, time_seq, run)
del states_list_copy
genesis: Dict[str, Any] = head.pop()
genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
first_timestep_per_run: List[Dict[str, Any]] = [genesis] + tail.pop(0)
return [first_timestep_per_run] + tail
return first_timestep_per_run
pipe_run: List[List[Dict[str, Any]]] = flatten(
TPool().map(
lambda run: execute_run(var_dict, states_list, configs, env_processes, time_seq, run),
lambda run: execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run),
list(range(runs))
)
)
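To make the new policy aggregation concrete, a standalone sketch of the reduce-then-compose behaviour implemented by get_policy_input and compose above: per-key values from all policies are reduced with the first op, then each remaining op is applied to the result. The values and ops are illustrative.
from functools import reduce
col_results = [{'param1': 1}, {'param1': 2, 'param2': 4}]   # one dict per policy function
policy_ops = [lambda a, b: a + b, lambda y: y * 10]         # head reduces, tail post-processes
grouped = {}
for d in col_results:
    for k, v in d.items():
        grouped.setdefault(k, []).append(v)                 # {'param1': [1, 2], 'param2': [4]}
head, *tail = policy_ops
aggregated = {k: reduce(lambda acc, f: f(acc), tail, reduce(head, vals))
              for k, vals in grouped.items()}
# aggregated == {'param1': 30, 'param2': 40}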

View File

@ -1,8 +1,41 @@
from functools import reduce
from typing import Dict, List
from collections import defaultdict
from collections import defaultdict, Counter
from itertools import product
import warnings
from pandas import DataFrame
class SilentDF(DataFrame):
def __repr__(self):
return str(hex(id(DataFrame))) #"pandas.core.frame.DataFrame"
def append_dict(dict, new_dict):
dict.update(new_dict)
return dict
# def val_switch(v):
# if isinstance(v, DataFrame) is True or isinstance(v, SilentDF) is True:
# return SilentDF(v)
# else:
# return v.x
class IndexCounter:
def __init__(self):
self.i = 0
def __call__(self):
self.i += 1
return self.i
# def compose(*functions):
# return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def compose(*functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def pipe(x):
return x
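Small sanity checks for the helpers added above, assuming this hunk is cadCAD/utils (SilentDF is imported from cadCAD.utils elsewhere in this commit): compose applies right to left, pipe is the identity, and IndexCounter is a stateful call counter.
from cadCAD.utils import compose, pipe, IndexCounter
double = lambda x: x * 2
inc = lambda x: x + 1
assert compose(double, inc)(3) == 8       # inc first (4), then double
assert pipe(3) == 3                       # identity pass-through
counter = IndexCounter()
assert counter() == 1 and counter() == 2  # increments on every call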

133
cadCAD/utils/sys_config.py Normal file
View File

@ -0,0 +1,133 @@
from funcy import curry
from cadCAD.configuration.utils import ep_time_step, time_step
def increment(y, incr_by):
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
def track(y):
return lambda _g, step, sL, s, _input: (y, s[y].x)
def simple_state_update(y, x):
return lambda _g, step, sH, s, _input: (y, x)
def simple_policy_update(y):
return lambda _g, step, sH, s: y
def update_timestamp(y, timedelta, format):
return lambda _g, step, sL, s, _input: (
y,
ep_time_step(s, dt_str=s[y], fromat_str=format, _timedelta=timedelta)
)
def apply(f, y: str, incr_by: int):
return lambda _g, step, sL, s, _input: (y, curry(f)(s[y])(incr_by))
def add(y: str, incr_by):
return apply(lambda a, b: a + b, y, incr_by)
def increment_state_by_int(y: str, incr_by: int):
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
def s(y, x):
return lambda _g, step, sH, s, _input: (y, x)
def time_model(y, substeps, time_delta, ts_format='%Y-%m-%d %H:%M:%S'):
def apply_incriment_condition(s):
if s['substep'] == 0 or s['substep'] == substeps:
return y, time_step(dt_str=s[y], dt_format=ts_format, _timedelta=time_delta)
else:
return y, s[y]
return lambda _g, step, sL, s, _input: apply_incriment_condition(s)
# ToDo: Implement Matrix reduction
#
# [
# {'conditions': [123], 'opp': lambda a, b: a and b},
# {'conditions': [123], 'opp': lambda a, b: a and b}
# ]
# def trigger_condition2(s, conditions, cond_opp):
# # print(conditions)
# condition_bools = [s[field] in precondition_values for field, precondition_values in conditions.items()]
# return reduce(cond_opp, condition_bools)
#
# def trigger_multi_conditions(s, multi_conditions, multi_cond_opp):
# # print([(d['conditions'], d['reduction_opp']) for d in multi_conditions])
# condition_bools = [
# trigger_condition2(s, conditions, opp) for conditions, opp in [
# (d['conditions'], d['reduction_opp']) for d in multi_conditions
# ]
# ]
# return reduce(multi_cond_opp, condition_bools)
#
# def apply_state_condition2(multi_conditions, multi_cond_opp, y, f, _g, step, sL, s, _input):
# if trigger_multi_conditions(s, multi_conditions, multi_cond_opp):
# return f(_g, step, sL, s, _input)
# else:
# return y, s[y]
#
# def proc_trigger2(y, f, multi_conditions, multi_cond_opp):
# return lambda _g, step, sL, s, _input: apply_state_condition2(multi_conditions, multi_cond_opp, y, f, _g, step, sL, s, _input)
#
# def timestep_trigger2(end_substep, y, f):
# multi_conditions = [
# {
# 'condition': {
# 'substep': [0, end_substep]
# },
# 'reduction_opp': lambda a, b: a and b
# }
# ]
# multi_cond_opp = lambda a, b: a and b
# return proc_trigger2(y, f, multi_conditions, multi_cond_opp)
#
# @curried
# print(env_trigger(3).__module__)
# pp.pprint(dir(env_trigger))
# @curried
# def env_proc_trigger(trigger_time, update_f, time):
# if time == trigger_time:
# return update_f
# else:
# return lambda x: x
# def p1m1(_g, step, sL, s):
# return {'param1': 1}
#
# def apply_policy_condition(policies, policy_id, f, conditions, _g, step, sL, s):
# if trigger_condition(s, conditions):
# policies[policy_id] = f(_g, step, sL, s)
# return policies
# else:
# return policies
#
# def proc_trigger2(policies, conditions, policy_id, f):
# return lambda _g, step, sL, s: apply_policy_condition(policies, policy_id, f, conditions,_g, step, sL, s)
# policies_updates = {"p1": udo_policyA, "p2": udo_policyB}
# @curried
# def proc_trigger(trigger_time, update_f, time):
# if time == trigger_time:
# return update_f
# else:
# return lambda x: x
# def repr(_g, step, sL, s, _input):
# y = 'z'
# x = s['state_udo'].__repr__()
# return (y, x)
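For orientation, a brief sketch of how the factories in this new sys_config module plug into a partial state update block: each returns a function with the (_g, step, sL, s, _input) signature expected by the engine. The 'x' and 'timestamp' keys are illustrative.
from datetime import timedelta
from cadCAD.utils.sys_config import add, time_model
variables = {
    'x': add('x', 1),                                         # increment state 'x' by 1 each substep
    'timestamp': time_model('timestamp', substeps=3,
                            time_delta=timedelta(seconds=1))  # advance only at substep 0 or 3
}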

Binary file not shown.

BIN
dist/cadCAD-0.2.4-py3-none-any.whl vendored Normal file

Binary file not shown.

View File

@ -2,4 +2,5 @@ pandas
wheel
pathos
fn
tabulate
tabulate
funcy

View File

@ -11,7 +11,7 @@ long_description = "cadCAD is a differential games based simulation software pac
monte carlo analysis and other common numerical methods is provided."
setup(name='cadCAD',
version='0.2.2',
version='0.2.4',
description="cadCAD: a differential games based simulation software package for research, validation, and \
Computer Aided Design of economic systems",
long_description=long_description,

View File

@ -0,0 +1,24 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
# from simulations.validation import config1_test_pipe
# from simulations.validation import config1
from simulations.validation import write_simulation
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, _ = run.main()
result = pd.DataFrame(raw_result)
result.to_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv', index=False)
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -13,7 +13,7 @@ run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['sweep_config_A', 'sweep_config_B']
for raw_result, tensor_field in run.main():
for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])

View File

@ -1,10 +1,9 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, config_sim, time_step, env_trigger
seeds = {
'z': np.random.RandomState(1),
@ -18,9 +17,7 @@ seeds = {
def p1m1(_g, step, sL, s):
return {'param1': 1}
def p2m1(_g, step, sL, s):
return {'param2': 4}
# []
return {'param1': 1, 'param2': 4}
def p1m2(_g, step, sL, s):
return {'param1': 'a', 'param2': 2}
@ -36,7 +33,7 @@ def p2m3(_g, step, sL, s):
# Internal States per Mechanism
def s1m1(_g, step, sL, s, _input):
y = 's1'
x = _input['param1']
x = s['s1'] + 1
return (y, x)
def s2m1(_g, step, sL, s, _input):
y = 's2'
@ -45,7 +42,7 @@ def s2m1(_g, step, sL, s, _input):
def s1m2(_g, step, sL, s, _input):
y = 's1'
x = _input['param1']
x = s['s1'] + 1
return (y, x)
def s2m2(_g, step, sL, s, _input):
y = 's2'
@ -54,69 +51,57 @@ def s2m2(_g, step, sL, s, _input):
def s1m3(_g, step, sL, s, _input):
y = 's1'
x = _input['param1']
x = s['s1'] + 1
return (y, x)
def s2m3(_g, step, sL, s, _input):
y = 's2'
x = _input['param2']
return (y, x)
def policies(_g, step, sL, s, _input):
y = 'policies'
x = _input
return (y, x)
# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
def es3p1(_g, step, sL, s, _input):
def es3(_g, step, sL, s, _input):
y = 's3'
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
def es4p2(_g, step, sL, s, _input):
def es4(_g, step, sL, s, _input):
y = 's4'
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
def update_timestamp(_g, step, sL, s, _input):
y = 'timestamp'
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
return (y, x)
# Environment States
def env_a(x):
return 5
def env_b(x):
return 10
# def what_ever(x):
# return x + 1
return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
# Genesis States
genesis_states = {
's1': Decimal(0.0),
's2': Decimal(0.0),
's3': Decimal(1.0),
's4': Decimal(1.0),
's1': 0.0,
's2': 0.0,
's3': 1.0,
's4': 1.0,
'timestamp': '2018-10-01 15:16:24'
}
raw_exogenous_states = {
"s3": es3p1,
"s4": es4p2,
"timestamp": es5p2
}
# Environment Process
# ToDo: Deprecation Warning for env_proc_trigger convention
env_processes = {
"s3": env_a,
"s4": proc_trigger(1, env_b)
"s3": [lambda _g, x: 5],
"s4": env_trigger(3)(trigger_field='timestep', trigger_vals=[1], funct_list=[lambda _g, x: 10])
}
partial_state_update_block = {
partial_state_update_blocks = {
"m1": {
"policies": {
"b1": p1m1,
@ -124,7 +109,10 @@ partial_state_update_block = {
},
"variables": {
"s1": s1m1,
"s2": s2m1
"s2": s2m1,
"s3": es3,
"s4": es4,
"timestamp": update_timestamp
}
},
"m2": {
@ -134,7 +122,9 @@ partial_state_update_block = {
},
"variables": {
"s1": s1m2,
"s2": s2m2
"s2": s2m2,
# "s3": es3p1,
# "s4": es4p2,
}
},
"m3": {
@ -144,7 +134,9 @@ partial_state_update_block = {
},
"variables": {
"s1": s1m3,
"s2": s2m3
"s2": s2m3,
# "s3": es3p1,
# "s4": es4p2,
}
}
}
@ -157,12 +149,10 @@ sim_config = config_sim(
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
raw_exogenous_states=raw_exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block
partial_state_update_blocks=partial_state_update_blocks,
policy_ops=[lambda a, b: a + b]
)

View File

@ -1,16 +1,14 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
from cadCAD.configuration.utils import bound_norm_random, config_sim, env_trigger, time_step
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3),
'c': np.random.RandomState(3)
'c': np.random.RandomState(4)
}
@ -64,56 +62,38 @@ def s2m3(_g, step, sL, s, _input):
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
def es3p1(_g, step, sL, s, _input):
def es3(_g, step, sL, s, _input):
y = 's3'
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
def es4p2(_g, step, sL, s, _input):
def es4(_g, step, sL, s, _input):
y = 's4'
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
def update_timestamp(_g, step, sL, s, _input):
y = 'timestamp'
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
return (y, x)
# Environment States
def env_a(x):
return 10
def env_b(x):
return 10
# def what_ever(x):
# return x + 1
return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
# Genesis States
genesis_states = {
's1': Decimal(0.0),
's2': Decimal(0.0),
's3': Decimal(1.0),
's4': Decimal(1.0),
's1': 0,
's2': 0,
's3': 1,
's4': 1,
'timestamp': '2018-10-01 15:16:24'
}
raw_exogenous_states = {
"s3": es3p1,
"s4": es4p2,
"timestamp": es5p2
}
# Environment Process
# ToDo: Deprecation Warning for env_proc_trigger convention
env_processes = {
"s3": proc_trigger(1, env_a),
"s4": proc_trigger(1, env_b)
"s3": [lambda _g, x: 5],
"s4": env_trigger(3)(trigger_field='timestep', trigger_vals=[2], funct_list=[lambda _g, x: 10])
}
partial_state_update_block = {
"m1": {
"policies": {
@ -123,6 +103,9 @@ partial_state_update_block = {
"states": {
"s1": s1m1,
# "s2": s2m1
"s3": es3,
"s4": es4,
"timestep": update_timestamp
}
},
"m2": {
@ -155,12 +138,9 @@ sim_config = config_sim(
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
raw_exogenous_states=raw_exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block
)

View File

@ -0,0 +1,67 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
import pandas as pd
from cadCAD.utils import SilentDF
df = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
def query(s, df):
return df[
(df['run'] == s['run']) & (df['substep'] == s['substep']) & (df['timestep'] == s['timestep'])
].drop(columns=['run', 'substep', "timestep"])
def p1(_g, substep, sL, s):
result_dict = query(s, df).to_dict()
del result_dict["ds3"]
return {k: list(v.values()).pop() for k, v in result_dict.items()}
def p2(_g, substep, sL, s):
result_dict = query(s, df).to_dict()
del result_dict["ds1"], result_dict["ds2"]
return {k: list(v.values()).pop() for k, v in result_dict.items()}
# ToDo: SilentDF(df) won't work
#integrate_ext_dataset
def integrate_ext_dataset(_g, step, sL, s, _input):
result_dict = query(s, df).to_dict()
return 'external_data', {k: list(v.values()).pop() for k, v in result_dict.items()}
def increment(y, incr_by):
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
increment = increment('increment', 1)
def view_policies(_g, step, sL, s, _input):
return 'policies', _input
external_data = {'ds1': None, 'ds2': None, 'ds3': None}
state_dict = {
'increment': 0,
'external_data': external_data,
'policies': external_data
}
policies = {"p1": p1, "p2": p2}
states = {'increment': increment, 'external_data': integrate_ext_dataset, 'policies': view_policies}
PSUB = {'policies': policies, 'states': states}
# M1 & M2 need behaviors
partial_state_update_blocks = {
'PSUB1': PSUB,
'PSUB2': PSUB,
'PSUB3': PSUB
}
sim_config = config_sim({
"N": 2,
"T": range(4)
})
append_configs(
sim_configs=sim_config,
initial_state=state_dict,
partial_state_update_blocks=partial_state_update_blocks,
policy_ops=[lambda a, b: {**a, **b}]
)

View File

@ -0,0 +1,85 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim, access_block
policies, variables = {}, {}
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']
# Policies per Mechanism
# WARNING: DO NOT delete elements from sH
def last_update(_g, substep, sH, s):
return {"last_x": access_block(sH, "last_x", -1, exclusion_list)}
policies["last_x"] = last_update
def second2last_update(_g, substep, sH, s):
return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
policies["2nd_to_last_x"] = second2last_update
# Internal States per Mechanism
# WARNING: DO NOT delete elements from sH
def add(y, x):
return lambda _g, substep, sH, s, _input: (y, s[y] + x)
variables['x'] = add('x', 1)
# last_partial_state_update_block
def nonexsistant(_g, substep, sH, s, _input):
return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)
variables['nonexsistant'] = nonexsistant
# last_partial_state_update_block
def last_x(_g, substep, sH, s, _input):
return 'last_x', _input["last_x"]
variables['last_x'] = last_x
# 2nd to last partial state update block
def second_to_last_x(_g, substep, sH, s, _input):
return '2nd_to_last_x', _input["2nd_to_last_x"]
variables['2nd_to_last_x'] = second_to_last_x
# 3rd to last partial state update block
def third_to_last_x(_g, substep, sH, s, _input):
return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
variables['3rd_to_last_x'] = third_to_last_x
# 4th to last partial state update block
def fourth_to_last_x(_g, substep, sH, s, _input):
return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
variables['4th_to_last_x'] = fourth_to_last_x
genesis_states = {
'x': 0,
'nonexsistant': [],
'last_x': [],
'2nd_to_last_x': [],
'3rd_to_last_x': [],
'4th_to_last_x': []
}
PSUB = {
"policies": policies,
"variables": variables
}
partial_state_update_block = {
"PSUB1": PSUB,
"PSUB2": PSUB,
"PSUB3": PSUB
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
partial_state_update_blocks=partial_state_update_block
)

View File

@ -0,0 +1,86 @@
import numpy as np
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
# Policies per Mechanism
def p1m1(_g, step, sL, s):
return {'policy1': 1}
def p2m1(_g, step, sL, s):
return {'policy2': 2}
def p1m2(_g, step, sL, s):
return {'policy1': 2, 'policy2': 2}
def p2m2(_g, step, sL, s):
return {'policy1': 2, 'policy2': 2}
def p1m3(_g, step, sL, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
def p2m3(_g, step, sL, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
# Internal States per Mechanism
def add(y, x):
return lambda _g, step, sH, s, _input: (y, s[y] + x)
def policies(_g, step, sH, s, _input):
y = 'policies'
x = _input
return (y, x)
# Genesis States
genesis_states = {
'policies': {},
's1': 0
}
variables = {
's1': add('s1', 1),
"policies": policies
}
partial_state_update_block = {
"m1": {
"policies": {
"p1": p1m1,
"p2": p2m1
},
"variables": variables
},
"m2": {
"policies": {
"p1": p1m2,
"p2": p2m2
},
"variables": variables
},
"m3": {
"policies": {
"p1": p1m3,
"p2": p2m3
},
"variables": variables
}
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
# Aggregation == Reduce Map / Reduce Map Aggregation
# ToDo: subsequent functions should accept the entire data structure
# using env functions (include in reg test using / for env proc)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
partial_state_update_blocks=partial_state_update_block,
policy_ops=[lambda a, b: a + b, lambda y: y * 2]  # Default: lambda a, b: a + b; ToDo: the reduction function requires a high-level explanation
)

View File

@ -0,0 +1,159 @@
import numpy as np
from datetime import timedelta
import pprint
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, time_step, psub_list
from typing import Dict, List
pp = pprint.PrettyPrinter(indent=4)
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3),
'c': np.random.RandomState(3)
}
# Optional
g: Dict[str, List[int]] = {
'alpha': [1],
# 'beta': [2],
# 'gamma': [3],
'beta': [2, 5],
'gamma': [3, 4],
'omega': [7]
}
psu_steps = ['m1', 'm2', 'm3']
system_substeps = len(psu_steps)
var_timestep_trigger = var_substep_trigger([0, system_substeps])
env_timestep_trigger = env_trigger(system_substeps)
env_process = {}
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
# ['s1', 's2', 's3', 's4']
# Policies per Mechanism
def p1m1(_g, step, sL, s):
return {'param1': 1}
psu_block['m1']['policies']['p1'] = p1m1
def p2m1(_g, step, sL, s):
return {'param2': 4}
psu_block['m1']['policies']['p2'] = p2m1
def p1m2(_g, step, sL, s):
return {'param1': 'a', 'param2': _g['beta']}
psu_block['m2']['policies']['p1'] = p1m2
def p2m2(_g, step, sL, s):
return {'param1': 'b', 'param2': 0}
psu_block['m2']['policies']['p2'] = p2m2
def p1m3(_g, step, sL, s):
return {'param1': np.array([10, 100])}
psu_block['m3']['policies']['p1'] = p1m3
def p2m3(_g, step, sL, s):
return {'param1': np.array([20, 200])}
psu_block['m3']['policies']['p2'] = p2m3
# Internal States per Mechanism
def s1m1(_g, step, sL, s, _input):
return 's1', 0
psu_block['m1']["variables"]['s1'] = s1m1
def s2m1(_g, step, sL, s, _input):
return 's2', _g['beta']
psu_block['m1']["variables"]['s2'] = s2m1
def s1m2(_g, step, sL, s, _input):
return 's1', _input['param2']
psu_block['m2']["variables"]['s1'] = s1m2
def s2m2(_g, step, sL, s, _input):
return 's2', _input['param2']
psu_block['m2']["variables"]['s2'] = s2m2
def s1m3(_g, step, sL, s, _input):
return 's1', 0
psu_block['m3']["variables"]['s1'] = s1m3
def s2m3(_g, step, sL, s, _input):
return 's2', 0
psu_block['m3']["variables"]['s2'] = s2m3
# Exogenous States
def update_timestamp(_g, step, sL, s, _input):
y = 'timestamp'
return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
for m in ['m1','m2','m3']:
# psu_block[m]["variables"]['timestamp'] = update_timestamp
psu_block[m]["variables"]['timestamp'] = var_timestep_trigger(y='timestamp', f=update_timestamp)
# psu_block[m]["variables"]['timestamp'] = var_trigger(
# y='timestamp', f=update_timestamp, pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
# )
proc_one_coef = 0.7
def es3(_g, step, sL, s, _input):
return 's3', s['s3'] + proc_one_coef
# use `timestep_trigger` to update every ts
for m in ['m1','m2','m3']:
psu_block[m]["variables"]['s3'] = var_timestep_trigger(y='s3', f=es3)
def es4(_g, step, sL, s, _input):
return 's4', s['s4'] + _g['gamma']
for m in ['m1','m2','m3']:
psu_block[m]["variables"]['s4'] = var_timestep_trigger(y='s4', f=es4)
# ToDo: The number of values entered in sweep should be the # of config objs created,
# not dependent on the # of times the sweep is applied
# sweep exo_state func and point to exo-state in every other function
# param sweep on genesis states
# Genesis States
genesis_states = {
's1': 0.0,
's2': 0.0,
's3': 1.0,
's4': 1.0,
'timestamp': '2018-10-01 15:16:24'
}
# Environment Process
# ToDo: Validate - make env proc trigger field agnostic
env_process["s3"] = [lambda _g, x: _g['beta'], lambda _g, x: x + 1]
env_process["s4"] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']])
# config_sim Necessary
sim_config = config_sim(
{
"N": 2,
"T": range(5),
"M": g, # Optional
}
)
# New Convention
partial_state_update_blocks = psub_list(psu_block, psu_steps)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
env_processes=env_process,
partial_state_update_blocks=partial_state_update_blocks
)
print()
print("Policie State Update Block:")
pp.pprint(partial_state_update_blocks)
print()
print()

View File

@ -0,0 +1,176 @@
import pandas as pd
from fn.func import curried
from datetime import timedelta
import pprint as pp
from cadCAD.utils import SilentDF #, val_switch
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import time_step, config_sim, var_trigger, var_substep_trigger, env_trigger, psub_list
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
DF = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
class udoExample(object):
def __init__(self, x, dataset=None):
self.x = x
self.mem_id = str(hex(id(self)))
self.ds = dataset # for setting ds initially or querying
self.perception = {}
def anon(self, f):
return f(self)
def updateX(self):
self.x += 1
return self
def perceive(self, s):
self.perception = self.ds[
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
].drop(columns=['run', 'substep']).to_dict()
return self
def read(self, ds_uri):
self.ds = SilentDF(pd.read_csv(ds_uri))
return self
def write(self, ds_uri):
pd.to_csv(ds_uri)
# ToDo: Generic update function
pass
state_udo = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
policy_udoA = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
policy_udoB = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
sim_config = config_sim({
"N": 2,
"T": range(4)
})
# ToDo: DataFrame Column order
state_dict = {
'increment': 0,
'state_udo': state_udo, 'state_udo_tracker': 0,
'state_udo_perception_tracker': {"ds1": None, "ds2": None, "ds3": None, "timestep": None},
'udo_policies': {'udo_A': policy_udoA, 'udo_B': policy_udoB},
'udo_policy_tracker': (0, 0),
'timestamp': '2019-01-01 00:00:00'
}
psu_steps = ['m1', 'm2', 'm3']
system_substeps = len(psu_steps)
var_timestep_trigger = var_substep_trigger([0, system_substeps])
env_timestep_trigger = env_trigger(system_substeps)
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
def udo_policyA(_g, step, sL, s):
s['udo_policies']['udo_A'].updateX()
return {'udo_A': udoPipe(s['udo_policies']['udo_A'])}
# policies['a'] = udo_policyA
for m in psu_steps:
psu_block[m]['policies']['a'] = udo_policyA
def udo_policyB(_g, step, sL, s):
s['udo_policies']['udo_B'].updateX()
return {'udo_B': udoPipe(s['udo_policies']['udo_B'])}
# policies['b'] = udo_policyB
for m in psu_steps:
psu_block[m]['policies']['b'] = udo_policyB
# policies = {"p1": udo_policyA, "p2": udo_policyB}
# policies = {"A": udo_policyA, "B": udo_policyB}
def add(y: str, added_val):
return lambda _g, step, sL, s, _input: (y, s[y] + added_val)
# state_updates['increment'] = add('increment', 1)
for m in psu_steps:
psu_block[m]["variables"]['increment'] = add('increment', 1)
@curried
def perceive(s, self):
self.perception = self.ds[
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
].drop(columns=['run', 'substep']).to_dict()
return self
def state_udo_update(_g, step, sL, s, _input):
y = 'state_udo'
# s['hydra_state'].updateX().anon(perceive(s))
s['state_udo'].updateX().perceive(s)
x = udoPipe(s['state_udo'])
return y, x
for m in psu_steps:
psu_block[m]["variables"]['state_udo'] = state_udo_update
def track(destination, source):
return lambda _g, step, sL, s, _input: (destination, s[source].x)
state_udo_tracker = track('state_udo_tracker', 'state_udo')
for m in psu_steps:
psu_block[m]["variables"]['state_udo_tracker'] = state_udo_tracker
def track_state_udo_perception(destination, source):
def id(past_perception):
if len(past_perception) == 0:
return state_dict['state_udo_perception_tracker']
else:
return past_perception
return lambda _g, step, sL, s, _input: (destination, id(s[source].perception))
state_udo_perception_tracker = track_state_udo_perception('state_udo_perception_tracker', 'state_udo')
for m in psu_steps:
psu_block[m]["variables"]['state_udo_perception_tracker'] = state_udo_perception_tracker
def view_udo_policy(_g, step, sL, s, _input):
return 'udo_policies', _input
for m in psu_steps:
psu_block[m]["variables"]['udo_policies'] = view_udo_policy
def track_udo_policy(destination, source):
def val_switch(v):
if isinstance(v, pd.DataFrame) is True or isinstance(v, SilentDF) is True:
return SilentDF(v)
else:
return v.x
return lambda _g, step, sL, s, _input: (destination, tuple(val_switch(v) for _, v in s[source].items()))
udo_policy_tracker = track_udo_policy('udo_policy_tracker', 'udo_policies')
for m in psu_steps:
psu_block[m]["variables"]['udo_policy_tracker'] = udo_policy_tracker
def update_timestamp(_g, step, sL, s, _input):
y = 'timestamp'
return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
for m in psu_steps:
psu_block[m]["variables"]['timestamp'] = var_timestep_trigger(y='timestamp', f=update_timestamp)
# psu_block[m]["variables"]['timestamp'] = var_trigger(
# y='timestamp', f=update_timestamp,
# pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
# )
# psu_block[m]["variables"]['timestamp'] = update_timestamp
# ToDo: Bug without specifying parameters
# New Convention
partial_state_update_blocks = psub_list(psu_block, psu_steps)
append_configs(
sim_configs=sim_config,
initial_state=state_dict,
partial_state_update_blocks=partial_state_update_blocks
)
print()
print("State Updates:")
pp.pprint(partial_state_update_blocks)
print()

View File

@ -0,0 +1,169 @@
import pandas as pd
import pprint as pp
from fn.func import curried
from datetime import timedelta
from cadCAD.utils import SilentDF #, val_switch
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import time_step, config_sim
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
DF = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
class udoExample(object):
def __init__(self, x, dataset=None):
self.x = x
self.mem_id = str(hex(id(self)))
self.ds = dataset # for setting ds initially or querying
self.perception = {}
def anon(self, f):
return f(self)
def updateX(self):
self.x += 1
return self
def perceive(self, s):
self.perception = self.ds[
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
].drop(columns=['run', 'substep']).to_dict()
return self
def read(self, ds_uri):
self.ds = SilentDF(pd.read_csv(ds_uri))
return self
def write(self, ds_uri):
pd.to_csv(ds_uri)
# ToDo: Generic update function
pass
# can be accessed after an update within the same substep and timestep
state_udo = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
policy_udoA = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
policy_udoB = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
def udo_policyA(_g, step, sL, s):
s['udo_policies']['udo_A'].updateX()
return {'udo_A': udoPipe(s['udo_policies']['udo_A'])}
def udo_policyB(_g, step, sL, s):
s['udo_policies']['udo_B'].updateX()
return {'udo_B': udoPipe(s['udo_policies']['udo_B'])}
policies = {"p1": udo_policyA, "p2": udo_policyB}
# ToDo: DataFrame Column order
state_dict = {
'increment': 0,
'state_udo': state_udo, 'state_udo_tracker_a': 0, 'state_udo_tracker_b': 0,
'state_udo_perception_tracker': {"ds1": None, "ds2": None, "ds3": None, "timestep": None},
'udo_policies': {'udo_A': policy_udoA, 'udo_B': policy_udoB},
'udo_policy_tracker_a': (0, 0), 'udo_policy_tracker_b': (0, 0),
'timestamp': '2019-01-01 00:00:00'
}
@curried
def perceive(s, self):
self.perception = self.ds[
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
].drop(columns=['run', 'substep']).to_dict()
return self
def view_udo_policy(_g, step, sL, s, _input):
return 'udo_policies', _input
def state_udo_update(_g, step, sL, s, _input):
y = 'state_udo'
# s['hydra_state'].updateX().anon(perceive(s))
s['state_udo'].updateX().perceive(s)
x = udoPipe(s['state_udo'])
return y, x
def increment(y, incr_by):
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
def track(destination, source):
return lambda _g, step, sL, s, _input: (destination, s[source].x)
def track_udo_policy(destination, source):
def val_switch(v):
if isinstance(v, pd.DataFrame) is True or isinstance(v, SilentDF) is True:
return SilentDF(v)
else:
return v.x
return lambda _g, step, sL, s, _input: (destination, tuple(val_switch(v) for _, v in s[source].items()))
def track_state_udo_perception(destination, source):
def id(past_perception):
if len(past_perception) == 0:
return state_dict['state_udo_perception_tracker']
else:
return past_perception
return lambda _g, step, sL, s, _input: (destination, id(s[source].perception))
def time_model(y, substeps, time_delta, ts_format='%Y-%m-%d %H:%M:%S'):
def apply_incriment_condition(s):
if s['substep'] == 0 or s['substep'] == substeps:
return y, time_step(dt_str=s[y], dt_format=ts_format, _timedelta=time_delta)
else:
return y, s[y]
return lambda _g, step, sL, s, _input: apply_incriment_condition(s)
states = {
'increment': increment('increment', 1),
'state_udo_tracker_a': track('state_udo_tracker_a', 'state_udo'),
'state_udo': state_udo_update,
'state_udo_perception_tracker': track_state_udo_perception('state_udo_perception_tracker', 'state_udo'),
'state_udo_tracker_b': track('state_udo_tracker_b', 'state_udo'),
'udo_policy_tracker_a': track_udo_policy('udo_policy_tracker_a', 'udo_policies'),
'udo_policies': view_udo_policy,
'udo_policy_tracker_b': track_udo_policy('udo_policy_tracker_b', 'udo_policies')
}
substeps=3
update_timestamp = time_model(
'timestamp',
substeps=3,
time_delta=timedelta(days=0, minutes=0, seconds=1),
ts_format='%Y-%m-%d %H:%M:%S'
)
states['timestamp'] = update_timestamp
PSUB = {
'policies': policies,
'states': states
}
# M1 & M2 need behaviors
partial_state_update_blocks = [PSUB] * substeps
# pp.pprint(partial_state_update_blocks)
sim_config = config_sim({
"N": 2,
"T": range(4)
})
# ToDo: Bug without specifying parameters
append_configs(
sim_configs=sim_config,
initial_state=state_dict,
seeds={},
raw_exogenous_states={},
env_processes={},
partial_state_update_blocks=partial_state_update_blocks,
# policy_ops=[lambda a, b: {**a, **b}]
)
print()
print("State Updates:")
pp.pprint(partial_state_update_blocks)
print()

View File

@ -0,0 +1,23 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import config1
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -0,0 +1,23 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import config2
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config2
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -0,0 +1,26 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import external_dataset
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
result = pd.concat([result, result['external_data'].apply(pd.Series)], axis=1)[
['run', 'substep', 'timestep', 'increment', 'external_data', 'policies', 'ds1', 'ds2', 'ds3', ]
]
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -0,0 +1,27 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import historical_state_access
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
cols = ['run','substep','timestep','x','nonexsistant','last_x','2nd_to_last_x','3rd_to_last_x','4th_to_last_x']
result = result[cols]
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -2,7 +2,7 @@ import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import config1, config2
from simulations.regression_tests import config1, config2
from cadCAD import configs
exec_mode = ExecutionMode()
@ -11,12 +11,13 @@ print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
# print(configs)
i = 0
config_names = ['config1', 'config2']
for raw_result, tensor_field in run.main():
for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])
print(f"Tensor Field: {config_names[i]}")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))

View File

@ -0,0 +1,26 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import sweep_config
from cadCAD import configs
# pprint(configs)
exec_mode = ExecutionMode()
print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['sweep_config_A', 'sweep_config_B']
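# run.execute() yields one (raw_result, tensor_field) pair per configuration generated by the parameter sweep.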
for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
i += 1

View File

@ -0,0 +1,23 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import policy_aggregation
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains the policy_aggregation config
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@ -0,0 +1,44 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import udo
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains the udo config
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
# cols = configs[0].initial_state.keys()
cols = [
'increment',
'state_udo_tracker', 'state_udo', 'state_udo_perception_tracker',
'udo_policies', 'udo_policy_tracker',
'timestamp'
]
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)
# print(list(result['c']))
# print(tabulate(result['c'].apply(pd.Series), headers='keys', tablefmt='psql'))
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
print(result.info(verbose=True))
# def f(df, col):
# for k in df[col].iloc[0].keys():
# df[k] = None
# for index, row in df.iterrows():
# # df.apply(lambda row:, axis=1)

View File

@ -0,0 +1,44 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import udo_inter_substep_update
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains the udo_inter_substep_update config
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
# cols = configs[0].initial_state.keys()
cols = [
'increment',
'state_udo_tracker_a', 'state_udo', 'state_udo_perception_tracker', 'state_udo_tracker_b',
'udo_policy_tracker_a', 'udo_policies', 'udo_policy_tracker_b',
'timestamp'
]
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)
# print(list(result['c']))
# print(tabulate(result['c'].apply(pd.Series), headers='keys', tablefmt='psql'))
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
print(result.info(verbose=True))
# def f(df, col):
# for k in df[col].iloc[0].keys():
# df[k] = None
# for index, row in df.iterrows():
# # df.apply(lambda row:, axis=1)

View File

@ -0,0 +1,166 @@
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD.configuration import Configuration
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pprint as pp
T = 50 #iterations in our simulation
n = 3 #number of boxes in our network
m = 2 #for barabasi graph type, number of edges is (n-m)*m
G = nx.barabasi_albert_graph(n, m)
k = len(G.edges)
# class udoExample(object):
# def __init__(self, G):
# self.G = G
# self.mem_id = str(hex(id(self)))
g = UDO(udo=G)
print()
# print(g.edges)
# print(G.edges)
# pp.pprint(f"{type(g)}: {g}")
# next
balls = np.zeros(n,)
for node in g.nodes:
rv = np.random.randint(1,25)
g.nodes[node]['initial_balls'] = rv
balls[node] = rv
# pp.pprint(balls)
# next
scale=100
nx.draw_kamada_kawai(G, node_size=balls*scale,labels=nx.get_node_attributes(G,'initial_balls'))
# next
initial_conditions = {'balls':balls, 'network':G}
print(initial_conditions)
# next
def update_balls(params, step, sL, s, _input):
delta_balls = _input['delta']
new_balls = s['balls']
for e in G.edges:
move_ball = delta_balls[e]
src = e[0]
dst = e[1]
if (new_balls[src] >= move_ball) and (new_balls[dst] >= -move_ball):
new_balls[src] = new_balls[src] - move_ball
new_balls[dst] = new_balls[dst] + move_ball
key = 'balls'
value = new_balls
return (key, value)
def update_network(params, step, sL, s, _input):
new_nodes = _input['nodes']
new_edges = _input['edges']
new_balls = _input['quantity']
graph = s['network']
for node in new_nodes:
graph.add_node(node)
graph.nodes[node]['initial_balls'] = new_balls[node]
graph.nodes[node]['strat'] = _input['node_strats'][node]
for edge in new_edges:
graph.add_edge(edge[0], edge[1])
graph.edges[edge]['strat'] = _input['edge_strats'][edge]
key = 'network'
value = graph
return (key, value)
def update_network_balls(params, step, sL, s, _input):
new_nodes = _input['nodes']
new_balls = _input['quantity']
balls = np.zeros(len(s['balls']) + len(new_nodes))
for node in s['network'].nodes:
balls[node] = s['balls'][node]
for node in new_nodes:
balls[node] = new_balls[node]
key = 'balls'
value = balls
return (key, value)
# next
def greedy_robot(src_balls, dst_balls):
    # robot wishes to accumulate balls at its source
    # takes half of its neighbor's balls
if src_balls < dst_balls:
delta = -np.floor(dst_balls / 2)
else:
delta = 0
return delta
def fair_robot(src_balls, dst_balls):
# robot follows the simple balancing rule
delta = np.sign(src_balls - dst_balls)
return delta
def giving_robot(src_balls, dst_balls):
    # robot wishes to give away balls one at a time
if src_balls > 0:
delta = 1
else:
delta = 0
return delta
# next
robot_strategies = [greedy_robot,fair_robot, giving_robot]
for node in G.nodes:
nstrats = len(robot_strategies)
rv = np.random.randint(0,nstrats)
G.nodes[node]['strat'] = robot_strategies[rv]
for e in G.edges:
owner_node = e[0]
G.edges[e]['strat'] = G.nodes[owner_node]['strat']
# next
def robotic_network(params, step, sL, s):
graph = s['network']
delta_balls = {}
for e in graph.edges:
src = e[0]
src_balls = s['balls'][src]
dst = e[1]
dst_balls = s['balls'][dst]
# transfer balls according to specific robot strat
strat = graph.edges[e]['strat']
delta_balls[e] = strat(src_balls, dst_balls)
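# Package the per-edge deltas in the same dict shape the state update functions above expect; this policy only populates 'delta'.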
return_dict = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': delta_balls}
return (return_dict)

View File

@ -0,0 +1,221 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
T = 50 #iterations in our simulation
n = 3 #number of boxes in our network
m = 2 #for barabasi graph type, number of edges is (n-m)*m
G = nx.barabasi_albert_graph(n, m)
k = len(G.edges)
balls = np.zeros(n,)
for node in G.nodes:
rv = np.random.randint(1,25)
G.nodes[node]['initial_balls'] = rv
balls[node] = rv
scale=100
nx.draw_kamada_kawai(G, node_size=balls*scale,labels=nx.get_node_attributes(G,'initial_balls'))
def greedy_robot(src_balls, dst_balls):
    # robot wishes to accumulate balls at its source
    # takes half of its neighbor's balls
if src_balls < dst_balls:
return -np.floor(dst_balls / 2)
else:
return 0
def fair_robot(src_balls, dst_balls):
# robot follows the simple balancing rule
return np.sign(src_balls - dst_balls)
def giving_robot(src_balls, dst_balls):
    # robot wishes to give away balls one at a time
if src_balls > 0:
return 1
else:
return 0
robot_strategies = [greedy_robot,fair_robot, giving_robot]
for node in G.nodes:
nstrats = len(robot_strategies)
rv = np.random.randint(0,nstrats)
G.nodes[node]['strat'] = robot_strategies[rv]
for e in G.edges:
owner_node = e[0]
G.edges[e]['strat'] = G.nodes[owner_node]['strat']
default_policy = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': {}}
class robot(object):
def __init__(self, graph, balls, internal_policy=default_policy):
self.mem_id = str(hex(id(self)))
self.internal_policy = internal_policy
self.graph = graph
self.balls = balls
def robotic_network(self, graph, balls): # move balls
self.graph, self.balls = graph, balls
delta_balls = {}
for e in self.graph.edges:
src = e[0]
src_balls = self.balls[src]
dst = e[1]
dst_balls = self.balls[dst]
# transfer balls according to specific robot strat
strat = self.graph.edges[e]['strat']
delta_balls[e] = strat(src_balls, dst_balls)
self.internal_policy = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': delta_balls}
return self
def agent_arrival(self, graph, balls): # add node
self.graph, self.balls = graph, balls
node = len(self.graph.nodes)
edge_list = self.graph.edges
# choose a m random edges without replacement
# new = np.random.choose(edgelist,m)
new = [0, 1] # tester
nodes = [node]
edges = [(node, new_node) for new_node in new]
initial_balls = {node: np.random.randint(1, 25)}
rv = np.random.randint(0, nstrats)
node_strat = {node: robot_strategies[rv]}
edge_strats = {e: robot_strategies[rv] for e in edges}
self.internal_policy = {'nodes': nodes,
'edges': edges,
'quantity': initial_balls,
'node_strats': node_strat,
'edge_strats': edge_strats,
'delta': np.zeros(node + 1)
}
return self
robot_udo = UDO(udo=robot(G, balls), masked_members=['obj'])
initial_conditions = {'balls': balls, 'network': G, 'robot': robot_udo}
def update_balls(params, step, sL, s, _input):
delta_balls = _input['robot'].internal_policy['delta']
new_balls = s['balls']
for e in G.edges:
move_ball = delta_balls[e]
src = e[0]
dst = e[1]
if (new_balls[src] >= move_ball) and (new_balls[dst] >= -move_ball):
new_balls[src] = new_balls[src] - move_ball
new_balls[dst] = new_balls[dst] + move_ball
key = 'balls'
value = new_balls
return (key, value)
def update_network(params, step, sL, s, _input):
new_nodes = _input['robot'].internal_policy['nodes']
new_edges = _input['robot'].internal_policy['edges']
new_balls = _input['robot'].internal_policy['quantity']
graph = s['network']
for node in new_nodes:
graph.add_node(node)
graph.nodes[node]['initial_balls'] = new_balls[node]
graph.nodes[node]['strat'] = _input['robot'].internal_policy['node_strats'][node]
for edge in new_edges:
graph.add_edge(edge[0], edge[1])
graph.edges[edge]['strat'] = _input['robot'].internal_policy['edge_strats'][edge]
key = 'network'
value = graph
return (key, value)
def update_network_balls(params, step, sL, s, _input):
new_nodes = _input['robot'].internal_policy['nodes']
new_balls = _input['robot'].internal_policy['quantity']
balls = np.zeros(len(s['balls']) + len(new_nodes))
for node in s['network'].nodes:
balls[node] = s['balls'][node]
for node in new_nodes:
balls[node] = new_balls[node]
key = 'balls'
value = balls
return (key, value)
def robotic_network(params, step, sL, s):
s['robot'].robotic_network(s['network'], s['balls'])
return {'robot': udoPipe(s['robot'])}
def agent_arrival(params, step, sL, s):
s['robot'].agent_arrival(s['network'], s['balls'])
return {'robot': udoPipe(s['robot'])}
def get_robot(params, step, sL, s, _input):
return 'robot', _input['robot']
partial_state_update_blocks = [
{
'policies': {
# The following policy functions will be evaluated and their returns will be passed to the state update functions
'p1': robotic_network
},
'variables': { # The following state variables will be updated simultaneously
'balls': update_balls,
'robot': get_robot
}
},
{
'policies': {
# The following policy functions will be evaluated and their returns will be passed to the state update functions
'p1': agent_arrival
},
'variables': { # The following state variables will be updated simultaneously
'network': update_network,
'balls': update_network_balls,
'robot': get_robot
}
}
]
simulation_parameters = {
'T': range(T),
'N': 1,
'M': {}
}
append_configs(
sim_configs=simulation_parameters, #dict containing simulation parameters
initial_state=initial_conditions, #dict containing variable names and initial values
partial_state_update_blocks= partial_state_update_blocks #, #dict containing state update functions
# policy_ops=[lambda a, b: {**a, **b}]
)
# config = Configuration(initial_state=initial_conditions, #dict containing variable names and initial values
# partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
# sim_config=simulation_parameters #dict containing simulation parameters
# )
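# A minimal, illustrative sketch (not part of the original file) of how the configuration
# registered above could be run, mirroring the single-configuration runner scripts in this PR;
# the __main__ guard keeps it from executing when this module is imported by another runner.
if __name__ == "__main__":
    import pandas as pd
    from tabulate import tabulate
    from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
    from cadCAD import configs

    exec_mode = ExecutionMode()
    single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
    run = Executor(exec_context=single_proc_ctx, configs=configs)
    raw_result, tensor_field = run.execute()
    result = pd.DataFrame(raw_result)
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print(tabulate(result, headers='keys', tablefmt='psql'))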

View File

@ -2,7 +2,6 @@ import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import config1
from cadCAD import configs
exec_mode = ExecutionMode()
@ -20,4 +19,7 @@ print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print(result[['network']])
print()
print(result[['network', 'substep']])

File diff suppressed because one or more lines are too long

View File

@ -3,7 +3,7 @@ import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim
seeds = {
@ -92,8 +92,6 @@ def env_a(x):
return 5
def env_b(x):
return 10
# def what_ever(x):
# return x + 1
# Genesis States
@ -115,7 +113,7 @@ raw_exogenous_states = {
env_processes = {
"s3": env_a,
"s4": proc_trigger('2018-10-01 15:16:25', env_b)
"s4": env_proc_trigger('2018-10-01 15:16:25', env_b)
}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,150 @@
import networkx as nx
from scipy.stats import expon, gamma
import numpy as np
import matplotlib.pyplot as plt
#helper functions
def get_nodes_by_type(g, node_type_selection):
return [node for node in g.nodes if g.nodes[node]['type']== node_type_selection ]
def get_edges_by_type(g, edge_type_selection):
return [edge for edge in g.edges if g.edges[edge]['type']== edge_type_selection ]
def total_funds_given_total_supply(total_supply):
    #can put any bonding curve invariant here for initialization!
total_funds = total_supply
return total_funds
#maximum share of funds a proposal can take
default_beta = .2 #later we should set this to be a param so we can sweep it
# tuning param for the trigger function
default_rho = .001
def trigger_threshold(requested, funds, supply, beta = default_beta, rho = default_rho):
share = requested/funds
if share < beta:
return rho*supply/(beta-share)**2
else:
return np.inf
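# Illustrative check with made-up numbers: using the defaults beta=0.2 and rho=0.001,
# a request of 5% of funds against a supply of 100,000 tokens gives
# rho*supply/(beta - 0.05)**2 = 0.001*100000/0.15**2 ≈ 4444.4 conviction required,
# while any request at or above 20% of funds can never pass (threshold = inf).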
def initialize_network(n,m, funds_func=total_funds_given_total_supply, trigger_func =trigger_threshold ):
network = nx.DiGraph()
for i in range(n):
network.add_node(i)
network.nodes[i]['type']="participant"
h_rv = expon.rvs(loc=0.0, scale=1000)
network.nodes[i]['holdings'] = h_rv
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
participants = get_nodes_by_type(network, 'participant')
initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
initial_funds = funds_func(initial_supply)
#generate initial proposals
for ind in range(m):
j = n+ind
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
r_rv = gamma.rvs(3,loc=0.001, scale=10000)
network.nodes[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_threshold(r_rv, initial_funds, initial_supply)
for i in range(n):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = 0
network.edges[(i, j)]['conviction'] = 0
proposals = get_nodes_by_type(network, 'proposal')
total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
return network, initial_funds, initial_supply, total_requested
def trigger_sweep(field, trigger_func,xmax=.2,default_alpha=.5):
if field == 'token_supply':
alpha = default_alpha
share_of_funds = np.arange(.001,xmax,.001)
total_supply = np.arange(0,10**9, 10**6)
demo_data_XY = np.outer(share_of_funds,total_supply)
demo_data_Z0=np.empty(demo_data_XY.shape)
demo_data_Z1=np.empty(demo_data_XY.shape)
demo_data_Z2=np.empty(demo_data_XY.shape)
demo_data_Z3=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for ts_ind in range(len(total_supply)):
ts = total_supply[ts_ind]
tc = ts /(1-alpha)
trigger = trigger_func(sof, 1, ts)
demo_data_Z0[sof_ind,ts_ind] = np.log10(trigger)
demo_data_Z1[sof_ind,ts_ind] = trigger
demo_data_Z2[sof_ind,ts_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z3[sof_ind,ts_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z0,
'trigger':demo_data_Z1,
'share_of_max_conv': demo_data_Z2,
'log10_share_of_max_conv':demo_data_Z3,
'total_supply':total_supply,
'share_of_funds':share_of_funds}
elif field == 'alpha':
alpha = np.arange(.5,1,.01)
share_of_funds = np.arange(.001,xmax,.001)
total_supply = 10**9
demo_data_XY = np.outer(share_of_funds,alpha)
demo_data_Z4=np.empty(demo_data_XY.shape)
demo_data_Z5=np.empty(demo_data_XY.shape)
demo_data_Z6=np.empty(demo_data_XY.shape)
demo_data_Z7=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for a_ind in range(len(alpha)):
ts = total_supply
a = alpha[a_ind]
tc = ts /(1-a)
trigger = trigger_func(sof, 1, ts)
demo_data_Z4[sof_ind,a_ind] = np.log10(trigger)
demo_data_Z5[sof_ind,a_ind] = trigger
demo_data_Z6[sof_ind,a_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z7[sof_ind,a_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z4,
'trigger':demo_data_Z5,
'share_of_max_conv': demo_data_Z6,
'log10_share_of_max_conv':demo_data_Z7,
'alpha':alpha,
'share_of_funds':share_of_funds}
else:
return "invalid field"
def trigger_plotter(share_of_funds,Z, color_label,y, ylabel,cmap='jet'):
dims = (10, 5)
fig, ax = plt.subplots(figsize=dims)
cf = plt.contourf(share_of_funds, y, Z.T, 100, cmap=cmap)
cbar=plt.colorbar(cf)
plt.axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
#ax.set_xscale('log')
plt.ylabel(ylabel)
plt.xlabel('Share of Funds Requested')
plt.title('Trigger Function Map')
cbar.ax.set_ylabel(color_label)

View File

@ -0,0 +1,548 @@
import numpy as np
from cadCAD.configuration.utils import config_sim
from simulations.validation.conviction_helpers import *
#import networkx as nx
from scipy.stats import expon, gamma
#functions for partial state update block 1
#Driving processes: arrival of participants, proposals and funds
##-----------------------------------------
def gen_new_participant(network, new_participant_holdings):
i = len([node for node in network.nodes])
network.add_node(i)
network.nodes[i]['type']="participant"
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
network.nodes[i]['holdings']=new_participant_holdings
for j in get_nodes_by_type(network, 'proposal'):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = a_rv*network.nodes[i]['holdings']
network.edges[(i, j)]['conviction'] = 0
return network
scale_factor = 1000
def gen_new_proposal(network, funds, supply, total_funds, trigger_func):
j = len([node for node in network.nodes])
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
rescale = scale_factor*funds/total_funds
r_rv = gamma.rvs(3,loc=0.001, scale=rescale)
network.nodes[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)
participants = get_nodes_by_type(network, 'participant')
proposing_participant = np.random.choice(participants)
for i in participants:
network.add_edge(i, j)
if i==proposing_participant:
network.edges[(i, j)]['affinity']=1
else:
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['conviction'] = 0
network.edges[(i,j)]['tokens'] = 0
return network
def driving_process(params, step, sL, s):
#placeholder plumbing for random processes
arrival_rate = 10/s['sentiment']
rv1 = np.random.rand()
new_participant = bool(rv1<1/arrival_rate)
if new_participant:
h_rv = expon.rvs(loc=0.0, scale=1000)
new_participant_holdings = h_rv
else:
new_participant_holdings = 0
network = s['network']
affinities = [network.edges[e]['affinity'] for e in network.edges ]
median_affinity = np.median(affinities)
proposals = get_nodes_by_type(network, 'proposal')
fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]
funds = s['funds']
total_funds_requested = np.sum(fund_requests)
proposal_rate = 10/median_affinity * total_funds_requested/funds
rv2 = np.random.rand()
new_proposal = bool(rv2<1/proposal_rate)
sentiment = s['sentiment']
funds = s['funds']
scale_factor = 1+4000*sentiment**2
#this shouldn't happen but expon is throwing domain errors
if scale_factor > 1:
funds_arrival = expon.rvs(loc = 0, scale = scale_factor )
else:
funds_arrival = 0
return({'new_participant':new_participant,
'new_participant_holdings':new_participant_holdings,
'new_proposal':new_proposal,
'funds_arrival':funds_arrival})
#Mechanisms for updating the state based on driving processes
##---
def update_network(params, step, sL, s, _input):
print(params)
print(type(params))
network = s['network']
funds = s['funds']
supply = s['supply']
trigger_func = params['trigger_func']
new_participant = _input['new_participant'] #T/F
new_proposal = _input['new_proposal'] #T/F
if new_participant:
new_participant_holdings = _input['new_participant_holdings']
network = gen_new_participant(network, new_participant_holdings)
if new_proposal:
network= gen_new_proposal(network,funds,supply )
#update age of the existing proposals
proposals = get_nodes_by_type(network, 'proposal')
for j in proposals:
network.nodes[j]['age'] = network.nodes[j]['age']+1
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)
else:
network.nodes[j]['trigger'] = np.nan
key = 'network'
value = network
return (key, value)
def increment_funds(params, step, sL, s, _input):
funds = s['funds']
funds_arrival = _input['funds_arrival']
#increment funds
funds = funds + funds_arrival
key = 'funds'
value = funds
return (key, value)
def increment_supply(params, step, sL, s, _input):
supply = s['supply']
supply_arrival = _input['new_participant_holdings']
#increment supply
supply = supply + supply_arrival
key = 'supply'
value = supply
return (key, value)
#functions for partial state update block 2
#Driving processes: completion of previously funded proposals
##-----------------------------------------
def check_progress(params, step, sL, s):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = []
for j in proposals:
if network.nodes[j]['status'] == 'active':
grant_size = network.nodes[j]['funds_requested']
base_completion_rate=params['base_completion_rate']
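# Per-timestep completion probability shrinks as the (log of the) grant size grows.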
likelihood = 1.0/(base_completion_rate+np.log(grant_size))
if np.random.rand() < likelihood:
completed.append(j)
return({'completed':completed})
#Mechanisms for updating the state based on check progress
##---
def complete_proposal(params, step, sL, s, _input):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
completed = _input['completed']
for j in completed:
network.nodes[j]['status']='completed'
for i in participants:
force = network.edges[(i,j)]['affinity']
sentiment = network.nodes[i]['sentiment']
network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_completion(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = _input['completed']
grants_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='active'])
grants_completed = np.sum([network.nodes[j]['funds_requested'] for j in completed])
sentiment = s['sentiment']
force = grants_completed/grants_outstanding
mu = params['sentiment_decay']
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, mu)
else:
sentiment = get_sentimental(sentiment, 0, mu)
key = 'sentiment'
value = sentiment
return (key, value)
def get_sentimental(sentiment, force, decay=0):
mu = decay
sentiment = sentiment*(1-mu) + force
if sentiment > 1:
sentiment = 1
return sentiment
#functions for partial state update block 3
#Decision processes: trigger function policy
##-----------------------------------------
def trigger_function(params, step, sL, s):
network = s['network']
funds = s['funds']
supply = s['supply']
proposals = get_nodes_by_type(network, 'proposal')
tmin = params['tmin']
accepted = []
triggers = {}
for j in proposals:
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
age = network.nodes[j]['age']
threshold = trigger_threshold(requested, funds, supply)
if age > tmin:
conviction = network.nodes[j]['conviction']
if conviction >threshold:
accepted.append(j)
else:
threshold = np.nan
triggers[j] = threshold
return({'accepted':accepted, 'triggers':triggers})
def decrement_funds(params, step, sL, s, _input):
funds = s['funds']
network = s['network']
accepted = _input['accepted']
#decrement funds
for j in accepted:
funds = funds - network.nodes[j]['funds_requested']
key = 'funds'
value = funds
return (key, value)
def update_proposals(params, step, sL, s, _input):
network = s['network']
accepted = _input['accepted']
triggers = _input['triggers']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
sensitivity = params['sensitivity']
for j in proposals:
network.nodes[j]['trigger'] = triggers[j]
#bookkeeping conviction and participant sentiment
for j in accepted:
network.nodes[j]['status']='active'
network.nodes[j]['conviction']=np.nan
#change status to active
for i in participants:
#operating on edge = (i,j)
#reset tokens assigned to other candidates
network.edges[(i,j)]['tokens']=0
network.edges[(i,j)]['conviction'] = np.nan
#update participants sentiments (positive or negative)
affinities = [network.edges[(i,p)]['affinity'] for p in proposals if not(p in accepted)]
if len(affinities)>1:
max_affinity = np.max(affinities)
force = network.edges[(i,j)]['affinity']-sensitivity*max_affinity
else:
force = 0
#based on their affinities to the accepted proposals
network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, False)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_release(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
accepted = _input['accepted']
proposals_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate'])
proposals_accepted = np.sum([network.nodes[j]['funds_requested'] for j in accepted])
sentiment = s['sentiment']
force = proposals_accepted/proposals_outstanding
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, False)
else:
sentiment = get_sentimental(sentiment, 0, False)
key = 'sentiment'
value = sentiment
return (key, value)
def participants_decisions(params, step, sL, s):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']
sensitivity = params['sensitivity']
gain = .01
delta_holdings={}
proposals_supported ={}
for i in participants:
force = network.nodes[i]['sentiment']-sensitivity
delta_holdings[i] = network.nodes[i]['holdings']*gain*force
support = []
for j in candidates:
affinity = network.edges[(i, j)]['affinity']
cutoff = sensitivity*np.max([network.edges[(i,p)]['affinity'] for p in candidates])
if cutoff <.5:
cutoff = .5
if affinity > cutoff:
support.append(j)
proposals_supported[i] = support
return({'delta_holdings':delta_holdings, 'proposals_supported':proposals_supported})
def update_tokens(params, step, sL, s, _input):
network = s['network']
delta_holdings = _input['delta_holdings']
proposals = get_nodes_by_type(network, 'proposal')
proposals_supported = _input['proposals_supported']
participants = get_nodes_by_type(network, 'participant')
alpha = params['alpha']
for i in participants:
network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
supported = proposals_supported[i]
total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])
for j in proposals:
if j in supported:
normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
else:
network.edges[(i, j)]['tokens'] = 0
prior_conviction = network.edges[(i, j)]['conviction']
current_tokens = network.edges[(i, j)]['tokens']
network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction
for j in proposals:
network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])
key = 'network'
value = network
return (key, value)
def update_supply(params, step, sL, s, _input):
supply = s['supply']
delta_holdings = _input['delta_holdings']
delta_supply = np.sum([v for v in delta_holdings.values()])
supply = supply + delta_supply
key = 'supply'
value = supply
return (key, value)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The Partial State Update Blocks
partial_state_update_blocks = [
{
'policies': {
#new proposals or new participants
'random': driving_process
},
'variables': {
'network': update_network,
'funds':increment_funds,
'supply':increment_supply
}
},
{
'policies': {
'completion': check_progress #see if any of the funded proposals completes
},
'variables': { # The following state variables will be updated simultaneously
'sentiment': update_sentiment_on_completion, #note sentiment decays each step (mu); completing proposals bumps it
'network': complete_proposal #book-keeping
}
},
{
'policies': {
'release': trigger_function #check each proposal to see if it passes
},
'variables': { # The following state variables will be updated simultaneously
'funds': decrement_funds, #funds expended
'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
'network': update_proposals #reset convictions, and participants sentiments
#update based on affinities
}
},
{
'policies': {
'participants_act': participants_decisions, #high sentiment, high affinity =>buy
#low sentiment, low affinities => burn
#assign tokens to top affinities
},
'variables': {
'supply': update_supply,
'network': update_tokens #update everyone's holdings
#and their conviction for each proposal
}
}
]
n= 25 #initial participants
m= 3 #initial proposals
initial_sentiment = .5
network, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)
initial_conditions = {'network':network,
'supply': initial_supply,
'funds':initial_funds,
'sentiment': initial_sentiment}
#power of 1 token forever
# conviction_capacity = [2]
# alpha = [1-1/cc for cc in conviction_capacity]
# print(alpha)
params={
'sensitivity': [.75],
'tmin': [7], #unit days; minimum periods passed before a proposal can pass
'sentiment_decay': [.001], #termed mu in the state update function
'alpha': [0.5, 0.9],
'base_completion_rate': [10],
'trigger_func': [trigger_threshold]
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Settings of general simulation parameters, unrelated to the system itself
# `T` is a range with the number of discrete units of time the simulation will run for;
# `N` is the number of times the simulation will be run (Monte Carlo runs)
time_periods_per_run = 250
monte_carlo_runs = 1
simulation_parameters = config_sim({
'T': range(time_periods_per_run),
'N': monte_carlo_runs,
'M': params
})
from cadCAD.configuration import append_configs
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
append_configs(
initial_state=initial_conditions, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
sim_configs=simulation_parameters #dict containing simulation parameters
)
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
raw_result, tensor = run.execute()
# exec_mode = ExecutionMode()
# exec_context = ExecutionContext(context=exec_mode.multi_proc)
# # run = Executor(exec_context=exec_context, configs=configs)
# executor = Executor(exec_context, configs) # Pass the configuration object inside an array
# raw_result, tensor = executor.execute() # The `execute()` method returns a tuple; its first element contains the raw results

View File

@ -0,0 +1,555 @@
from pprint import pprint
import numpy as np
from tabulate import tabulate
from cadCAD.configuration.utils import config_sim
from simulations.validation.conviction_helpers import *
#import networkx as nx
from scipy.stats import expon, gamma
#functions for partial state update block 1
#Driving processes: arrival of participants, proposals and funds
##-----------------------------------------
def gen_new_participant(network, new_participant_holdings):
i = len([node for node in network.nodes])
network.add_node(i)
network.nodes[i]['type']="participant"
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
network.nodes[i]['holdings']=new_participant_holdings
for j in get_nodes_by_type(network, 'proposal'):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = a_rv*network.nodes[i]['holdings']
network.edges[(i, j)]['conviction'] = 0
return network
scale_factor = 1000
def gen_new_proposal(network, funds, supply, trigger_func):
j = len([node for node in network.nodes])
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
rescale = scale_factor*funds
r_rv = gamma.rvs(3,loc=0.001, scale=rescale)
network.nodes[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)
participants = get_nodes_by_type(network, 'participant')
proposing_participant = np.random.choice(participants)
for i in participants:
network.add_edge(i, j)
if i==proposing_participant:
network.edges[(i, j)]['affinity']=1
else:
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['conviction'] = 0
network.edges[(i,j)]['tokens'] = 0
return network
def driving_process(params, step, sL, s):
#placeholder plumbing for random processes
arrival_rate = 10/s['sentiment']
rv1 = np.random.rand()
new_participant = bool(rv1<1/arrival_rate)
if new_participant:
h_rv = expon.rvs(loc=0.0, scale=1000)
new_participant_holdings = h_rv
else:
new_participant_holdings = 0
network = s['network']
affinities = [network.edges[e]['affinity'] for e in network.edges ]
median_affinity = np.median(affinities)
proposals = get_nodes_by_type(network, 'proposal')
fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]
funds = s['funds']
total_funds_requested = np.sum(fund_requests)
proposal_rate = 10/median_affinity * total_funds_requested/funds
rv2 = np.random.rand()
new_proposal = bool(rv2<1/proposal_rate)
sentiment = s['sentiment']
funds = s['funds']
scale_factor = 1+4000*sentiment**2
#this shouldn't happen but expon is throwing domain errors
if scale_factor > 1:
funds_arrival = expon.rvs(loc = 0, scale = scale_factor )
else:
funds_arrival = 0
return({'new_participant':new_participant,
'new_participant_holdings':new_participant_holdings,
'new_proposal':new_proposal,
'funds_arrival':funds_arrival})
#Mechanisms for updating the state based on driving processes
##---
def update_network(params, step, sL, s, _input):
network = s['network']
funds = s['funds']
supply = s['supply']
trigger_func = params['trigger_func']
new_participant = _input['new_participant'] #T/F
new_proposal = _input['new_proposal'] #T/F
if new_participant:
new_participant_holdings = _input['new_participant_holdings']
network = gen_new_participant(network, new_participant_holdings)
if new_proposal:
network= gen_new_proposal(network,funds,supply,trigger_func )
#update age of the existing proposals
proposals = get_nodes_by_type(network, 'proposal')
for j in proposals:
network.nodes[j]['age'] = network.nodes[j]['age']+1
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)
else:
network.nodes[j]['trigger'] = np.nan
key = 'network'
value = network
return (key, value)
def increment_funds(params, step, sL, s, _input):
funds = s['funds']
funds_arrival = _input['funds_arrival']
#increment funds
funds = funds + funds_arrival
key = 'funds'
value = funds
return (key, value)
def increment_supply(params, step, sL, s, _input):
supply = s['supply']
supply_arrival = _input['new_participant_holdings']
#increment supply
supply = supply + supply_arrival
key = 'supply'
value = supply
return (key, value)
#functions for partial state update block 2
#Driving processes: completion of previously funded proposals
##-----------------------------------------
def check_progress(params, step, sL, s):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = []
for j in proposals:
if network.nodes[j]['status'] == 'active':
grant_size = network.nodes[j]['funds_requested']
base_completion_rate=params['base_completion_rate']
likelihood = 1.0/(base_completion_rate+np.log(grant_size))
if np.random.rand() < likelihood:
completed.append(j)
return({'completed':completed})
#Mechanisms for updating the state based on check progress
##---
def complete_proposal(params, step, sL, s, _input):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
completed = _input['completed']
for j in completed:
network.nodes[j]['status']='completed'
for i in participants:
force = network.edges[(i,j)]['affinity']
sentiment = network.nodes[i]['sentiment']
network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_completion(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = _input['completed']
grants_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='active'])
grants_completed = np.sum([network.nodes[j]['funds_requested'] for j in completed])
sentiment = s['sentiment']
force = grants_completed/grants_outstanding
mu = params['sentiment_decay']
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, mu)
else:
sentiment = get_sentimental(sentiment, 0, mu)
key = 'sentiment'
value = sentiment
return (key, value)
def get_sentimental(sentiment, force, decay=0):
mu = decay
sentiment = sentiment*(1-mu) + force
if sentiment > 1:
sentiment = 1
return sentiment
#functions for partial state update block 3
#Decision processes: trigger function policy
##-----------------------------------------
def trigger_function(params, step, sL, s):
network = s['network']
funds = s['funds']
supply = s['supply']
proposals = get_nodes_by_type(network, 'proposal')
tmin = params['tmin']
accepted = []
triggers = {}
for j in proposals:
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
age = network.nodes[j]['age']
threshold = trigger_threshold(requested, funds, supply)
if age > tmin:
conviction = network.nodes[j]['conviction']
if conviction >threshold:
accepted.append(j)
else:
threshold = np.nan
triggers[j] = threshold
return({'accepted':accepted, 'triggers':triggers})
def decrement_funds(params, step, sL, s, _input):
funds = s['funds']
network = s['network']
accepted = _input['accepted']
#decrement funds
for j in accepted:
funds = funds - network.nodes[j]['funds_requested']
key = 'funds'
value = funds
return (key, value)
def update_proposals(params, step, sL, s, _input):
network = s['network']
accepted = _input['accepted']
triggers = _input['triggers']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
sensitivity = params['sensitivity']
for j in proposals:
network.nodes[j]['trigger'] = triggers[j]
#bookkeeping conviction and participant sentiment
for j in accepted:
network.nodes[j]['status']='active'
network.nodes[j]['conviction']=np.nan
#change status to active
for i in participants:
#operating on edge = (i,j)
#reset tokens assigned to other candidates
network.edges[(i,j)]['tokens']=0
network.edges[(i,j)]['conviction'] = np.nan
#update participants sentiments (positive or negative)
affinities = [network.edges[(i,p)]['affinity'] for p in proposals if not(p in accepted)]
if len(affinities)>1:
max_affinity = np.max(affinities)
force = network.edges[(i,j)]['affinity']-sensitivity*max_affinity
else:
force = 0
#based on their affinities to the accepted proposals
network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, False)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_release(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
accepted = _input['accepted']
proposals_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate'])
proposals_accepted = np.sum([network.nodes[j]['funds_requested'] for j in accepted])
sentiment = s['sentiment']
force = proposals_accepted/proposals_outstanding
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, False)
else:
sentiment = get_sentimental(sentiment, 0, False)
key = 'sentiment'
value = sentiment
return (key, value)
def participants_decisions(params, step, sL, s):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']
sensitivity = params['sensitivity']
gain = .01
delta_holdings={}
proposals_supported ={}
for i in participants:
force = network.nodes[i]['sentiment']-sensitivity
delta_holdings[i] = network.nodes[i]['holdings']*gain*force
support = []
for j in candidates:
affinity = network.edges[(i, j)]['affinity']
cutoff = sensitivity*np.max([network.edges[(i,p)]['affinity'] for p in candidates])
if cutoff <.5:
cutoff = .5
if affinity > cutoff:
support.append(j)
proposals_supported[i] = support
return({'delta_holdings':delta_holdings, 'proposals_supported':proposals_supported})
def update_tokens(params, step, sL, s, _input):
network = s['network']
delta_holdings = _input['delta_holdings']
proposals = get_nodes_by_type(network, 'proposal')
proposals_supported = _input['proposals_supported']
participants = get_nodes_by_type(network, 'participant')
alpha = params['alpha']
for i in participants:
network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
supported = proposals_supported[i]
total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])
for j in proposals:
if j in supported:
normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
else:
network.edges[(i, j)]['tokens'] = 0
prior_conviction = network.edges[(i, j)]['conviction']
current_tokens = network.edges[(i, j)]['tokens']
network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction
for j in proposals:
network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])
key = 'network'
value = network
return (key, value)
def update_supply(params, step, sL, s, _input):
supply = s['supply']
delta_holdings = _input['delta_holdings']
delta_supply = np.sum([v for v in delta_holdings.values()])
supply = supply + delta_supply
key = 'supply'
value = supply
return (key, value)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The Partial State Update Blocks
partial_state_update_blocks = [
{
'policies': {
#new proposals or new participants
'random': driving_process
},
'variables': {
'network': update_network,
'funds':increment_funds,
'supply':increment_supply
}
},
{
'policies': {
'completion': check_progress #see if any of the funded proposals completes
},
'variables': { # The following state variables will be updated simultaneously
'sentiment': update_sentiment_on_completion, #note sentiment decays each step (mu); completing proposals bumps it
'network': complete_proposal #book-keeping
}
},
{
'policies': {
'release': trigger_function #check each proposal to see if it passes
},
'variables': { # The following state variables will be updated simultaneously
'funds': decrement_funds, #funds expended
'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
'network': update_proposals #reset convictions, and participants sentiments
#update based on affinities
}
},
{
'policies': {
'participants_act': participants_decisions, #high sentiment, high affinity =>buy
#low sentiment, low affinities => burn
#assign tokens to top affinities
},
'variables': {
'supply': update_supply,
'network': update_tokens #update everyone's holdings
#and their conviction for each proposal
}
}
]
n= 25 #initial participants
m= 3 #initial proposals
initial_sentiment = .5
network, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)
initial_conditions = {'network':network,
'supply': initial_supply,
'funds':initial_funds,
'sentiment': initial_sentiment}
#power of 1 token forever
# conviction_capacity = [2]
# alpha = [1-1/cc for cc in conviction_capacity]
# print(alpha)
params={
'sensitivity': [.75],
'tmin': [7], #unit days; minimum periods passed before a proposal can pass
'sentiment_decay': [.001], #termed mu in the state update function
'alpha': [0.5],
'base_completion_rate': [10],
'trigger_func': [trigger_threshold]
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Settings of general simulation parameters, unrelated to the system itself
# `T` is a range with the number of discrete units of time the simulation will run for;
# `N` is the number of times the simulation will be run (Monte Carlo runs)
time_periods_per_run = 250
monte_carlo_runs = 1
simulation_parameters = config_sim({
'T': range(time_periods_per_run),
'N': monte_carlo_runs,
'M': params
})
from cadCAD.configuration import append_configs
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
append_configs(
initial_state=initial_conditions, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
sim_configs=simulation_parameters #dict containing simulation parameters
)
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
import pandas as pd
exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print(f"Tensor Field: {type(tensor_field)}")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print(f"Output: {type(result)}")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
i += 1

View File

@ -0,0 +1,183 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta
import pprint
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import env_proc_trigger, ep_time_step, config_sim
from typing import Dict, List
# from cadCAD.utils.sys_config import exo, exo_check
pp = pprint.PrettyPrinter(indent=4)
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3),
'c': np.random.RandomState(3)
}
# Optional
g: Dict[str, List[int]] = {
'alpha': [1],
'beta': [2, 5],
'gamma': [3, 4],
'omega': [7]
}
# Policies per Mechanism
def p1m1(_g, step, sL, s):
return {'param1': 1}
def p2m1(_g, step, sL, s):
return {'param2': 4}
def p1m2(_g, step, sL, s):
return {'param1': 'a', 'param2': _g['beta']}
def p2m2(_g, step, sL, s):
return {'param1': 'b', 'param2': 0}
def p1m3(_g, step, sL, s):
return {'param1': np.array([10, 100])}
def p2m3(_g, step, sL, s):
return {'param1': np.array([20, 200])}
# Internal States per Mechanism
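# Each state update function below returns a (state_variable_name, new_value) tuple.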
def s1m1(_g, step, sL, s, _input):
return 's1', 0
def s2m1(_g, step, sL, s, _input):
return 's2', _g['beta']
def s1m2(_g, step, sL, s, _input):
return 's1', _input['param2']
def s2m2(_g, step, sL, s, _input):
return 's2', _input['param2']
def s1m3(_g, step, sL, s, _input):
return 's1', 0
def s2m3(_g, step, sL, s, _input):
return 's2', 0
# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
def es3p1(_g, step, sL, s, _input):
return 's3', _g['gamma']
# @curried
def es4p2(_g, step, sL, s, _input):
return 's4', _g['gamma']
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
y = 'timestep'
x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
return (y, x)
# Environment States
# @curried
# def env_a(param, x):
# return x + param
def env_a(x):
return x
def env_b(x):
return 10
# Genesis States
genesis_states = {
's1': Decimal(0.0),
's2': Decimal(0.0),
's3': Decimal(1.0),
's4': Decimal(1.0),
# 'timestep': '2018-10-01 15:16:24'
}
# remove `exo_update_per_ts` to update every ts
raw_exogenous_states = {
"s3": es3p1,
"s4": es4p2,
# "timestep": es5p2
}
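# Hedged sketch, assuming the append_configs signature introduced in this PR: passing
# _exo_update_per_ts=False would register these raw functions directly, so they run on
# every substep instead of being wrapped to update once per timestep, e.g.
# append_configs(..., raw_exogenous_states=raw_exogenous_states, _exo_update_per_ts=False)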
# ToDo: make env proc trigger field agnostic
# ToDo: input json into function renaming __name__
triggered_env_b = env_proc_trigger(1, env_b)
env_processes = {
"s3": env_a, #sweep(beta, env_a),
"s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
}
# parameterized_env_processes = parameterize_states(env_processes)
#
# pp.pprint(parameterized_env_processes)
# exit()
# ToDo: The number of values entered in sweep should be the # of config objs created,
# not dependent on the # of times the sweep is applied
# sweep exo_state func and point to exo-state in every other function
# param sweep on genesis states
partial_state_update_block = {
"m1": {
"policies": {
"b1": p1m1,
"b2": p2m1
},
"variables": {
"s1": s1m1,
"s2": s2m1
}
},
"m2": {
"policies": {
"b1": p1m2,
"b2": p2m2,
},
"variables": {
"s1": s1m2,
"s2": s2m2
}
},
"m3": {
"policies": {
"b1": p1m3,
"b2": p2m3
},
"variables": {
"s1": s1m3,
"s2": s2m3
}
}
}
# config_sim Necessary
sim_config = config_sim(
{
"N": 2,
"T": range(5),
"M": g # Optional
}
)
# New Convention
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
raw_exogenous_states={}, #raw_exogenous_states,
env_processes={}, #env_processes,
partial_state_update_blocks=partial_state_update_block
)

View File

@ -4,7 +4,7 @@ from datetime import timedelta
import pprint
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import proc_trigger, ep_time_step, config_sim
from cadCAD.configuration.utils import env_proc_trigger, ep_time_step, config_sim
from typing import Dict, List
@ -17,7 +17,7 @@ seeds = {
'c': np.random.RandomState(3)
}
# Optional
g: Dict[str, List[int]] = {
'alpha': [1],
'beta': [2, 5],
@ -46,34 +46,22 @@ def p2m3(_g, step, sL, s):
# Internal States per Mechanism
def s1m1(_g, step, sL, s, _input):
y = 's1'
x = 0
return (y, x)
return 's1', 0
def s2m1(_g, step, sL, s, _input):
y = 's2'
x = _g['beta']
return (y, x)
return 's2', _g['beta']
def s1m2(_g, step, sL, s, _input):
y = 's1'
x = _input['param2']
return (y, x)
return 's1', _input['param2']
def s2m2(_g, step, sL, s, _input):
y = 's2'
x = _input['param2']
return (y, x)
return 's2', _input['param2']
def s1m3(_g, step, sL, s, _input):
y = 's1'
x = 0
return (y, x)
return 's1', 0
def s2m3(_g, step, sL, s, _input):
y = 's2'
x = 0
return (y, x)
return 's2', 0
# Exogenous States
@ -82,24 +70,23 @@ proc_one_coef_B = 1.3
def es3p1(_g, step, sL, s, _input):
y = 's3'
x = _g['gamma']
return (y, x)
return 's3', _g['gamma']
# @curried
def es4p2(_g, step, sL, s, _input):
y = 's4'
x = _g['gamma']
return (y, x)
return 's4', _g['gamma']
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
y = 'timestamp'
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
y = 'timestep'
x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
return (y, x)
# Environment States
# @curried
# def env_a(param, x):
# return x + param
def env_a(x):
return x
def env_b(x):
@ -112,7 +99,7 @@ genesis_states = {
's2': Decimal(0.0),
's3': Decimal(1.0),
's4': Decimal(1.0),
'timestamp': '2018-10-01 15:16:24'
# 'timestep': '2018-10-01 15:16:24'
}
@ -120,14 +107,26 @@ genesis_states = {
raw_exogenous_states = {
"s3": es3p1,
"s4": es4p2,
'timestamp': es5p2
# "timestep": es5p2
}
triggered_env_b = proc_trigger(1, env_b)
# ToDo: make env proc trigger field agnostic
# ToDo: input json into function renaming __name__
triggered_env_b = env_proc_trigger(1, env_b)
env_processes = {
"s3": env_a,
"s4": triggered_env_b
"s3": env_a, #sweep(beta, env_a),
"s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
}
# parameterized_env_processes = parameterize_states(env_processes)
#
# pp.pprint(parameterized_env_processes)
# exit()
# ToDo: The number of values entered in sweep should be the # of config objs created,
# not dependent on the # of times the sweep is applied
# sweep exo_state func and point to exo-state in every other function
# param sweep on genesis states
partial_state_update_block = {
"m1": {
@ -162,16 +161,16 @@ partial_state_update_block = {
}
}
# config_sim Necessary
sim_config = config_sim(
{
"N": 2,
"T": range(5),
"M": g
"M": g # Optional
}
)
# New Convention
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,

View File

@ -0,0 +1,118 @@
from decimal import Decimal
import numpy as np
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, config_sim
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3),
'c': np.random.RandomState(3)
}
# Policies per Mechanism
def p1(_g, step, sL, s):
return {'param1': 10}
def p2(_g, step, sL, s):
return {'param1': 10, 'param2': 40}
# Internal States per Mechanism
def s1(_g, step, sL, s, _input):
y = 'ds1'
x = s['ds1'] + 1
return (y, x)
def s2(_g, step, sL, s, _input):
y = 'ds2'
x = _input['param2']
return (y, x)
# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
def es(_g, step, sL, s, _input):
y = 'ds3'
x = s['ds3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
# Environment States
def env_a(x):
return 5
def env_b(x):
return 10
# Genesis States
genesis_states = {
'ds1': Decimal(0.0),
'ds2': Decimal(0.0),
'ds3': Decimal(1.0)
}
raw_exogenous_states = {
"ds3": es
}
env_processes = {
"ds3": env_a
}
partial_state_update_block = {
"m1": {
"policies": {
"p1": p1,
"p2": p2
},
"variables": {
"ds1": s1,
"ds2": s2
}
},
"m2": {
"policies": {
"p1": p1,
"p2": p2
},
"variables": {
"ds1": s1,
"ds2": s2
}
},
"m3": {
"policies": {
"p1": p1,
"p2": p2
},
"variables": {
"ds1": s1,
"ds2": s2
}
}
}
sim_config = config_sim(
{
"N": 2,
"T": range(4),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
raw_exogenous_states=raw_exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block,
policy_ops=[lambda a, b: a + b]
)