param sweep patch

Joshua E. Jodesty committed 2019-05-31 11:25:54 -04:00
parent 7f28bae21a
commit f224df3ed4
28 changed files with 2755 additions and 257 deletions

View File

@@ -17,6 +17,7 @@ class Configuration(object):
def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
exogenous_states={}, partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b],
**kwargs) -> None:
+ # print(exogenous_states)
self.sim_config = sim_config
self.initial_state = initial_state
self.seeds = seeds

View File

@@ -2,6 +2,7 @@ from datetime import datetime, timedelta
from decimal import Decimal
from copy import deepcopy
from functools import reduce
+ from pprint import pprint
from fn.func import curried
from funcy import curry
@@ -38,13 +39,14 @@ def bound_norm_random(rng, low, high):
res = rng.normal((high+low)/2, (high-low)/6)
if res < low or res > high:
res = bound_norm_random(rng, low, high)
- return Decimal(res)
+ # return Decimal(res)
+ return float(res)
@curried
- def env_proc_trigger(trigger_time, update_f, time):
+ def env_proc_trigger(timestep, f, time):
- if time == trigger_time:
+ if time == timestep:
- return update_f
+ return f
else:
return lambda x: x
@@ -130,8 +132,8 @@ def exo_update_per_ts(ep):
return {es: ep_decorator(f, es) for es, f in ep.items()}
- def trigger_condition(s, conditions, cond_opp):
+ def trigger_condition(s, pre_conditions, cond_opp):
- condition_bools = [s[field] in precondition_values for field, precondition_values in conditions.items()]
+ condition_bools = [s[field] in precondition_values for field, precondition_values in pre_conditions.items()]
return reduce(cond_opp, condition_bools)
def apply_state_condition(pre_conditions, cond_opp, y, f, _g, step, sL, s, _input):
@@ -149,12 +151,13 @@ def var_substep_trigger(substeps):
pre_conditions = {'substep': substeps}
cond_opp = lambda a, b: a and b
return var_trigger(y, f, pre_conditions, cond_opp)
return lambda y, f: curry(trigger)(substeps)(y)(f)
def env_trigger(end_substep):
def trigger(end_substep, trigger_field, trigger_vals, funct_list):
- def env_update(state_dict, target_value):
+ def env_update(state_dict, sweep_dict, target_value):
state_dict_copy = deepcopy(state_dict)
# Use supstep to simulate current sysMetrics
if state_dict_copy['substep'] == end_substep:
@@ -162,7 +165,7 @@ def env_trigger(end_substep):
if state_dict_copy[trigger_field] in trigger_vals:
for g in funct_list:
- target_value = g(target_value)
+ target_value = g(sweep_dict, target_value)
del state_dict_copy
return target_value
@@ -182,6 +185,7 @@ def config_sim(d):
return flatten_tabulated_dict(tabulate_dict(d))
if "M" in d:
+ # print([{"N": d["N"], "T": d["T"], "M": M} for M in process_variables(d["M"])])
return [{"N": d["N"], "T": d["T"], "M": M} for M in process_variables(d["M"])]
else:
d["M"] = [{}]
@@ -203,4 +207,21 @@ def genereate_psubs(policy_grid, states_grid, policies, state_updates):
filtered_state_updates = {k: v for (k, v) in state_updates.items() if k in state_list}
PSUBS.append(psub(filtered_policies, filtered_state_updates))
return PSUBS
+ def access_block(sH, y, psu_block_offset, exculsion_list=[]):
+ exculsion_list += [y]
+ def filter_history(key_list, sH):
+ filter = lambda key_list: \
+ lambda d: {k: v for k, v in d.items() if k not in key_list}
+ return list(map(filter(key_list), sH))
+ if psu_block_offset < -1:
+ if len(sH) >= abs(psu_block_offset):
+ return filter_history(exculsion_list, sH[psu_block_offset])
+ else:
+ return []
+ elif psu_block_offset < 0:
+ return filter_history(exculsion_list, sH[psu_block_offset])
+ else:
+ return []
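For orientation, the new access_block helper is exercised by the historical_state_access regression config added later in this commit. A minimal sketch of the call pattern, with an illustrative history and exclusion list (not part of the library); note that access_block appends y to the exclusion list it is given before filtering:

    from cadCAD.configuration.utils import access_block

    exclusion_list = ['last_x', '2nd_to_last_x']

    # sH is the state history: a list of partial state update blocks,
    # each of which is a list of state dicts (illustrative values).
    sH = [
        [{'x': 0, 'last_x': [], '2nd_to_last_x': []}],
        [{'x': 1, 'last_x': [], '2nd_to_last_x': []}],
    ]

    # Inside a policy: return the previous block with bookkeeping fields filtered out.
    def last_update(_g, substep, sH, s):
        return {"last_x": access_block(sH, "last_x", -1, exclusion_list)}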

View File

@@ -27,9 +27,11 @@ def single_proc_exec(
Ts: List[range],
Ns: List[int]
):
+ # print(env_processes_list)
+ # print(configs_structs)
l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
simulation_exec, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
+ # print(config.env_processes)
result = simulation_exec(var_dict_list, states_list, config, env_processes, T, N)
return flatten(result)
@@ -66,7 +68,7 @@ class Executor:
self.exec_method = exec_context.method
self.exec_context = exec_context.name
self.configs = configs
- self.main = self.execute
+ # self.main = self.execute
def execute(self) -> Tuple[List[Dict[str, Any]], DataFrame]:
config_proc = Processor()
@@ -76,6 +78,7 @@ class Executor:
var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, partial_state_updates, simulation_execs = \
[], [], [], [], [], [], [], [], []
config_idx = 0
+ print(self.configs)
for x in self.configs:
Ts.append(x.sim_config['T'])
@@ -84,6 +87,7 @@ class Executor:
states_lists.append([x.initial_state])
eps.append(list(x.exogenous_states.values()))
configs_structs.append(config_proc.generate_config(x.initial_state, x.partial_state_updates, eps[config_idx]))
+ # print(env_processes_list)
env_processes_list.append(x.env_processes)
partial_state_updates.append(x.partial_state_updates)
simulation_execs.append(SimExecutor(x.policy_ops).simulation)
@@ -98,12 +102,12 @@ class Executor:
result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
final_result = result, tensor_field
elif self.exec_context == ExecutionMode.multi_proc:
- if len(self.configs) > 1:
+ # if len(self.configs) > 1:
simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
results = []
for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))
final_result = results
return final_result
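The execution scripts later in this diff switch from run.main() to run.execute(). As the branches above suggest, the single-process context yields one (raw_result, tensor_field) pair, while the multi-process context (used for parameter sweeps) yields a list of such pairs. A rough usage sketch mirroring those scripts (sweep_config is one of the regression-test config modules in this commit):

    from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
    # Importing a config module registers its configurations:
    from simulations.regression_tests import sweep_config
    from cadCAD import configs

    exec_mode = ExecutionMode()

    # Multiple configurations (a parameter sweep): execute() yields one
    # (raw_result, tensor_field) pair per swept configuration.
    multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
    run = Executor(exec_context=multi_proc_ctx, configs=configs)
    for raw_result, tensor_field in run.execute():
        print(len(raw_result))

    # With a single registered configuration (e.g. simulations.validation.config1),
    # the single-process context returns one pair instead:
    #   single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
    #   raw_result, tensor_field = Executor(exec_context=single_proc_ctx, configs=configs).execute()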

View File

@ -28,7 +28,7 @@ class Executor:
# get_behavior_input # sL: State Window # get_behavior_input # sL: State Window
def get_policy_input( def get_policy_input(
self, self,
var_dict: Dict[str, List[Any]], sweep_dict: Dict[str, List[Any]],
sub_step: int, sub_step: int,
sL: List[Dict[str, Any]], sL: List[Dict[str, Any]],
s: Dict[str, Any], s: Dict[str, Any],
@ -39,8 +39,8 @@ class Executor:
ops = self.policy_ops ops = self.policy_ops
def get_col_results(var_dict, sub_step, sL, s, funcs): def get_col_results(sweep_dict, sub_step, sL, s, funcs):
return list(map(lambda f: f(var_dict, sub_step, sL, s), funcs)) return list(map(lambda f: f(sweep_dict, sub_step, sL, s), funcs))
def compose(init_reduction_funct, funct_list, val_list): def compose(init_reduction_funct, funct_list, val_list):
result, i = None, 0 result, i = None, 0
@ -53,7 +53,7 @@ class Executor:
result = g(result) result = g(result)
return result return result
col_results = get_col_results(var_dict, sub_step, sL, s, funcs) col_results = get_col_results(sweep_dict, sub_step, sL, s, funcs)
key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results)))))) key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results))))))
new_dict = {k: [] for k in key_set} new_dict = {k: [] for k in key_set}
for d in col_results: for d in col_results:
@ -73,24 +73,9 @@ class Executor:
# return {k: reduce(f1, val_list) for k, val_list in new_dict.items()} # return {k: reduce(f1, val_list) for k, val_list in new_dict.items()}
# return foldr(call, col_results)(ops) # return foldr(call, col_results)(ops)
# def apply_env_proc(
# self,
# env_processes: Dict[str, Callable],
# state_dict: Dict[str, Any],
# time_step: int
# ) -> Dict[str, Any]:
# for state in state_dict.keys():
# if state in list(env_processes.keys()):
# env_state: Callable = env_processes[state]
# if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
# state_dict[state] = env_state(sub_step)(state_dict[state])
# else:
# state_dict[state] = env_state(state_dict[state])
#
# return state_dict
def apply_env_proc( def apply_env_proc(
self, self,
sweep_dict,
env_processes: Dict[str, Callable], env_processes: Dict[str, Callable],
state_dict: Dict[str, Any], state_dict: Dict[str, Any],
) -> Dict[str, Any]: ) -> Dict[str, Any]:
@ -99,9 +84,10 @@ class Executor:
function_type = type(lambda x: x) function_type = type(lambda x: x)
env_update = env_processes[target_field] env_update = env_processes[target_field]
if isinstance(env_update, list): if isinstance(env_update, list):
target_value = compose(*env_update[::-1])(target_value) for f in env_update:
target_value = f(sweep_dict, target_value)
elif isinstance(env_update, function_type): elif isinstance(env_update, function_type):
target_value = env_update(state_dict, target_value) target_value = env_update(state_dict, sweep_dict, target_value)
else: else:
target_value = env_update target_value = env_update
@ -122,7 +108,7 @@ class Executor:
# mech_step # mech_step
def partial_state_update( def partial_state_update(
self, self,
var_dict: Dict[str, List[Any]], sweep_dict: Dict[str, List[Any]],
sub_step: int, sub_step: int,
sL: Any, sL: Any,
sH: Any, sH: Any,
@ -150,13 +136,13 @@ class Executor:
# print(last_in_obj) # print(last_in_obj)
# print(sH[-1]) # print(sH[-1])
_input: Dict[str, Any] = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sH, last_in_obj, policy_funcs)) _input: Dict[str, Any] = self.policy_update_exception(self.get_policy_input(sweep_dict, sub_step, sH, last_in_obj, policy_funcs))
# ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function # ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
# ToDo: Can be multithreaded ?? # ToDo: Can be multithreaded ??
def generate_record(state_funcs): def generate_record(state_funcs):
for f in state_funcs: for f in state_funcs:
yield self.state_update_exception(f(var_dict, sub_step, sH, last_in_obj, _input)) yield self.state_update_exception(f(sweep_dict, sub_step, sH, last_in_obj, _input))
def transfer_missing_fields(source, destination): def transfer_missing_fields(source, destination):
for k in source: for k in source:
@ -168,7 +154,9 @@ class Executor:
last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs))) last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs)))
# ToDo: Remove # ToDo: Remove
# last_in_copy: Dict[str, Any] = self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep']) # last_in_copy: Dict[str, Any] = self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])
last_in_copy: Dict[str, Any] = self.apply_env_proc(env_processes, last_in_copy) # print(env_processes)
# print()
last_in_copy: Dict[str, Any] = self.apply_env_proc(sweep_dict, env_processes, last_in_copy)
# ToDo: make 'substep' & 'timestep' reserve fields # ToDo: make 'substep' & 'timestep' reserve fields
@ -185,7 +173,7 @@ class Executor:
# mech_pipeline - state_update_block # mech_pipeline - state_update_block
def state_update_pipeline( def state_update_pipeline(
self, self,
var_dict: Dict[str, List[Any]], sweep_dict: Dict[str, List[Any]],
simulation_list, #states_list: List[Dict[str, Any]], simulation_list, #states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]], configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable], env_processes: Dict[str, Callable],
@ -229,7 +217,7 @@ class Executor:
for [s_conf, p_conf] in configs: # tensor field for [s_conf, p_conf] in configs: # tensor field
states_list: List[Dict[str, Any]] = self.partial_state_update( states_list: List[Dict[str, Any]] = self.partial_state_update(
var_dict, sub_step, states_list, simulation_list, s_conf, p_conf, env_processes, time_step, run sweep_dict, sub_step, states_list, simulation_list, s_conf, p_conf, env_processes, time_step, run
) )
# print(sub_step) # print(sub_step)
# print(simulation_list) # print(simulation_list)
@ -244,7 +232,7 @@ class Executor:
# state_update_pipeline # state_update_pipeline
def run_pipeline( def run_pipeline(
self, self,
var_dict: Dict[str, List[Any]], sweep_dict: Dict[str, List[Any]],
states_list: List[Dict[str, Any]], states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]], configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable], env_processes: Dict[str, Callable],
@ -262,7 +250,7 @@ class Executor:
# print(simulation_list) # print(simulation_list)
for time_step in time_seq: for time_step in time_seq:
pipe_run: List[Dict[str, Any]] = self.state_update_pipeline( pipe_run: List[Dict[str, Any]] = self.state_update_pipeline(
var_dict, simulation_list, configs, env_processes, time_step, run sweep_dict, simulation_list, configs, env_processes, time_step, run
) )
_, *pipe_run = pipe_run _, *pipe_run = pipe_run
@ -276,7 +264,7 @@ class Executor:
# configs: List[Tuple[List[Callable], List[Callable]]] # configs: List[Tuple[List[Callable], List[Callable]]]
def simulation( def simulation(
self, self,
var_dict: Dict[str, List[Any]], sweep_dict: Dict[str, List[Any]],
states_list: List[Dict[str, Any]], states_list: List[Dict[str, Any]],
configs: List[Tuple[List[Callable], List[Callable]]], configs: List[Tuple[List[Callable], List[Callable]]],
env_processes: Dict[str, Callable], env_processes: Dict[str, Callable],
@ -284,7 +272,7 @@ class Executor:
runs: int runs: int
) -> List[List[Dict[str, Any]]]: ) -> List[List[Dict[str, Any]]]:
def execute_run(var_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]: def execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
run += 1 run += 1
def generate_init_sys_metrics(genesis_states_list): def generate_init_sys_metrics(genesis_states_list):
@ -294,14 +282,14 @@ class Executor:
states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(deepcopy(states_list))) states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(deepcopy(states_list)))
first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run) first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(sweep_dict, states_list_copy, configs, env_processes, time_seq, run)
del states_list_copy del states_list_copy
return first_timestep_per_run return first_timestep_per_run
pipe_run: List[List[Dict[str, Any]]] = flatten( pipe_run: List[List[Dict[str, Any]]] = flatten(
TPool().map( TPool().map(
lambda run: execute_run(var_dict, states_list, configs, env_processes, time_seq, run), lambda run: execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run),
list(range(runs)) list(range(runs))
) )
) )

Binary file not shown.

BIN
dist/cadCAD-0.2.3-py3-none-any.whl vendored Normal file

Binary file not shown.

View File

@@ -11,7 +11,7 @@ long_description = "cadCAD is a differential games based simulation software pac
monte carlo analysis and other common numerical methods is provided."
setup(name='cadCAD',
- version='0.2.2',
+ version='0.2.3',
description="cadCAD: a differential games based simulation software package for research, validation, and \
Computer Aided Design of economic systems",
long_description=long_description,

View File

@@ -2,10 +2,8 @@ import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
- # from simulations.validation import policy_aggregation
from simulations.validation import config1
- # from simulations.validation import externalds
- # from simulations.validation import external_dataset
from cadCAD import configs
exec_mode = ExecutionMode()
@@ -16,7 +14,7 @@ first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
- raw_result, tensor_field = run.main()
+ raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")

View File

@@ -0,0 +1,25 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import config2
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config2
# print(configs[0].env_processes)
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@@ -5,7 +5,6 @@ from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
# from simulations.validation import config1_test_pipe
# from simulations.validation import config1
# from simulations.validation import externalds
- from simulations.validation import external_dataset
from cadCAD import configs
exec_mode = ExecutionMode()
@@ -16,10 +15,10 @@ first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
- raw_result, tensor_field = run.main()
+ raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
result = pd.concat([result, result['external_data'].apply(pd.Series)], axis=1)[
- ['run', 'substep', 'timestep', 'increment', 'external_data', 'ds1', 'ds2', 'ds3', 'policies']
+ ['run', 'substep', 'timestep', 'increment', 'external_data', 'policies', 'ds1', 'ds2', 'ds3', ]
]
print()
print("Tensor Field: config1")

View File

@@ -17,7 +17,7 @@ run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, _ = run.main()
result = pd.DataFrame(raw_result)
- result.to_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/output.csv', index=False)
+ result.to_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv', index=False)
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))

View File

@@ -13,7 +13,7 @@ run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['config1', 'config2']
- for raw_result, tensor_field in run.main():
+ for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])

View File

@@ -13,7 +13,7 @@ run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['sweep_config_A', 'sweep_config_B']
- for raw_result, tensor_field in run.main():
+ for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])

View File

@@ -1,3 +1,5 @@
+ from pprint import pprint
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
@@ -6,6 +8,8 @@ from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import sweep_config
from cadCAD import configs
+ # pprint(configs)
exec_mode = ExecutionMode()
print("Simulation Execution: Concurrent Execution")
@@ -14,7 +18,7 @@ run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['sweep_config_A', 'sweep_config_B']
- for raw_result, tensor_field in run.main():
+ for raw_result, tensor_field in run.execute():
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: " + config_names[i])

View File

@@ -0,0 +1,23 @@
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import policy_aggregation
from cadCAD import configs
exec_mode = ExecutionMode()
print("Simulation Execution: Single Configuration")
print()
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()

View File

@@ -5,12 +5,6 @@ from cadCAD.utils import SilentDF
df = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
- external_data = {'ds1': None, 'ds2': None, 'ds3': None}
- state_dict = {
- 'increment': 0,
- 'external_data': external_data,
- 'policies': external_data,
- }
def query(s, df):
return df[
@@ -22,7 +16,7 @@ def p1(_g, substep, sL, s):
del result_dict["ds3"]
return {k: list(v.values()).pop() for k, v in result_dict.items()}
- def p2(_g, step, sL, s):
+ def p2(_g, substep, sL, s):
result_dict = query(s, df).to_dict()
del result_dict["ds1"], result_dict["ds2"]
return {k: list(v.values()).pop() for k, v in result_dict.items()}
@@ -41,6 +35,14 @@ def view_policies(_g, step, sL, s, _input):
return 'policies', _input
+ external_data = {'ds1': None, 'ds2': None, 'ds3': None}
+ state_dict = {
+ 'increment': 0,
+ 'external_data': external_data,
+ 'policies': external_data
+ }
policies = {"p1": p1, "p2": p2}
states = {'increment': increment, 'external_data': integrate_ext_dataset, 'policies': view_policies}
PSUB = {'policies': policies, 'states': states}

View File

@@ -0,0 +1,85 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim, access_block
policies, variables = {}, {}
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']
# Policies per Mechanism
# WARNING: DO NOT delete elements from sH
def last_update(_g, substep, sH, s):
return {"last_x": access_block(sH, "last_x", -1, exclusion_list)}
policies["last_x"] = last_update
def second2last_update(_g, substep, sH, s):
return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
policies["2nd_to_last_x"] = second2last_update
# Internal States per Mechanism
# WARNING: DO NOT delete elements from sH
def add(y, x):
return lambda _g, substep, sH, s, _input: (y, s[y] + x)
variables['x'] = add('x', 1)
# last_partial_state_update_block
def nonexsistant(_g, substep, sH, s, _input):
return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)
variables['nonexsistant'] = nonexsistant
# last_partial_state_update_block
def last_x(_g, substep, sH, s, _input):
return 'last_x', _input["last_x"]
variables['last_x'] = last_x
# 2nd to last partial state update block
def second_to_last_x(_g, substep, sH, s, _input):
return '2nd_to_last_x', _input["2nd_to_last_x"]
variables['2nd_to_last_x'] = second_to_last_x
# 3rd to last partial state update block
def third_to_last_x(_g, substep, sH, s, _input):
return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
variables['3rd_to_last_x'] = third_to_last_x
# 4th to last partial state update block
def fourth_to_last_x(_g, substep, sH, s, _input):
return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
variables['4th_to_last_x'] = fourth_to_last_x
genesis_states = {
'x': 0,
'nonexsistant': [],
'last_x': [],
'2nd_to_last_x': [],
'3rd_to_last_x': [],
'4th_to_last_x': []
}
PSUB = {
"policies": policies,
"variables": variables
}
partial_state_update_block = {
"PSUB1": PSUB,
"PSUB2": PSUB,
"PSUB3": PSUB
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
partial_state_update_blocks=partial_state_update_block
)

View File

@@ -2,7 +2,7 @@ import numpy as np
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
+ # ToDo: Use
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
@@ -13,19 +13,19 @@ seeds = {
# Policies per Mechanism
def p1m1(_g, step, sL, s):
- return {'param1': 1}
+ return {'policy1': 1}
def p2m1(_g, step, sL, s):
- return {'param2': 2}
+ return {'policy2': 2}
def p1m2(_g, step, sL, s):
- return {'param1': 2, 'param2': 2}
+ return {'policy1': 2, 'policy2': 2}
def p2m2(_g, step, sL, s):
- return {'param1': 2, 'param2': 2}
+ return {'policy1': 2, 'policy2': 2}
def p1m3(_g, step, sL, s):
- return {'param1': 1, 'param2': 2, 'param3': 3}
+ return {'policy1': 1, 'policy2': 2, 'policy3': 3}
def p2m3(_g, step, sL, s):
- return {'param1': 1, 'param2': 2, 'param3': 3}
+ return {'policy1': 1, 'policy2': 2, 'policy3': 3}
# Internal States per Mechanism
@@ -37,22 +37,19 @@ def policies(_g, step, sH, s, _input):
x = _input
return (y, x)
# Genesis States
genesis_states = {
'policies': {},
- 's1': 0,
+ 's1': 0
- 's2': 0,
}
- raw_exogenous_states = {}
- env_processes = {}
variables = {
's1': add('s1', 1),
- 's2': add('s2', 1),
"policies": policies
}
- # test_varablies = deepcopy(variables)
- # test_varablies['test'] = test
partial_state_update_block = {
"m1": {
@@ -87,12 +84,14 @@ sim_config = config_sim(
)
+ # Aggregation == Reduce Map / Reduce Map Aggregation
+ # ToDo: subsequent functions should accept the entire datastructure
+ # using env functions (include in reg test using / for env proc)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
- raw_exogenous_states=raw_exogenous_states,
- env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block,
- policy_ops=[lambda a, b: a + b, lambda y: y + 10, lambda y: y + 30]
+ policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b ToDO: reduction function requires high lvl explanation
)
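The policy_ops change above swaps the extra "+10"/"+30" maps for a single doubling map. Roughly speaking, and only as a sketch of the reduce-then-map aggregation rather than the engine's exact code path: per signal key, the first op folds the values returned by the block's policies, and each remaining op is applied to the folded result.

    from functools import reduce

    # Illustrative per-key signal values collected from two policies in one block.
    policy_ops = [lambda a, b: a + b, lambda y: y * 2]
    signal_values = [1, 2]

    reduce_op, *map_ops = policy_ops
    aggregate = reduce(reduce_op, signal_values)   # 1 + 2 = 3
    for op in map_ops:
        aggregate = op(aggregate)                  # 3 * 2 = 6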

View File

@@ -1,7 +1,6 @@
from decimal import Decimal
import numpy as np
from datetime import timedelta
- from funcy import compose
import pprint
from cadCAD.configuration import append_configs
@@ -22,6 +21,8 @@ seeds = {
# Optional
g: Dict[str, List[int]] = {
'alpha': [1],
+ # 'beta': [2],
+ # 'gamma': [3],
'beta': [2, 5],
'gamma': [3, 4],
'omega': [7]
@@ -29,7 +30,7 @@ g: Dict[str, List[int]] = {
psu_steps = ['m1', 'm2', 'm3']
system_substeps = len(psu_steps)
- var_timestep_trigger = var_substep_trigger(system_substeps)
+ var_timestep_trigger = var_substep_trigger([0, system_substeps])
env_timestep_trigger = env_trigger(system_substeps)
env_process = {}
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
@@ -67,6 +68,7 @@ def s1m1(_g, step, sL, s, _input):
psu_block['m1']["variables"]['s1'] = s1m1
def s2m1(_g, step, sL, s, _input):
+ print(_g)
return 's2', _g['beta']
psu_block['m1']["variables"]['s2'] = s2m1
@@ -94,22 +96,22 @@ def update_timestamp(_g, step, sL, s, _input):
for m in ['m1','m2','m3']:
# psu_block[m]["variables"]['timestamp'] = update_timestamp
psu_block[m]["variables"]['timestamp'] = var_timestep_trigger(y='timestamp', f=update_timestamp)
- psu_block[m]["variables"]['timestamp'] = var_trigger(
+ # psu_block[m]["variables"]['timestamp'] = var_trigger(
- y='timestamp', f=update_timestamp, pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
+ # y='timestamp', f=update_timestamp, pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
- )
+ # )
- proc_one_coef_A = 0.7
+ proc_one_coef = 0.7
- def es3p1(_g, step, sL, s, _input):
+ def es3(_g, step, sL, s, _input):
- return 's3', s['s3']
+ return 's3', s['s3'] + proc_one_coef
# use `timestep_trigger` to update every ts
for m in ['m1','m2','m3']:
- psu_block[m]["variables"]['s3'] = var_timestep_trigger(y='s3', f=es3p1)
+ psu_block[m]["variables"]['s3'] = var_timestep_trigger(y='s3', f=es3)
- proc_one_coef_B = 1.3
- def es4p2(_g, step, sL, s, _input):
+ def es4(_g, step, sL, s, _input):
- return 's4', s['s4'] #+ 4 #g['gamma'] + proc_one_coef_B
+ return 's4', s['s4'] + _g['gamma']
for m in ['m1','m2','m3']:
- psu_block[m]["variables"]['s4'] = var_timestep_trigger(y='s4', f=es4p2)
+ psu_block[m]["variables"]['s4'] = var_timestep_trigger(y='s4', f=es4)
# ToDo: The number of values entered in sweep should be the # of config objs created,
@@ -119,16 +121,18 @@ for m in ['m1','m2','m3']:
# Genesis States
genesis_states = {
- 's1': Decimal(0.0),
+ 's1': 0.0,
- 's2': Decimal(0.0),
+ 's2': 0.0,
- 's3': Decimal(1.0),
+ 's3': 1.0,
- 's4': Decimal(1.0),
+ 's4': 1.0,
'timestamp': '2018-10-01 15:16:24'
}
# Environment Process
# ToDo: Validate - make env proc trigger field agnostic
- env_process["s3"] = [lambda x: x + 1, lambda x: x + 1]
+ env_process["s3"] = [lambda _g, x: _g['beta'], lambda _g, x: x + 1]
- env_process["s4"] = env_timestep_trigger(trigger_field='field', trigger_vals=[5], funct_list=[lambda x: 1, lambda x: x + 2])
+ env_process["s4"] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']])
# config_sim Necessary
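For orientation: with 'beta': [2, 5] and 'gamma': [3, 4] above, config_sim expands "M" into one parameter dict per sweep member, append_configs registers one configuration per dict, and the engine hands that dict to policies, state updates, and env processes as _g / sweep_dict. A rough sketch of the pattern (the N and T values below are illustrative):

    from cadCAD.configuration.utils import config_sim

    sim_config = config_sim({
        "N": 1,
        "T": range(5),
        "M": {'alpha': [1], 'beta': [2, 5], 'gamma': [3, 4], 'omega': [7]},
    })
    # -> a list of sim configs, one per sweep member (two here, matching the
    #    sweep_config_A/B names in the execution script), each with scalar
    #    parameters, e.g. M = {'alpha': 1, 'beta': 2, 'gamma': 3, 'omega': 7}

    def s4_update(_g, step, sL, s, _input):
        # `_g` is this configuration's parameter dict for the sweep member being run.
        return 's4', s['s4'] + _g['gamma']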

View File

@@ -21,7 +21,7 @@ cols = [
'udo_policy_tracker_a', 'udo_policies', 'udo_policy_tracker_b',
'timestamp'
]
- raw_result, tensor_field = run.main()
+ raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)

View File

@@ -21,7 +21,7 @@ cols = [
'udo_policies', 'udo_policy_tracker',
'timestamp'
]
- raw_result, tensor_field = run.main()
+ raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)

View File

@@ -7,9 +7,12 @@ from datetime import timedelta
from cadCAD.configuration.utils.policyAggregation import get_base_value
from cadCAD.configuration import append_configs
- from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim
+ from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim, time_step, \
+ env_trigger
+ # from cadCAD.configuration.utils import timestep_trigger
+ from simulations.regression_tests.sweep_config import var_timestep_trigger
- from cadCAD.configuration.utils import timestep_trigger
seeds = {
'z': np.random.RandomState(1),
@@ -70,59 +73,40 @@ def policies(_g, step, sL, s, _input):
return (y, x)
# Exogenous States
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
- def es3p1(_g, step, sL, s, _input):
+ def es3(_g, step, sL, s, _input):
y = 's3'
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
- def es4p2(_g, step, sL, s, _input):
+ def es4(_g, step, sL, s, _input):
y = 's4'
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
- ts_format = '%Y-%m-%d %H:%M:%S'
- t_delta = timedelta(days=0, minutes=0, seconds=1)
- def es5p2(_g, step, sL, s, _input):
- y = 'timestep'
- x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
- return (y, x)
- # Environment States
- def env_a(x):
- return 5
- def env_b(x):
- return 10
- # def what_ever(x):
- # return x + 1
+ def update_timestamp(_g, step, sL, s, _input):
+ y = 'timestamp'
+ return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
# Genesis States
genesis_states = {
- 's1': Decimal(0.0),
+ 's1': 0.0,
- 's2': Decimal(0.0),
+ 's2': 0.0,
- 's3': Decimal(1.0),
+ 's3': 1.0,
- 's4': Decimal(1.0)
+ 's4': 1.0,
- # 'timestep': '2018-10-01 15:16:24'
+ 'timestamp': '2018-10-01 15:16:24'
}
- # raw_exogenous_states = {
- # "s3": es3p1,
- # "s4": es4p2,
- # # "timestep": es5p2
- # }
+ # Environment Process
+ # ToDo: Depreciation Waring for env_proc_trigger convention
env_processes = {
- "s3": env_a,
+ "s3": [lambda _g, x: 5],
- "s4": env_proc_trigger(1, env_b)
+ "s4": env_trigger(3)(trigger_field='timestep', trigger_vals=[1], funct_list=[lambda _g, x: 10])
}
@@ -135,8 +119,9 @@ partial_state_update_blocks = {
"variables": {
"s1": s1m1,
"s2": s2m1,
- "s3": es3p1,
+ "s3": es3,
- "s4": es4p2,
+ "s4": es4,
+ "timestamp": update_timestamp
}
},
"m2": {
@@ -147,8 +132,8 @@ partial_state_update_blocks = {
"variables": {
"s1": s1m2,
"s2": s2m2,
- # "s3": timestep_trigger(3, 's3', es3p1),
+ # "s3": es3p1,
- # "s4": timestep_trigger(3, 's4', es4p2),
+ # "s4": es4p2,
}
},
"m3": {
@@ -159,8 +144,8 @@ partial_state_update_blocks = {
"variables": {
"s1": s1m3,
"s2": s2m3,
- # "s3": timestep_trigger(3, 's3', es3p1),
+ # "s3": es3p1,
- # "s4": timestep_trigger(3, 's4', es4p2),
+ # "s4": es4p2,
}
}
}
@@ -176,9 +161,9 @@ sim_config = config_sim(
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
- seeds=seeds,
+ # seeds=seeds,
- raw_exogenous_states={}, #raw_exogenous_states,
+ # raw_exogenous_states=raw_exogenous_states,
- env_processes={}, #env_processes,
+ env_processes=env_processes,
partial_state_update_blocks=partial_state_update_blocks,
policy_ops=[lambda a, b: a + b]
)
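The env_processes entries above use the reworked trigger API: env_trigger(end_substep) takes the trigger field, the field values that fire it, and a list of (sweep_dict, value) functions, and returns an update that the engine calls as f(state_dict, sweep_dict, target_value); when the condition does not fire, the value passes through unchanged. A rough sketch with an illustrative state dict:

    from cadCAD.configuration.utils import env_trigger

    # Reset 's4' to 10 at the end of substep 3 of timestep 1.
    s4_env_update = env_trigger(3)(
        trigger_field='timestep', trigger_vals=[1], funct_list=[lambda _g, x: 10]
    )

    # The engine supplies (state_dict, sweep_dict, target_value):
    state = {'substep': 3, 'timestep': 1, 's4': 1.0}
    print(s4_env_update(state, {}, state['s4']))  # -> 10 while triggered, 1.0 otherwise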

View File

@@ -1,15 +1,15 @@
- from decimal import Decimal
import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
- from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim
+ from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim, env_trigger, \
+ time_step
seeds = {
'z': np.random.RandomState(1),
'a': np.random.RandomState(2),
'b': np.random.RandomState(3),
- 'c': np.random.RandomState(3)
+ 'c': np.random.RandomState(4)
}
@@ -63,56 +63,38 @@ def s2m3(_g, step, sL, s, _input):
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3
- def es3p1(_g, step, sL, s, _input):
+ def es3(_g, step, sL, s, _input):
y = 's3'
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
- def es4p2(_g, step, sL, s, _input):
+ def es4(_g, step, sL, s, _input):
y = 's4'
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
return (y, x)
- ts_format = '%Y-%m-%d %H:%M:%S'
- t_delta = timedelta(days=0, minutes=0, seconds=1)
- def es5p2(_g, step, sL, s, _input):
- y = 'timestep'
- x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
- return (y, x)
- # Environment States
- def env_a(x):
- return 10
- def env_b(x):
- return 10
- # def what_ever(x):
- # return x + 1
+ def update_timestamp(_g, step, sL, s, _input):
+ y = 'timestamp'
+ return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
# Genesis States
genesis_states = {
- 's1': Decimal(0.0),
+ 's1': 0,
- 's2': Decimal(0.0),
+ 's2': 0,
- 's3': Decimal(1.0),
+ 's3': 1,
- 's4': Decimal(1.0),
+ 's4': 1,
- # 'timestep': '2018-10-01 15:16:24'
+ 'timestamp': '2018-10-01 15:16:24'
- }
- raw_exogenous_states = {
- "s3": es3p1,
- "s4": es4p2,
- # "timestep": es5p2
}
+ # Environment Process
+ # ToDo: Depreciation Waring for env_proc_trigger convention
env_processes = {
- "s3": env_proc_trigger(1, env_a),
+ "s3": [lambda _g, x: 5],
- "s4": env_proc_trigger(1, env_b)
+ "s4": env_trigger(3)(trigger_field='timestep', trigger_vals=[2], funct_list=[lambda _g, x: 10])
}
partial_state_update_block = {
"m1": {
"policies": {
@@ -122,6 +104,9 @@ partial_state_update_block = {
"states": {
"s1": s1m1,
# "s2": s2m1
+ "s3": es3,
+ "s4": es4,
+ "timestep": update_timestamp
}
},
"m2": {
@@ -159,7 +144,6 @@ append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
- raw_exogenous_states=raw_exogenous_states,
env_processes=env_processes,
partial_state_update_blocks=partial_state_update_block
)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,150 @@
import networkx as nx
from scipy.stats import expon, gamma
import numpy as np
import matplotlib.pyplot as plt
#helper functions
def get_nodes_by_type(g, node_type_selection):
return [node for node in g.nodes if g.nodes[node]['type']== node_type_selection ]
def get_edges_by_type(g, edge_type_selection):
return [edge for edge in g.edges if g.edges[edge]['type']== edge_type_selection ]
def total_funds_given_total_supply(total_supply):
#can put any bonding curve invariant here for initialization!
total_funds = total_supply
return total_funds
#maximum share of funds a proposal can take
default_beta = .2 #later we should set this to be param so we can sweep it
# tuning param for the trigger function
default_rho = .001
def trigger_threshold(requested, funds, supply, beta = default_beta, rho = default_rho):
share = requested/funds
if share < beta:
return rho*supply/(beta-share)**2
else:
return np.inf
def initialize_network(n,m, funds_func=total_funds_given_total_supply, trigger_func =trigger_threshold ):
network = nx.DiGraph()
for i in range(n):
network.add_node(i)
network.nodes[i]['type']="participant"
h_rv = expon.rvs(loc=0.0, scale=1000)
network.nodes[i]['holdings'] = h_rv
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
participants = get_nodes_by_type(network, 'participant')
initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
initial_funds = funds_func(initial_supply)
#generate initial proposals
for ind in range(m):
j = n+ind
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
r_rv = gamma.rvs(3,loc=0.001, scale=10000)
network.node[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_threshold(r_rv, initial_funds, initial_supply)
for i in range(n):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = 0
network.edges[(i, j)]['conviction'] = 0
proposals = get_nodes_by_type(network, 'proposal')
total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
return network, initial_funds, initial_supply, total_requested
def trigger_sweep(field, trigger_func,xmax=.2,default_alpha=.5):
if field == 'token_supply':
alpha = default_alpha
share_of_funds = np.arange(.001,xmax,.001)
total_supply = np.arange(0,10**9, 10**6)
demo_data_XY = np.outer(share_of_funds,total_supply)
demo_data_Z0=np.empty(demo_data_XY.shape)
demo_data_Z1=np.empty(demo_data_XY.shape)
demo_data_Z2=np.empty(demo_data_XY.shape)
demo_data_Z3=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for ts_ind in range(len(total_supply)):
ts = total_supply[ts_ind]
tc = ts /(1-alpha)
trigger = trigger_func(sof, 1, ts)
demo_data_Z0[sof_ind,ts_ind] = np.log10(trigger)
demo_data_Z1[sof_ind,ts_ind] = trigger
demo_data_Z2[sof_ind,ts_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z3[sof_ind,ts_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z0,
'trigger':demo_data_Z1,
'share_of_max_conv': demo_data_Z2,
'log10_share_of_max_conv':demo_data_Z3,
'total_supply':total_supply,
'share_of_funds':share_of_funds}
elif field == 'alpha':
alpha = np.arange(.5,1,.01)
share_of_funds = np.arange(.001,xmax,.001)
total_supply = 10**9
demo_data_XY = np.outer(share_of_funds,alpha)
demo_data_Z4=np.empty(demo_data_XY.shape)
demo_data_Z5=np.empty(demo_data_XY.shape)
demo_data_Z6=np.empty(demo_data_XY.shape)
demo_data_Z7=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for a_ind in range(len(alpha)):
ts = total_supply
a = alpha[a_ind]
tc = ts /(1-a)
trigger = trigger_func(sof, 1, ts)
demo_data_Z4[sof_ind,a_ind] = np.log10(trigger)
demo_data_Z5[sof_ind,a_ind] = trigger
demo_data_Z6[sof_ind,a_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z7[sof_ind,a_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z4,
'trigger':demo_data_Z5,
'share_of_max_conv': demo_data_Z6,
'log10_share_of_max_conv':demo_data_Z7,
'alpha':alpha,
'share_of_funds':share_of_funds}
else:
return "invalid field"
def trigger_plotter(share_of_funds,Z, color_label,y, ylabel,cmap='jet'):
dims = (10, 5)
fig, ax = plt.subplots(figsize=dims)
cf = plt.contourf(share_of_funds, y, Z.T, 100, cmap=cmap)
cbar=plt.colorbar(cf)
plt.axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
#ax.set_xscale('log')
plt.ylabel(ylabel)
plt.xlabel('Share of Funds Requested')
plt.title('Trigger Function Map')
cbar.ax.set_ylabel(color_label)
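As a quick sanity check of trigger_threshold above: with the defaults beta = 0.2 and rho = 0.001, a proposal requesting 10% of the funds pool against a supply of 10,000 needs 0.001 * 10000 / (0.2 - 0.1)**2 = 1000 conviction, and any request at or above 20% of funds is unreachable (np.inf). A small worked example, assuming the module path used by the conviction config below:

    from simulations.validation.conviction_helpers import trigger_threshold

    print(trigger_threshold(100, 1000, 10_000))   # 1000.0  (request is 10% of funds)
    print(trigger_threshold(250, 1000, 10_000))   # inf     (25% of funds >= beta)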

View File

@@ -0,0 +1,548 @@
import numpy as np
from cadCAD.configuration.utils import config_sim
from simulations.validation.conviction_helpers import *
#import networkx as nx
from scipy.stats import expon, gamma
#functions for partial state update block 1
#Driving processes: arrival of participants, proposals and funds
##-----------------------------------------
def gen_new_participant(network, new_participant_holdings):
i = len([node for node in network.nodes])
network.add_node(i)
network.nodes[i]['type']="participant"
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
network.nodes[i]['holdings']=new_participant_holdings
for j in get_nodes_by_type(network, 'proposal'):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = a_rv*network.nodes[i]['holdings']
network.edges[(i, j)]['conviction'] = 0
return network
scale_factor = 1000
def gen_new_proposal(network, funds, supply, total_funds, trigger_func):
j = len([node for node in network.nodes])
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
rescale = scale_factor*funds/total_funds
r_rv = gamma.rvs(3,loc=0.001, scale=rescale)
network.node[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_func(r_rv, funds, supply)
participants = get_nodes_by_type(network, 'participant')
proposing_participant = np.random.choice(participants)
for i in participants:
network.add_edge(i, j)
if i==proposing_participant:
network.edges[(i, j)]['affinity']=1
else:
rv = np.random.rand()
a_rv = 1-4*(1-rv)*rv #polarized distribution
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['conviction'] = 0
network.edges[(i,j)]['tokens'] = 0
return network
def driving_process(params, step, sL, s):
#placeholder plumbing for random processes
arrival_rate = 10/s['sentiment']
rv1 = np.random.rand()
new_participant = bool(rv1<1/arrival_rate)
if new_participant:
h_rv = expon.rvs(loc=0.0, scale=1000)
new_participant_holdings = h_rv
else:
new_participant_holdings = 0
network = s['network']
affinities = [network.edges[e]['affinity'] for e in network.edges ]
median_affinity = np.median(affinities)
proposals = get_nodes_by_type(network, 'proposal')
fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate' ]
funds = s['funds']
total_funds_requested = np.sum(fund_requests)
proposal_rate = 10/median_affinity * total_funds_requested/funds
rv2 = np.random.rand()
new_proposal = bool(rv2<1/proposal_rate)
sentiment = s['sentiment']
funds = s['funds']
scale_factor = 1+4000*sentiment**2
#this shouldn't happen but expon is throwing domain errors
if scale_factor > 1:
funds_arrival = expon.rvs(loc = 0, scale = scale_factor )
else:
funds_arrival = 0
return({'new_participant':new_participant,
'new_participant_holdings':new_participant_holdings,
'new_proposal':new_proposal,
'funds_arrival':funds_arrival})
#Mechanisms for updating the state based on driving processes
##---
def update_network(params, step, sL, s, _input):
print(params)
print(type(params))
network = s['network']
funds = s['funds']
supply = s['supply']
trigger_func = params['trigger_func']
new_participant = _input['new_participant'] #T/F
new_proposal = _input['new_proposal'] #T/F
if new_participant:
new_participant_holdings = _input['new_participant_holdings']
network = gen_new_participant(network, new_participant_holdings)
if new_proposal:
network= gen_new_proposal(network,funds,supply )
#update age of the existing proposals
proposals = get_nodes_by_type(network, 'proposal')
for j in proposals:
network.nodes[j]['age'] = network.nodes[j]['age']+1
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)
else:
network.nodes[j]['trigger'] = np.nan
key = 'network'
value = network
return (key, value)
def increment_funds(params, step, sL, s, _input):
funds = s['funds']
funds_arrival = _input['funds_arrival']
#increment funds
funds = funds + funds_arrival
key = 'funds'
value = funds
return (key, value)
def increment_supply(params, step, sL, s, _input):
supply = s['supply']
supply_arrival = _input['new_participant_holdings']
#increment funds
supply = supply + supply_arrival
key = 'supply'
value = supply
return (key, value)
#functions for partial state update block 2
#Driving processes: completion of previously funded proposals
##-----------------------------------------
def check_progress(params, step, sL, s):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = []
for j in proposals:
if network.nodes[j]['status'] == 'active':
grant_size = network.nodes[j]['funds_requested']
base_completion_rate=params['base_completion_rate']
likelihood = 1.0/(base_completion_rate+np.log(grant_size))
if np.random.rand() < likelihood:
completed.append(j)
return({'completed':completed})
#Mechanisms for updating the state based on check progress
##---
def complete_proposal(params, step, sL, s, _input):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
completed = _input['completed']
for j in completed:
network.nodes[j]['status']='completed'
for i in participants:
force = network.edges[(i,j)]['affinity']
sentiment = network.node[i]['sentiment']
network.node[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_completion(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = _input['completed']
grants_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='active'])
grants_completed = np.sum([network.nodes[j]['funds_requested'] for j in completed])
sentiment = s['sentiment']
force = grants_completed/grants_outstanding
mu = params['sentiment_decay']
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, mu)
else:
sentiment = get_sentimental(sentiment, 0, mu)
key = 'sentiment'
value = sentiment
return (key, value)
def get_sentimental(sentiment, force, decay=0):
mu = decay
sentiment = sentiment*(1-mu) + force
if sentiment > 1:
sentiment = 1
return sentiment
#functions for partial state update block 3
#Decision processes: trigger function policy
##-----------------------------------------
def trigger_function(params, step, sL, s):
network = s['network']
funds = s['funds']
supply = s['supply']
proposals = get_nodes_by_type(network, 'proposal')
tmin = params['tmin']
accepted = []
triggers = {}
for j in proposals:
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
age = network.nodes[j]['age']
threshold = trigger_threshold(requested, funds, supply)
if age > tmin:
conviction = network.nodes[j]['conviction']
if conviction >threshold:
accepted.append(j)
else:
threshold = np.nan
triggers[j] = threshold
return({'accepted':accepted, 'triggers':triggers})
def decrement_funds(params, step, sL, s, _input):
funds = s['funds']
network = s['network']
accepted = _input['accepted']
#decrement funds
for j in accepted:
funds = funds - network.nodes[j]['funds_requested']
key = 'funds'
value = funds
return (key, value)
def update_proposals(params, step, sL, s, _input):
network = s['network']
accepted = _input['accepted']
triggers = _input['triggers']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
sensitivity = params['sensitivity']
for j in proposals:
network.nodes[j]['trigger'] = triggers[j]
#bookkeeping conviction and participant sentiment
for j in accepted:
network.nodes[j]['status']='active'
network.nodes[j]['conviction']=np.nan
#change status to active
for i in participants:
#operating on edge = (i,j)
#reset tokens assigned to other candidates
network.edges[(i,j)]['tokens']=0
network.edges[(i,j)]['conviction'] = np.nan
#update participants sentiments (positive or negative)
affinities = [network.edges[(i,p)]['affinity'] for p in proposals if not(p in accepted)]
if len(affinities)>1:
max_affinity = np.max(affinities)
force = network.edges[(i,j)]['affinity']-sensitivity*max_affinity
else:
force = 0
#based on their affinities to the accepted proposals
network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, decay=0)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_release(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
accepted = _input['accepted']
proposals_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate'])
proposals_accepted = np.sum([network.nodes[j]['funds_requested'] for j in accepted])
sentiment = s['sentiment']
force = proposals_accepted/proposals_outstanding
if (force >=0) and (force <=1):
sentiment = get_sentimental(sentiment, force, decay=0)
else:
sentiment = get_sentimental(sentiment, 0, decay=0)
key = 'sentiment'
value = sentiment
return (key, value)
def participants_decisions(params, step, sL, s):
network = s['network']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']
sensitivity = params['sensitivity']
gain = .01
delta_holdings={}
proposals_supported ={}
for i in participants:
force = network.nodes[i]['sentiment']-sensitivity
delta_holdings[i] = network.nodes[i]['holdings']*gain*force
support = []
for j in candidates:
affinity = network.edges[(i, j)]['affinity']
cutoff = sensitivity*np.max([network.edges[(i,p)]['affinity'] for p in candidates])
if cutoff <.5:
cutoff = .5
if affinity > cutoff:
support.append(j)
proposals_supported[i] = support
return({'delta_holdings':delta_holdings, 'proposals_supported':proposals_supported})
def update_tokens(params, step, sL, s, _input):
network = s['network']
delta_holdings = _input['delta_holdings']
proposals = get_nodes_by_type(network, 'proposal')
proposals_supported = _input['proposals_supported']
participants = get_nodes_by_type(network, 'participant')
alpha = params['alpha']
for i in participants:
network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
supported = proposals_supported[i]
total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])
for j in proposals:
if j in supported:
normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
else:
network.edges[(i, j)]['tokens'] = 0
prior_conviction = network.edges[(i, j)]['conviction']
current_tokens = network.edges[(i, j)]['tokens']
network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction
for j in proposals:
network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])
key = 'network'
value = network
return (key, value)
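# Per-edge conviction follows the recurrence conviction_t = tokens_t + alpha*conviction_{t-1},
# a geometric memory of past token support: with alpha = 0.9 roughly the last ten timesteps
# dominate, while alpha = 0.5 forgets support much faster.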
def update_supply(params, step, sL, s, _input):
supply = s['supply']
delta_holdings = _input['delta_holdings']
delta_supply = np.sum([v for v in delta_holdings.values()])
supply = supply + delta_supply
key = 'supply'
value = supply
return (key, value)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The Partial State Update Blocks
partial_state_update_blocks = [
{
'policies': {
#new proposals or new participants
'random': driving_process
},
'variables': {
'network': update_network,
'funds':increment_funds,
'supply':increment_supply
}
},
{
'policies': {
'completion': check_progress #see if any of the funded proposals completes
},
'variables': { # The following state variables will be updated simultaneously
'sentiment': update_sentiment_on_completion, #note: sentiment decays each timestep, completing proposals bumps it
'network': complete_proposal #book-keeping
}
},
{
'policies': {
'release': trigger_function #check each proposal to see if it passes
},
'variables': { # The following state variables will be updated simultaneously
'funds': decrement_funds, #funds expended
'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
'network': update_proposals #reset convictions and update participants' sentiments
#based on their affinities to the accepted proposals
}
},
{
'policies': {
'participants_act': participants_decisions, #high sentiment, high affinity =>buy
#low sentiment, low affinities => burn
#assign tokens to top affinities
},
'variables': {
'supply': update_supply,
'network': update_tokens #update everyones holdings
#and their conviction for each proposal
}
}
]
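# Each timestep cadCAD executes these blocks in order: the policies of a block are evaluated
# against the previous substep's state, their outputs are aggregated into the _input dict,
# and the listed variable updates are then applied together to produce the next substep.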
n= 25 #initial participants
m= 3 #initial proposals
initial_sentiment = .5
network, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)
initial_conditions = {'network':network,
'supply': initial_supply,
'funds':initial_funds,
'sentiment': initial_sentiment}
#power of 1 token forever
# conviction_capactity = [2]
# alpha = [1-1/cc for cc in conviction_capactity]
# print(alpha)
params={
'sensitivity': [.75],
'tmin': [7], #unit days; minimum periods passed before a proposal can pass
'sentiment_decay': [.001], #termed mu in the state update function
'alpha': [0.5, 0.9],
'base_completion_rate': [10],
'trigger_func': [trigger_threshold]
}
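# Every parameter value is wrapped in a list: single-element lists are held fixed, while
# 'alpha': [0.5, 0.9] should be expanded by config_sim below into a parameter sweep, i.e.
# one simulation configuration per alpha value with the other parameters broadcast.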
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Settings of general simulation parameters, unrelated to the system itself
# `T` is a range with the number of discrete units of time the simulation will run for;
# `N` is the number of times the simulation will be run (Monte Carlo runs)
time_periods_per_run = 250
monte_carlo_runs = 1
simulation_parameters = config_sim({
'T': range(time_periods_per_run),
'N': monte_carlo_runs,
'M': params
})
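# Illustrative sanity check (assumes config_sim expands the sweep into one dict per swept value):
# assert len(simulation_parameters) == len(params['alpha'])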
from cadCAD.configuration import append_configs
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
append_configs(
initial_state=initial_conditions, #dict containing variable names and initial values
partial_state_update_blocks=partial_state_update_blocks, #list of dicts defining the partial state update blocks
sim_configs=simulation_parameters #simulation parameters (T, N, M) produced by config_sim
)
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
raw_result, tensor = run.execute()
# exec_mode = ExecutionMode()
# exec_context = ExecutionContext(context=exec_mode.multi_proc)
# # run = Executor(exec_context=exec_context, configs=configs)
# executor = Executor(exec_context, configs) # Pass the configuration object inside an array
# raw_result, tensor = executor.execute() # The `main()` method returns a tuple; its first elements contains the raw results
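# A minimal post-processing sketch; assumes pandas is installed and that each record in
# raw_result carries 'run', 'timestep' and 'substep' keys (as the partial state updates above imply).
import pandas as pd
df = pd.DataFrame(raw_result)
# keep only the last substep of each timestep so that one row corresponds to one timestep
df = df[df['substep'] == df['substep'].max()]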

View File

@ -1,62 +0,0 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
# Policies per Mechanism
# def p(_g, substep, sH, s):
# return {'last_update_block': sH[-1]}
# def policies(_g, substep, sH, s, _input):
# y = 'policies'
# x = _input
# return (y, x)
# policies = {"p1": p, "p2": p}
# last_partial_state_update_block
def last_update_block(_g, substep, sH, s, _input):
return 'sh', sH[-1]
def add(y, x):
return lambda _g, substep, sH, s, _input: (y, s[y] + x)
genesis_states = {
's': 0,
'sh': [{}], # {[], {}}
# 'policies': {},
}
variables = {
's': add('s', 1),
'sh': last_update_block,
# "policies": policies
}
PSUB = {
"policies": {}, #policies,
"variables": variables
}
partial_state_update_block = {
"PSUB1": PSUB,
"PSUB2": PSUB,
"PSUB3": PSUB
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds={},
raw_exogenous_states={},
env_processes={},
partial_state_update_blocks=partial_state_update_block
)

View File

@ -2,7 +2,6 @@ import pandas as pd
 from tabulate import tabulate
 # The following imports NEED to be in the exact order
 from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import historical_state_access
 from cadCAD import configs

 exec_mode = ExecutionMode()
@ -13,13 +12,11 @@ first_config = configs # only contains config1
 single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
 run = Executor(exec_context=single_proc_ctx, configs=first_config)
-raw_result, tensor_field = run.main()
+raw_result, tensor_field = run.execute()
 result = pd.DataFrame(raw_result)
-def delSH(d):
-    if 'sh' in d.keys():
-        del d['sh']
-    return d
-result['sh'] = result['sh'].apply(lambda sh: list(map(lambda d: delSH(d), sh)))
+cols = ['run','substep','timestep','x','nonexsistant','last_x','2nd_to_last_x','3rd_to_last_x','4th_to_last_x']
+result = result[cols]
 print()
 print("Tensor Field: config1")