fixed bug2: re-included deepcopy

Joshua E. Jodesty 2019-05-16 13:05:30 -04:00
commit 2acf33d1f3
21 changed files with 298 additions and 159 deletions

.gitignore vendored
View File

@@ -19,3 +19,5 @@ monkeytype.sqlite3
build
cadCAD.egg-info
SimCAD.egg-info
+monkeytype.sqlite3

View File

@@ -1,10 +1,10 @@
-# SimCad
+# cadCAD
**Warning**:
**Do not** publish this package / software to **any** software repository **except** one permitted by BlockScience.
**Description:**
-SimCAD is a differential games based simulation software package for research, validation, and Computer \
+cadCAD is a differential games based simulation software package for research, validation, and Computer \
Aided Design of economic systems. An economic system is treated as a state based model and defined through a \
set of endogenous and exogenous state variables which are updated through mechanisms and environmental \
processes, respectively. Behavioral models, which may be deterministic or stochastic, provide the evolution of \
@@ -20,7 +20,7 @@ A/B testing policies, monte carlo analysis and other common numerical methods is
**Option A:** Package Repository Access
-*Note:* Tokens are issued to trial users and BlockScience employees. Please replace <TOKEN> with and issued token in the script below.
+***IMPORTANT NOTE:*** Tokens are issued to and meant to be used by trial users and BlockScience employees **ONLY**. Replace \<TOKEN\> with an issued token in the script below.
```bash
pip3 install pandas pathos fn tabulate
pip3 install cadCAD --extra-index-url https://<TOKEN>@repo.fury.io/blockscience/
@@ -41,51 +41,92 @@ Instructions:
Examples:
`/simulations/validation/*`
-**3. Import SimCAD & Run Simulation:**
+**3. Import cadCAD & Run Simulations:**
-Examples: `/simulations/example_run.py` or `/simulations/example_run.ipynb`
+Examples: `/simulations/*.py` or `/simulations/*.ipynb`
-`/simulations/example_run.py`:
+Single Simulation: `/simulations/single_config_run.py`
```python
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
-from SimCAD.engine import ExecutionMode, ExecutionContext, Executor
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from validation import config1, config2
+from simulations.validation import config1
-from SimCAD import configs
+from cadCAD import configs
exec_mode = ExecutionMode()
-print("Simulation Execution 1")
+print("Simulation Execution: Single Configuration")
print()
-first_config = [configs[0]] # from config1
+first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
run1_raw_result, tensor_field = run1.main()
result = pd.DataFrame(run1_raw_result)
print()
-print("Tensor Field:")
+print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
-print("Simulation Execution 2: Pairwise Execution")
-print()
+```
+Parameter Sweep Simulation (Concurrent): `/simulations/param_sweep_run.py`
+```python
+import pandas as pd
+from tabulate import tabulate
+# The following imports NEED to be in the exact order
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+from simulations.validation import sweep_config
+from cadCAD import configs
+exec_mode = ExecutionMode()
+print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
+i = 0
+config_names = ['sweep_config_A', 'sweep_config_B']
for raw_result, tensor_field in run2.main():
    result = pd.DataFrame(raw_result)
    print()
-    print("Tensor Field:")
+    print("Tensor Field: " + config_names[i])
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print("Output:")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
+    i += 1
+```
+Multiple Simulations (Concurrent): `/simulations/multi_config_run.py`
+```python
+import pandas as pd
+from tabulate import tabulate
+# The following imports NEED to be in the exact order
+from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
+from simulations.validation import config1, config2
+from cadCAD import configs
+exec_mode = ExecutionMode()
+print("Simulation Execution: Concurrent Execution")
+multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
+run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
+i = 0
+config_names = ['config1', 'config2']
+for raw_result, tensor_field in run2.main():
+    result = pd.DataFrame(raw_result)
+    print()
+    print("Tensor Field: " + config_names[i])
+    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
+    print("Output:")
+    print(tabulate(result, headers='keys', tablefmt='psql'))
+    print()
+    i += 1
```
The above can be run in Jupyter.
```bash
jupyter notebook
```

View File

@@ -1,9 +1,10 @@
+from typing import Dict, Callable, List, Tuple
from functools import reduce
from fn.op import foldr
import pandas as pd
+from pandas.core.frame import DataFrame
from cadCAD import configs
from cadCAD.utils import key_filter
from cadCAD.configuration.utils import exo_update_per_ts
from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
@@ -12,7 +13,8 @@ from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_stat
class Configuration(object):
    def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
-                 exogenous_states={}, partial_state_update_blocks={}, policy_ops=[foldr(dict_elemwise_sum())], **kwargs):
+                 exogenous_states={}, partial_state_update_blocks={}, policy_ops=[foldr(dict_elemwise_sum())],
+                 **kwargs) -> None:
        self.sim_config = sim_config
        self.initial_state = initial_state
        self.seeds = seeds
@@ -25,7 +27,8 @@ class Configuration(object):
        sanitize_config(self)
-def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={}, partial_state_update_blocks={}, _exo_update_per_ts=True):
+def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={},
+                   partial_state_update_blocks={}, _exo_update_per_ts: bool = True) -> None:
    if _exo_update_per_ts is True:
        exogenous_states = exo_update_per_ts(raw_exogenous_states)
    else:
@@ -55,22 +58,22 @@ def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_sta
class Identity:
-    def __init__(self, policy_id={'identity': 0}):
+    def __init__(self, policy_id: Dict[str, int] = {'identity': 0}) -> None:
        self.beh_id_return_val = policy_id
    def p_identity(self, var_dict, sub_step, sL, s):
        return self.beh_id_return_val
-    def policy_identity(self, k):
+    def policy_identity(self, k: str) -> Callable:
        return self.p_identity
    def no_state_identity(self, var_dict, sub_step, sL, s, _input):
        return None
-    def state_identity(self, k):
+    def state_identity(self, k: str) -> Callable:
        return lambda var_dict, sub_step, sL, s, _input: (k, s[k])
-    def apply_identity_funcs(self, identity, df, cols):
+    def apply_identity_funcs(self, identity: Callable, df: DataFrame, cols: List[str]) -> List[DataFrame]:
        def fillna_with_id_func(identity, df, col):
            return df[[col]].fillna(value=identity(col))
@@ -78,7 +81,7 @@ class Identity:
class Processor:
-    def __init__(self, id=Identity()):
+    def __init__(self, id: Identity = Identity()) -> None:
        self.id = id
        self.p_identity = id.p_identity
        self.policy_identity = id.policy_identity
@@ -86,7 +89,7 @@ class Processor:
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs
-    def create_matrix_field(self, partial_state_updates, key):
+    def create_matrix_field(self, partial_state_updates, key: str) -> DataFrame:
        if key == 'variables':
            identity = self.state_identity
        elif key == 'policies':
@@ -99,7 +102,8 @@ class Processor:
        else:
            return pd.DataFrame({'empty': []})
-    def generate_config(self, initial_state, partial_state_updates, exo_proc):
+    def generate_config(self, initial_state, partial_state_updates, exo_proc
+                        ) -> List[Tuple[List[Callable], List[Callable]]]:
        def no_update_handler(bdf, sdf):
            if (bdf.empty == False) and (sdf.empty == True):
@@ -135,4 +139,4 @@ class Processor:
            sdf_values, bdf_values = only_ep_handler(initial_state)
        zipped_list = list(zip(sdf_values, bdf_values))
        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))

View File

@@ -6,7 +6,7 @@ import pandas as pd
# Temporary
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates
-from cadCAD.utils import dict_filter, contains_type
+from cadCAD.utils import dict_filter, contains_type, flatten_tabulated_dict, tabulate_dict
# ToDo: Fix - Returns empty when partial_state_update is missing in Configuration
@@ -122,4 +122,23 @@ def exo_update_per_ts(ep):
        else:
            return y, s[y]
    return {es: ep_decorator(f, es) for es, f in ep.items()}
+# Param Sweep enabling middleware
+def config_sim(d):
+    def process_variables(d):
+        return flatten_tabulated_dict(tabulate_dict(d))
+    if "M" in d:
+        return [
+            {
+                "N": d["N"],
+                "T": d["T"],
+                "M": M
+            }
+            for M in process_variables(d["M"])
+        ]
+    else:
+        d["M"] = [{}]
+        return d
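For context, a minimal usage sketch of the relocated `config_sim` middleware. The parameter values, `N`, and `T` below are illustrative (not taken from this commit); only the call shape follows the new code and the `sweep_config` example elsewhere in this diff.

```python
# Illustrative only: hypothetical sweep parameters for the new config_sim middleware.
from cadCAD.configuration.utils import config_sim

sweep_params = {'beta': [2, 5], 'gamma': [3, 4]}  # lists of values to sweep

sim_config = config_sim({
    "N": 2,            # number of runs (illustrative)
    "T": range(10),    # timestep range (illustrative)
    "M": sweep_params  # swept parameters
})
# sim_config should be a list of {"N", "T", "M"} dicts, one per index position
# across the swept lists (values appear to be zipped by position via
# tabulate_dict / flatten_tabulated_dict), which can then be handed to
# append_configs(sim_configs=sim_config, ...).
```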

View File

@@ -30,10 +30,10 @@ def sanitize_partial_state_updates(partial_state_updates):
    # Also for backwards compatibility, we accept partial state update blocks both as list or dict
    # No need for a deprecation warning as it's already raised by cadCAD.utils.key_filter
-    if (type(new_partial_state_updates)==list):
+    if isinstance(new_partial_state_updates, list):
        for v in new_partial_state_updates:
            rename_keys(v)
-    elif (type(new_partial_state_updates)==dict):
+    elif isinstance(new_partial_state_updates, dict):
        for k, v in new_partial_state_updates.items():
            rename_keys(v)

View File

@@ -1,20 +0,0 @@
-from cadCAD.utils import flatten_tabulated_dict, tabulate_dict
-def process_variables(d):
-    return flatten_tabulated_dict(tabulate_dict(d))
-def config_sim(d):
-    if "M" in d:
-        return [
-            {
-                "N": d["N"],
-                "T": d["T"],
-                "M": M
-            }
-            for M in process_variables(d["M"])
-        ]
-    else:
-        d["M"] = [{}]
-        return d

View File

@@ -14,7 +14,7 @@ def get_base_value(x):
def policy_to_dict(v):
-    return dict(list(zip(map(lambda n: 'b' + str(n + 1), list(range(len(v)))), v)))
+    return dict(list(zip(map(lambda n: 'p' + str(n + 1), list(range(len(v)))), v)))
add = lambda a, b: a + b
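A small sketch of what the key rename above produces; the function body is copied from the new line in the diff, and the input policy outputs are hypothetical.

```python
# policy_to_dict now labels aggregated policy results p1, p2, ... instead of b1, b2, ...
def policy_to_dict(v):
    return dict(list(zip(map(lambda n: 'p' + str(n + 1), list(range(len(v)))), v)))

print(policy_to_dict([{'param1': 1}, {'param2': 4}]))
# {'p1': {'param1': 1}, 'p2': {'param2': 4}}
```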

View File

@@ -1,33 +1,59 @@
-from pathos.multiprocessing import ProcessingPool as Pool
+from typing import Callable, Dict, List, Any, Tuple
+from pathos.multiprocessing import ProcessingPool as PPool
+from pandas.core.frame import DataFrame
from cadCAD.utils import flatten
-from cadCAD.configuration import Processor
+from cadCAD.configuration import Configuration, Processor
from cadCAD.configuration.utils import TensorFieldReport
from cadCAD.engine.simulation import Executor as SimExecutor
+VarDictType = Dict[str, List[Any]]
+StatesListsType = List[Dict[str, Any]]
+ConfigsType = List[Tuple[List[Callable], List[Callable]]]
+EnvProcessesType = Dict[str, Callable]
class ExecutionMode:
    single_proc = 'single_proc'
    multi_proc = 'multi_proc'
+def single_proc_exec(
+    simulation_execs: List[Callable],
+    var_dict_list: List[VarDictType],
+    states_lists: List[StatesListsType],
+    configs_structs: List[ConfigsType],
+    env_processes_list: List[EnvProcessesType],
+    Ts: List[range],
+    Ns: List[int]
+):
+    l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
+    simulation_exec, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
+    result = simulation_exec(var_dict_list, states_list, config, env_processes, T, N)
+    return flatten(result)
+def parallelize_simulations(
+    simulation_execs: List[Callable],
+    var_dict_list: List[VarDictType],
+    states_lists: List[StatesListsType],
+    configs_structs: List[ConfigsType],
+    env_processes_list: List[EnvProcessesType],
+    Ts: List[range],
+    Ns: List[int]
+):
+    l = list(zip(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns))
+    with PPool(len(configs_structs)) as p:
+        results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6]), l)
+    return results
class ExecutionContext:
-    def __init__(self, context=ExecutionMode.multi_proc):
+    def __init__(self, context: str = ExecutionMode.multi_proc) -> None:
        self.name = context
        self.method = None
-        def single_proc_exec(simulation_execs, var_dict, states_lists, configs_structs, env_processes_list, Ts, Ns):
-            l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
-            simulation, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
-            result = simulation(var_dict, states_list, config, env_processes, T, N)
-            return flatten(result)
-        def parallelize_simulations(fs, var_dict_list, states_list, configs, env_processes, Ts, Ns):
-            l = list(zip(fs, var_dict_list, states_list, configs, env_processes, Ts, Ns))
-            with Pool(len(configs)) as p:
-                results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6]), l)
-            return results
        if context == 'single_proc':
            self.method = single_proc_exec
        elif context == 'multi_proc':
@@ -35,14 +61,14 @@ class ExecutionContext:
class Executor:
-    def __init__(self, exec_context, configs):
+    def __init__(self, exec_context: ExecutionContext, configs: List[Configuration]) -> None:
        self.SimExecutor = SimExecutor
        self.exec_method = exec_context.method
        self.exec_context = exec_context.name
        self.configs = configs
        self.main = self.execute
-    def execute(self):
+    def execute(self) -> Tuple[List[Dict[str, Any]], DataFrame]:
        config_proc = Processor()
        create_tensor_field = TensorFieldReport(config_proc).create_tensor_field
@@ -64,11 +90,13 @@ class Executor:
            config_idx += 1
+        final_result = None
        if self.exec_context == ExecutionMode.single_proc:
            # ToDO: Deprication Handler - "sanitize" in appropriate place
            tensor_field = create_tensor_field(partial_state_updates.pop(), eps.pop())
            result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
-            return result, tensor_field
+            final_result = result, tensor_field
        elif self.exec_context == ExecutionMode.multi_proc:
            if len(self.configs) > 1:
                simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
@@ -76,4 +104,6 @@ class Executor:
                for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
                    results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))
-                return results
+                final_result = results
+        return final_result

View File

@@ -1,20 +1,38 @@
+from typing import Any, Callable, Dict, List, Tuple
+from pathos.pools import ThreadPool as TPool
from copy import deepcopy
from fn.op import foldr, call
from cadCAD.engine.utils import engine_exception
+from cadCAD.utils import flatten
-id_exception = engine_exception(KeyError, KeyError, None)
+id_exception: Callable = engine_exception(KeyError, KeyError, None)
class Executor:
-    def __init__(self, policy_ops, policy_update_exception=id_exception, state_update_exception=id_exception):
-        self.policy_ops = policy_ops # behavior_ops
+    def __init__(
+        self,
+        policy_ops: List[Callable],
+        policy_update_exception: Callable = id_exception,
+        state_update_exception: Callable = id_exception
+    ) -> None:
+        # behavior_ops
+        self.policy_ops = policy_ops
        self.state_update_exception = state_update_exception
-        self.policy_update_exception = policy_update_exception # behavior_update_exception
+        self.policy_update_exception = policy_update_exception
+        # behavior_update_exception
-    # get_behavior_input
-    def get_policy_input(self, var_dict, sub_step, sL, s, funcs):
+    # sL: State Window
+    # get_behavior_input
+    def get_policy_input(
+        self,
+        var_dict: Dict[str, List[Any]],
+        sub_step: int,
+        sL: List[Dict[str, Any]],
+        s: Dict[str, Any],
+        funcs: List[Callable]
+    ) -> Dict[str, Any]:
        ops = self.policy_ops[::-1]
        def get_col_results(var_dict, sub_step, sL, s, funcs):
@@ -22,23 +40,39 @@ class Executor:
        return foldr(call, get_col_results(var_dict, sub_step, sL, s, funcs))(ops)
-    def apply_env_proc(self, env_processes, state_dict, sub_step):
+    def apply_env_proc(
+        self,
+        env_processes: Dict[str, Callable],
+        state_dict: Dict[str, Any],
+        sub_step: int
+    ) -> None:
        for state in state_dict.keys():
            if state in list(env_processes.keys()):
-                env_state = env_processes[state]
+                env_state: Callable = env_processes[state]
                if (env_state.__name__ == '_curried') or (env_state.__name__ == 'proc_trigger'):
                    state_dict[state] = env_state(sub_step)(state_dict[state])
                else:
                    state_dict[state] = env_state(state_dict[state])
    # mech_step
-    def partial_state_update(self, var_dict, sub_step, sL, state_funcs, policy_funcs, env_processes, time_step, run):
-        last_in_obj = deepcopy(sL[-1])
-        _input = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sL, last_in_obj, policy_funcs))
+    def partial_state_update(
+        self,
+        var_dict: Dict[str, List[Any]],
+        sub_step: int,
+        sL: Any,
+        state_funcs: List[Callable],
+        policy_funcs: List[Callable],
+        env_processes: Dict[str, Callable],
+        time_step: int,
+        run: int
+    ) -> List[Dict[str, Any]]:
+        last_in_obj: Dict[str, Any] = deepcopy(sL[-1])
+        _input: Dict[str, Any] = self.policy_update_exception(self.get_policy_input(var_dict, sub_step, sL, last_in_obj, policy_funcs))
        # ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
-        last_in_copy = dict(
+        last_in_copy: Dict[str, Any] = dict(
            [
                self.state_update_exception(f(var_dict, sub_step, sL, last_in_obj, _input)) for f in state_funcs
            ]
@@ -52,52 +86,92 @@ class Executor:
        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])
+        # ToDo: make 'substep' & 'timestep' reserve fields
        last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
        sL.append(last_in_copy)
        del last_in_copy
        return sL
-    # mech_pipeline
-    def state_update_pipeline(self, var_dict, states_list, configs, env_processes, time_step, run):
+    # mech_pipeline - state_update_block
+    def state_update_pipeline(
+        self,
+        var_dict: Dict[str, List[Any]],
+        states_list: List[Dict[str, Any]],
+        configs: List[Tuple[List[Callable], List[Callable]]],
+        env_processes: Dict[str, Callable],
+        time_step: int,
+        run: int
+    ) -> List[Dict[str, Any]]:
        sub_step = 0
-        states_list_copy = deepcopy(states_list)
-        genesis_states = states_list_copy[-1]
+        states_list_copy: List[Dict[str, Any]] = deepcopy(states_list)
+        genesis_states: Dict[str, Any] = states_list_copy[-1]
        genesis_states['substep'], genesis_states['timestep'] = sub_step, time_step
-        states_list = [genesis_states]
+        states_list: List[Dict[str, Any]] = [genesis_states]
        sub_step += 1
        for config in configs:
            s_conf, p_conf = config[0], config[1]
-            states_list = self.partial_state_update(var_dict, sub_step, states_list, s_conf, p_conf, env_processes, time_step, run)
+            states_list: List[Dict[str, Any]] = self.partial_state_update(
+                var_dict, sub_step, states_list, s_conf, p_conf, env_processes, time_step, run
+            )
            sub_step += 1
        time_step += 1
        return states_list
-    def run_pipeline(self, var_dict, states_list, configs, env_processes, time_seq, run):
-        time_seq = [x + 1 for x in time_seq]
-        simulation_list = [states_list]
+    # state_update_pipeline
+    def run_pipeline(
+        self,
+        var_dict: Dict[str, List[Any]],
+        states_list: List[Dict[str, Any]],
+        configs: List[Tuple[List[Callable], List[Callable]]],
+        env_processes: Dict[str, Callable],
+        time_seq: range,
+        run: int
+    ) -> List[List[Dict[str, Any]]]:
+        time_seq: List[int] = [x + 1 for x in time_seq]
+        simulation_list: List[List[Dict[str, Any]]] = [states_list]
        for time_step in time_seq:
-            pipe_run = self.state_update_pipeline(var_dict, simulation_list[-1], configs, env_processes, time_step, run)
+            pipe_run: List[Dict[str, Any]] = self.state_update_pipeline(
+                var_dict, simulation_list[-1], configs, env_processes, time_step, run
+            )
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)
        return simulation_list
-    # ToDo: Muiltithreaded Runs
-    def simulation(self, var_dict, states_list, configs, env_processes, time_seq, runs):
-        pipe_run = []
-        for run in range(runs):
-            run += 1
-            states_list_copy = deepcopy(states_list)
-            head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
-            genesis = head.pop()
-            genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
-            first_timestep_per_run = [genesis] + tail.pop(0)
-            pipe_run += [first_timestep_per_run] + tail
-            del states_list_copy
-        return pipe_run
+    def simulation(
+        self,
+        var_dict: Dict[str, List[Any]],
+        states_list: List[Dict[str, Any]],
+        configs: List[Tuple[List[Callable], List[Callable]]],
+        env_processes: Dict[str, Callable],
+        time_seq: range,
+        runs: int
+    ) -> List[List[Dict[str, Any]]]:
+        def execute_run(var_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
+            run += 1
+            states_list_copy: List[Dict[str, Any]] = deepcopy(states_list)
+            head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
+            del states_list_copy
+            genesis: Dict[str, Any] = head.pop()
+            genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
+            first_timestep_per_run: List[Dict[str, Any]] = [genesis] + tail.pop(0)
+            return [first_timestep_per_run] + tail
+        pipe_run: List[List[Dict[str, Any]]] = flatten(
+            TPool().map(
+                lambda run: execute_run(var_dict, states_list, configs, env_processes, time_seq, run),
+                list(range(runs))
+            )
+        )
+        return pipe_run
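The commit title notes that deepcopy was re-included. Below is a minimal, standalone illustration (plain Python, not cadCAD code; the nested `s1` state is made up) of the aliasing problem that deep-copying `sL[-1]` and `states_list` avoids: without a deep copy, an update that mutates nested state silently rewrites the already-recorded history.

```python
from copy import deepcopy

# Hypothetical state history, shaped loosely like the engine's states_list / sL.
history = [{'s1': {'count': 0}, 'substep': 0, 'timestep': 0}]

# Without a copy, the "new" state aliases the recorded one...
aliased = history[-1]
aliased['s1']['count'] += 1
print(history[0]['s1']['count'])   # 1 -- the recorded genesis state was mutated

# With deepcopy (as partial_state_update / simulation do), history stays intact.
history = [{'s1': {'count': 0}, 'substep': 0, 'timestep': 0}]
safe = deepcopy(history[-1])
safe['s1']['count'] += 1
history.append(safe)
print(history[0]['s1']['count'])   # 0 -- genesis state preserved
```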

View File

@@ -39,4 +39,4 @@ def engine_exception(ErrorType, error_message, exception_function, try_function)
def fit_param(param, x):
    return x + param
# fit_param = lambda param: lambda x: x + param

View File

@@ -1,7 +1,9 @@
+from typing import Dict, List
from collections import defaultdict
from itertools import product
import warnings
def pipe(x):
    return x
@@ -41,11 +43,11 @@ def dict_filter(dictionary, condition):
    return dict([(k, v) for k, v in dictionary.items() if condition(v)])
-def get_max_dict_val_len(g):
+def get_max_dict_val_len(g: Dict[str, List[int]]) -> int:
    return len(max(g.values(), key=len))
-def tabulate_dict(d):
+def tabulate_dict(d: Dict[str, List[int]]) -> Dict[str, List[int]]:
    max_len = get_max_dict_val_len(d)
    _d = {}
    for k, vl in d.items():
@@ -57,7 +59,7 @@ def tabulate_dict(d):
    return _d
-def flatten_tabulated_dict(d):
+def flatten_tabulated_dict(d: Dict[str, List[int]]) -> List[Dict[str, int]]:
    max_len = get_max_dict_val_len(d)
    dl = [{} for i in range(max_len)]
@@ -133,4 +135,4 @@ def curry_pot(f, *argv):
# def decorator(f):
# f.__name__ = newname
# return f
# return decorator

Binary file not shown.

BIN
dist/cadCAD-0.2.1-py3-none-any.whl vendored Normal file

Binary file not shown.

View File

@@ -11,7 +11,7 @@ long_description = "cadCAD is a differential games based simulation software pac
monte carlo analysis and other common numerical methods is provided."
setup(name='cadCAD',
-      version='0.2',
+      version='0.2.1',
      description="cadCAD: a differential games based simulation software package for research, validation, and \
      Computer Aided Design of economic systems",
      long_description=long_description,

View File

@@ -9,11 +9,11 @@ exec_mode = ExecutionMode()
print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
-run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
+run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['config1', 'config2']
-for raw_result, tensor_field in run2.main():
+for raw_result, tensor_field in run.main():
    result = pd.DataFrame(raw_result)
    print()
    print("Tensor Field: " + config_names[i])
@@ -21,4 +21,4 @@ for raw_result, tensor_field in run2.main():
    print("Output:")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
    i += 1

View File

@@ -9,11 +9,11 @@ exec_mode = ExecutionMode()
print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
-run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
+run = Executor(exec_context=multi_proc_ctx, configs=configs)
i = 0
config_names = ['sweep_config_A', 'sweep_config_B']
-for raw_result, tensor_field in run2.main():
+for raw_result, tensor_field in run.main():
    result = pd.DataFrame(raw_result)
    print()
    print("Tensor Field: " + config_names[i])
@@ -21,4 +21,4 @@ for raw_result, tensor_field in run2.main():
    print("Output:")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
    i += 1

View File

@ -11,12 +11,13 @@ print("Simulation Execution: Single Configuration")
print() print()
first_config = configs # only contains config1 first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc) single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run1 = Executor(exec_context=single_proc_ctx, configs=first_config) run = Executor(exec_context=single_proc_ctx, configs=first_config)
run1_raw_result, tensor_field = run1.main()
result = pd.DataFrame(run1_raw_result) raw_result, tensor_field = run.main()
result = pd.DataFrame(raw_result)
print() print()
print("Tensor Field: config1") print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql')) print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:") print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql')) print(tabulate(result, headers='keys', tablefmt='psql'))
print() print()

View File

@@ -3,8 +3,7 @@ import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
-from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
-from cadCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
seeds = {
@@ -21,6 +20,8 @@ def p1m1(_g, step, sL, s):
def p2m1(_g, step, sL, s):
    return {'param2': 4}
+# []
def p1m2(_g, step, sL, s):
    return {'param1': 'a', 'param2': 2}
def p2m2(_g, step, sL, s):
@@ -78,8 +79,8 @@ def es4p2(_g, step, sL, s, _input):
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
-    y = 'timestep'
-    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
+    y = 'timestamp'
+    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)
@@ -98,14 +99,14 @@ genesis_states = {
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
-    # 'timestep': '2018-10-01 15:16:24'
+    'timestamp': '2018-10-01 15:16:24'
}
raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
-    # "timestep": es5p2
+    "timestamp": es5p2
}

View File

@@ -3,8 +3,8 @@ import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
-from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
-from cadCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
seeds = {
    'z': np.random.RandomState(1),
@@ -77,8 +77,8 @@ def es4p2(_g, step, sL, s, _input):
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
-    y = 'timestep'
-    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
+    y = 'timestamp'
+    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)
@@ -97,14 +97,14 @@ genesis_states = {
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
-    # 'timestep': '2018-10-01 15:16:24'
+    'timestamp': '2018-10-01 15:16:24'
}
raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
-    # "timestep": es5p2
+    "timestamp": es5p2
}

View File

@@ -3,8 +3,7 @@ import numpy as np
from datetime import timedelta
from cadCAD.configuration import append_configs
-from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step
-from cadCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
seeds = {

View File

@@ -4,8 +4,9 @@ from datetime import timedelta
import pprint
from cadCAD.configuration import append_configs
-from cadCAD.configuration.utils import proc_trigger, ep_time_step
-from cadCAD.configuration.utils.parameterSweep import config_sim
+from cadCAD.configuration.utils import proc_trigger, ep_time_step, config_sim
+from typing import Dict, List
pp = pprint.PrettyPrinter(indent=4)
@@ -17,7 +18,7 @@ seeds = {
}
-g = {
+g: Dict[str, List[int]] = {
    'alpha': [1],
    'beta': [2, 5],
    'gamma': [3, 4],
@@ -93,15 +94,12 @@ def es4p2(_g, step, sL, s, _input):
ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
-    y = 'timestep'
-    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
+    y = 'timestamp'
+    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)
# Environment States
-# @curried
-# def env_a(param, x):
-# return x + param
def env_a(x):
    return x
def env_b(x):
@@ -114,7 +112,7 @@ genesis_states = {
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
-    # 'timestep': '2018-10-01 15:16:24'
+    'timestamp': '2018-10-01 15:16:24'
}
@@ -122,26 +120,14 @@ genesis_states = {
raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
-    # "timestep": es5p2
+    'timestamp': es5p2
}
-# ToDo: make env proc trigger field agnostic
-# ToDo: input json into function renaming __name__
triggered_env_b = proc_trigger(1, env_b)
env_processes = {
-    "s3": env_a, #sweep(beta, env_a),
-    "s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
+    "s3": env_a,
+    "s4": triggered_env_b
}
-# parameterized_env_processes = parameterize_states(env_processes)
-#
-# pp.pprint(parameterized_env_processes)
-# exit()
-# ToDo: The number of values entered in sweep should be the # of config objs created,
-# not dependent on the # of times the sweep is applied
-# sweep exo_state func and point to exo-state in every other funtion
-# param sweep on genesis states
partial_state_update_block = {
    "m1": {