agent perception

parent c4863a838d
commit 9dbb866bd0
@@ -10,6 +10,8 @@ from cadCAD.configuration.utils import exo_update_per_ts
 from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
 from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config
 
+# policy_ops=[foldr(dict_elemwise_sum())]
+# policy_ops=[reduce, lambda a, b: {**a, **b}]
 
 class Configuration(object):
     def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
@@ -1,5 +1,6 @@
 from fn.op import foldr
 from fn.func import curried
+from collections import Counter
 
 def get_base_value(x):
     if isinstance(x, str):
@@ -17,7 +18,7 @@ def policy_to_dict(v):
 
 
 add = lambda a, b: a + b
-
+# df_union = lambda a, b: ...
 
 @curried
 def foldr_dict_vals(f, d):
@@ -38,8 +39,43 @@ def dict_op(f, d1, d2):
 
     key_set = set(list(d1.keys()) + list(d2.keys()))
 
     return {k: f(set_base_value(d1, d2, k), set_base_value(d2, d1, k)) for k in key_set}
 
+#
+# @curried
+# def dict_op(f, d1, d2):
+#     def set_base_value(target_dict, source_dict, key):
+#         if key not in target_dict:
+#             return get_base_value(source_dict[key])
+#         else:
+#             return target_dict[key]
+#
+#     key_set = set(list(d1.keys()) + list(d2.keys()))
+#
+#     norm_d1 = {k: set_base_value(d1, d2, k) for k in key_set}
+#     norm_d2 = {k: set_base_value(d2, d1, k) for k in key_set}
+#
+#     return {k: f(set_base_value(d1, d2, k), set_base_value(d2, d1, k)) for k in key_set}
+
+
+# @curried
+# def dict_op(f, d1, d2):
+#     # d1C = Counter(d1)
+#     # d2C = Counter(d2)
+#     def set_base_value(target_dict, source_dict, key):
+#         if key not in target_dict:
+#             return get_base_value(source_dict[key])
+#         else:
+#             return target_dict[key]
+#     key_set = set(list(d1.keys()) + list(d2.keys()))
+#     norm_d1 = Counter({k: set_base_value(d1, d2, k) for k in key_set})
+#     norm_d2 = Counter({k: set_base_value(d2, d1, k) for k in key_set})
+#     # print(norm_d1)
+#     # print(norm_d2)
+#     print(norm_d1 + norm_d2)
+#     # print(f(norm_d1, norm_d2))
+#     print()
+#     return f(norm_d1, norm_d2)
+
 
 def dict_elemwise_sum():
     return dict_op(add)
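
Note: the commented-out blocks above are alternative dict_op implementations left in the source. A minimal sketch of what the active dict_op / dict_elemwise_sum path does, assuming get_base_value maps a missing key's counterpart to a type-appropriate identity value (e.g. 0 for numbers, '' for strings):

    d1 = {'a': 1, 'b': 2}
    d2 = {'b': 3, 'c': 4}
    # dict_elemwise_sum() == dict_op(add): union of keys, values missing on one
    # side replaced by their base value, then combined pairwise with `add`
    # dict_elemwise_sum()(d1, d2)  ->  {'a': 1, 'b': 5, 'c': 4}
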
@@ -1,6 +1,16 @@
 from collections import namedtuple
+from copy import deepcopy
 from inspect import getmembers, ismethod
+from pandas.core.frame import DataFrame
 
+from cadCAD.utils import SilentDF
+
+
+def val_switch(v):
+    if isinstance(v, DataFrame) is True:
+        return SilentDF(v)
+    else:
+        return v
 
 class udcView(object):
     def __init__(self, d):
@@ -10,9 +20,12 @@ class udcView(object):
     # def __repr__(self):
     def __repr__(self):
         members = {}
-        functionless = {k: v for k, v in self.__dict__.items() if str(type(v)) != "<class 'method'>" and k != 'obj'}
-        members['functions'] = [k for k, v in self.__dict__.items() if str(type(v)) == "<class 'method'>"]
-        members.update(functionless)
+        variables = {
+            k: val_switch(v) for k, v in self.__dict__.items()
+            if str(type(v)) != "<class 'method'>" and k != 'obj'  # and isinstance(v, DataFrame) is not True
+        }
+        members['methods'] = [k for k, v in self.__dict__.items() if str(type(v)) == "<class 'method'>"]
+        members.update(variables)
         return f"{members}"
@@ -22,7 +35,7 @@ class udcBroker(object):
         funcs = dict(getmembers(obj, ismethod))
         filtered_functions = {k: v for k, v in funcs.items() if k not in function_filter}
         d['obj'] = obj
-        d.update(vars(obj)) # somehow is enough
+        d.update(deepcopy(vars(obj))) # somehow is enough
         d.update(filtered_functions)
 
         self.members_dict = d
@@ -37,11 +50,12 @@ class udcBroker(object):
         return namedtuple("Hydra", self.members_dict.keys())(*self.members_dict.values())
 
 
-def generate_udc_view(udc):
+
+def UDO(udc):
     return udcBroker(udc).get_view()
 
 
-def next_udc_view(obj_view):
-    return generate_udc_view(obj_view.obj)
+def udoPipe(obj_view):
+    return UDO(obj_view.obj)
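
Note: generate_udc_view / next_udc_view are renamed to UDO / udoPipe in this commit. A rough usage sketch of the pattern, based only on the code above (the broker snapshots an object's fields and methods into a namedtuple view; udoPipe rebuilds the view from the wrapped object after it has been mutated). The Counter class below is a hypothetical stand-in:

    class Counter(object):
        def __init__(self):
            self.x = 0
        def update(self):
            self.x += 1
            return self

    view = UDO(Counter())   # namedtuple view exposing .x, .update and .obj
    view.update()           # mutates the wrapped object in place
    view = udoPipe(view)    # re-wrap view.obj so .x reflects the update
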
@@ -1,4 +1,5 @@
 from typing import Any, Callable, Dict, List, Tuple
+
 from pathos.pools import ThreadPool as TPool
 from copy import deepcopy
 from functools import reduce
@@ -33,22 +34,50 @@ class Executor:
             funcs: List[Callable]
         ) -> Dict[str, Any]:
 
-        ops = self.policy_ops[::-1]
+        # ops = self.policy_ops[::-1]
+        ops = self.policy_ops
 
         def get_col_results(var_dict, sub_step, sL, s, funcs):
             return list(map(lambda f: f(var_dict, sub_step, sL, s), funcs))
 
-        # return foldr(call, get_col_results(var_dict, sub_step, sL, s, funcs))(ops)
+        def compose(init_reduction_funct, funct_list, val_list):
+            result, i = None, 0
+            composition = lambda x: [reduce(init_reduction_funct, x)] + funct_list
+            for g in composition(val_list):
+                if i == 0:
+                    result = g
+                    i = 1
+                else:
+                    result = g(result)
+            return result
+
         col_results = get_col_results(var_dict, sub_step, sL, s, funcs)
-        return reduce(lambda a, b: {**a, **b}, col_results)
+        key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results))))))
+        new_dict = {k: [] for k in key_set}
+        for d in col_results:
+            for k in d.keys():
+                new_dict[k].append(d[k])
+
+        ops_head, *ops_tail = ops
+        return {
+            k: compose(
+                init_reduction_funct=ops_head,  # func executed on value list
+                funct_list=ops_tail,
+                val_list=val_list
+            ) for k, val_list in new_dict.items()
+        }
+
+        # [f1] = ops
+        # return {k: reduce(f1, val_list) for k, val_list in new_dict.items()}
+        # return foldr(call, col_results)(ops)
 
     def apply_env_proc(
             self,
             env_processes: Dict[str, Callable],
             state_dict: Dict[str, Any],
             sub_step: int
-        ) -> None:
+        ) -> Dict[str, Any]:
         for state in state_dict.keys():
             if state in list(env_processes.keys()):
                 env_state: Callable = env_processes[state]
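
Note: the rewritten aggregation block above replaces the single dict-merge reduce with per-key aggregation: the outputs of all policy functions are grouped by key, the first entry of policy_ops reduces each key's value list, and any remaining entries are applied to the reduced value as post-processing steps. A small standalone sketch of the same logic (variable names here are illustrative, not engine API):

    from functools import reduce

    col_results = [{'param1': 1}, {'param1': 1, 'param2': 4}]   # policy outputs
    ops = [lambda a, b: a + b, lambda y: y + 100]               # reducer + extra pass

    grouped = {}
    for d in col_results:
        for k, v in d.items():
            grouped.setdefault(k, []).append(v)

    head, *tail = ops
    aggregated = {k: reduce(head, vs) for k, vs in grouped.items()}
    for g in tail:
        aggregated = {k: g(v) for k, v in aggregated.items()}

    print(aggregated)   # {'param1': 102, 'param2': 104}
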
@@ -57,6 +86,8 @@ class Executor:
                 else:
                     state_dict[state] = env_state(state_dict[state])
 
+        return state_dict
+
     # mech_step
     def partial_state_update(
             self,
@@ -81,16 +112,15 @@ class Executor:
             for f in state_funcs:
                 yield self.state_update_exception(f(var_dict, sub_step, sL, last_in_obj, _input))
 
-        last_in_copy: Dict[str, Any] = dict(generate_record(state_funcs))
-
-        for k in last_in_obj:
-            if k not in last_in_copy:
-                last_in_copy[k] = last_in_obj[k]
-
-        del last_in_obj
-
-        self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])
+        def transfer_missing_fields(source, destination):
+            for k in source:
+                if k not in destination:
+                    destination[k] = source[k]
+            del source  # last_in_obj
+            return destination
 
+        last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs)))
+        last_in_copy: Dict[str, Any] = self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestep'])
         # ToDo: make 'substep' & 'timestep' reserve fields
         last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run
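
Note: the inlined copy loop becomes transfer_missing_fields, and apply_env_proc's result is now assigned back (it returns state_dict after this commit). Behaviorally, per the code above, keys the state update functions did not produce are carried over from the previous record:

    prev = {'a': 1, 'b': 2, 'run': 1}
    new = {'a': 5}
    # transfer_missing_fields(prev, new)  ->  {'a': 5, 'b': 2, 'run': 1}
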
@@ -164,15 +194,18 @@ class Executor:
 
         def execute_run(var_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
             run += 1
-            states_list_copy: List[Dict[str, Any]] = deepcopy(states_list)
 
-            head, *tail = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
+            def generate_init_sys_metrics(genesis_states_list):
+                for d in genesis_states_list:
+                    d['run'], d['substep'], d['timestep'] = run, int(0), int(0)
+                    yield d
+
+            states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(deepcopy(states_list)))
+
+            first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(var_dict, states_list_copy, configs, env_processes, time_seq, run)
             del states_list_copy
 
-            genesis: Dict[str, Any] = head.pop()
-            genesis['substep'], genesis['timestep'], genesis['run'] = 0, 0, run
-            first_timestep_per_run: List[Dict[str, Any]] = [genesis] + tail.pop(0)
-            return [first_timestep_per_run] + tail
+            return first_timestep_per_run
 
         pipe_run: List[List[Dict[str, Any]]] = flatten(
             TPool().map(
@@ -1,8 +1,23 @@
+from functools import reduce
 from typing import Dict, List
 from collections import defaultdict, Counter
 from itertools import product
 import warnings
 
+from pandas import DataFrame
+
+
+class SilentDF(DataFrame):
+    def __repr__(self):
+        return f"{hex(id(DataFrame))})" #"pandas.core.frame.DataFrame"
+
+
+def val_switch(v):
+    if isinstance(v, DataFrame) is True or isinstance(v, SilentDF) is True:
+        return SilentDF(v)
+    else:
+        return v.x
+
 
 class IndexCounter:
     def __init__(self):
@@ -12,6 +27,11 @@ class IndexCounter:
         self.i += 1
         return self.i
 
+# def compose(*functions):
+#     return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
+
+def compose(*functions):
+    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
 
 def pipe(x):
     return x
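
Note: compose folds right to left, i.e. the last function passed is applied first. A quick check of the ordering:

    inc = lambda x: x + 1
    dbl = lambda x: x * 2
    assert compose(inc, dbl)(3) == 7   # dbl first, then inc
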
@@ -1,25 +0,0 @@
-import pandas as pd
-from tabulate import tabulate
-# The following imports NEED to be in the exact order
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import config_az
-from cadCAD import configs
-
-import pprint as pp
-
-exec_mode = ExecutionMode()
-
-print("Simulation Execution: Single Configuration")
-print()
-first_config = configs # only contains config1
-single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
-run = Executor(exec_context=single_proc_ctx, configs=first_config)
-
-raw_result, tensor_field = run.main()
-result = pd.DataFrame(raw_result)
-print()
-print("Tensor Field: config1")
-print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
-print("Output:")
-print(tabulate(result, headers='keys', tablefmt='psql'))
-print()
@@ -1,24 +0,0 @@
-import pandas as pd
-from tabulate import tabulate
-# The following imports NEED to be in the exact order
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import config_az_a
-from cadCAD import configs
-
-
-exec_mode = ExecutionMode()
-
-print("Simulation Execution: Single Configuration")
-print()
-first_config = configs # only contains config1
-single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
-run = Executor(exec_context=single_proc_ctx, configs=first_config)
-
-raw_result, tensor_field = run.main()
-result = pd.DataFrame(raw_result)
-print()
-print("Tensor Field: config1")
-print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
-print("Output:")
-print(tabulate(result, headers='keys', tablefmt='psql'))
-print()
@@ -1,24 +0,0 @@
-import pandas as pd
-from tabulate import tabulate
-# The following imports NEED to be in the exact order
-from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import config_az_b
-from cadCAD import configs
-
-
-exec_mode = ExecutionMode()
-
-print("Simulation Execution: Single Configuration")
-print()
-first_config = configs # only contains config1
-single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
-run = Executor(exec_context=single_proc_ctx, configs=first_config)
-
-raw_result, tensor_field = run.main()
-result = pd.DataFrame(raw_result)
-print()
-print("Tensor Field: config1")
-print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
-print("Output:")
-print(tabulate(result, headers='keys', tablefmt='psql'))
-print()
@@ -5,7 +5,6 @@ from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
 from simulations.validation import config_udc_json3
 from cadCAD import configs
 
-import pprint as pp
 
 exec_mode = ExecutionMode()
 
@@ -17,10 +16,22 @@ run = Executor(exec_context=single_proc_ctx, configs=first_config)
 
 raw_result, tensor_field = run.main()
 result = pd.DataFrame(raw_result)
+result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)
+
+# print(list(result['c']))
+# print(tabulate(result['c'].apply(pd.Series), headers='keys', tablefmt='psql'))
+
 print()
 print("Tensor Field: config1")
 print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
 print("Output:")
 print(tabulate(result, headers='keys', tablefmt='psql'))
 print()
 print(result.info(verbose=True))
+
+# def f(df, col):
+#     for k in df[col].iloc[0].keys():
+#         df[k] = None
+#     for index, row in df.iterrows():
+#         # df.apply(lambda row:, axis=1)
@@ -2,10 +2,11 @@ import pandas as pd
 from tabulate import tabulate
 # The following imports NEED to be in the exact order
 from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import config_udc_json
+# from simulations.validation import config1_test_pipe
+# from simulations.validation import config1
+from simulations.validation import externalds
 from cadCAD import configs
 
 exec_mode = ExecutionMode()
 
 print("Simulation Execution: Single Configuration")
@@ -14,11 +15,10 @@ first_config = configs # only contains config1
 single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
 run = Executor(exec_context=single_proc_ctx, configs=first_config)
 
-raw_result, tensor_field = run.main()
+raw_result, _ = run.main()
 result = pd.DataFrame(raw_result)
-print()
-print("Tensor Field: config1")
-print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
+result.to_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/output.csv', index=False)
+
 print("Output:")
 print(tabulate(result, headers='keys', tablefmt='psql'))
 print()
@@ -2,7 +2,9 @@ import pandas as pd
 from tabulate import tabulate
 # The following imports NEED to be in the exact order
 from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
-from simulations.validation import config1
+# from simulations.validation import config1_test_pipe
+# from simulations.validation import config1
+from simulations.validation import externalds
 from cadCAD import configs
 
 exec_mode = ExecutionMode()
@@ -1,7 +1,11 @@
 from decimal import Decimal
+from functools import reduce
 
 import numpy as np
 from datetime import timedelta
 
+from cadCAD.configuration.utils.policyAggregation import get_base_value
+
 from cadCAD.configuration import append_configs
 from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
@@ -17,7 +21,7 @@ seeds = {
 def p1m1(_g, step, sL, s):
     return {'param1': 1}
 def p2m1(_g, step, sL, s):
-    return {'param2': 4}
+    return {'param1': 1, 'param2': 4}
 
 # []
@@ -60,6 +64,13 @@ def s2m3(_g, step, sL, s, _input):
     x = _input['param2']
     return (y, x)
 
+def policies(_g, step, sL, s, _input):
+    y = 'policies'
+    x = _input
+    return (y, x)
+
+
 
 # Exogenous States
 proc_one_coef_A = 0.7
@@ -97,7 +108,7 @@ genesis_states = {
     's1': Decimal(0.0),
     's2': Decimal(0.0),
     's3': Decimal(1.0),
-    's4': Decimal(1.0),
+    's4': Decimal(1.0)
     # 'timestep': '2018-10-01 15:16:24'
 }
@@ -156,12 +167,12 @@ sim_config = config_sim(
     }
 )
 
 
 append_configs(
     sim_configs=sim_config,
     initial_state=genesis_states,
     seeds=seeds,
     raw_exogenous_states=raw_exogenous_states,
     env_processes=env_processes,
-    partial_state_update_blocks=partial_state_update_block
+    partial_state_update_blocks=partial_state_update_block,
+    policy_ops=[lambda a, b: a + b]
 )
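
Note: with the policy functions above (p1m1 returns {'param1': 1}, p2m1 now returns {'param1': 1, 'param2': 4}) and the new policy_ops=[lambda a, b: a + b], the aggregated policy input passed to the m1 state update functions should come out as a per-key sum:

    # {'param1': 1 + 1, 'param2': 4}  ==  {'param1': 2, 'param2': 4}
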
@@ -0,0 +1,106 @@
+from decimal import Decimal
+from functools import reduce
+
+import numpy as np
+from datetime import timedelta
+
+from cadCAD.configuration.utils.policyAggregation import get_base_value
+
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import proc_trigger, bound_norm_random, ep_time_step, config_sim
+
+seeds = {
+    'z': np.random.RandomState(1),
+    'a': np.random.RandomState(2),
+    'b': np.random.RandomState(3),
+    'c': np.random.RandomState(3)
+}
+
+
+# Policies per Mechanism
+def p1m1(_g, step, sL, s):
+    return {'param1': 1}
+def p2m1(_g, step, sL, s):
+    return {'param2': 2}
+
+# []
+
+def p1m2(_g, step, sL, s):
+    return {'param1': 2, 'param2': 2}
+def p2m2(_g, step, sL, s):
+    return {'param1': 2, 'param2': 2}
+
+def p1m3(_g, step, sL, s):
+    return {'param1': 1, 'param2': 2, 'param3': 3}
+def p2m3(_g, step, sL, s):
+    return {'param1': 1, 'param2': 2, 'param3': 3}
+
+def test_pipeline(_g, step, sL, s):
+    return {'test': 2, 'param2': 2}
+
+
+# Internal States per Mechanism
+def policies(_g, step, sL, s, _input):
+    y = 'policies'
+    x = _input
+    return (y, x)
+
+# Genesis States
+genesis_states = {
+    'policies': {}
+}
+
+raw_exogenous_states = {}
+
+env_processes = {}
+
+partial_state_update_block = {
+    "m1": {
+        "policies": {
+            "b1": p1m1,
+            "b2": p2m1
+        },
+        "variables": {
+            "policies": policies
+        }
+    },
+    "m2": {
+        "policies": {
+            "b1": p1m2,
+            "b2": p2m2
+        },
+        "variables": {
+            "policies": policies
+        }
+    },
+    "m3": {
+        "policies": {
+            "b1": p1m3,
+            "b2": p2m3
+        },
+        "variables": {
+            "policies": policies
+        }
+    }
+}
+
+
+sim_config = config_sim(
+    {
+        "N": 2,
+        "T": range(5),
+    }
+)
+
+append_configs(
+    sim_configs=sim_config,
+    initial_state=genesis_states,
+    seeds=seeds,
+    raw_exogenous_states=raw_exogenous_states,
+    env_processes=env_processes,
+    partial_state_update_blocks=partial_state_update_block,
+    policy_ops=[lambda a, b: a + b] # , lambda y: y + 100, lambda y: y + 300
+)
@@ -2,19 +2,30 @@ from datetime import timedelta
 
 from cadCAD.configuration import append_configs
 from cadCAD.configuration.utils import ep_time_step, config_sim
-from cadCAD.configuration.utils.policyAggregation import dict_op, dict_elemwise_sum
-from cadCAD.configuration.utils.udc import udcBroker, next_udc_view, generate_udc_view
+# from cadCAD.configuration.utils.policyAggregation import dict_op, dict_elemwise_sum
+from cadCAD.configuration.utils.udo import udcBroker, udoPipe, UDO
+import pandas as pd
+from cadCAD.utils import SilentDF, val_switch
+
+ds = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/output.csv'))
 
-# ToDo: Create member for past value
+
 class MyClass(object):
-    def __init__(self, x):
+    def __init__(self, x, ds=None):
         self.x = x
+        self.ds = ds # for setting ds initially or querying
 
     def update(self):
         self.x += 1
         return self
 
+    def read(self, ds_uri):
+        self.ds = SilentDF(pd.read_csv(ds_uri))
+        return self
+
+    def write(self, ds_uri):
+        pd.to_csv(ds_uri)
+
     def getMemID(self):
         return str(hex(id(self)))
@@ -23,12 +34,11 @@ class MyClass(object):
 
 # can be accessed after an update within the same substep and timestep
 
-# udc = MyClassA(0)
-# wrapped_udc = UDC(udc)
-# hydra_members = wrapped_udc.get_object()
-hydra_state_view = generate_udc_view(MyClass(0))
-udc_view_B = generate_udc_view(MyClass(0))
-udc_view_C = generate_udc_view(MyClass(0))
+hydra_state_view = UDO(MyClass(0, ds))
+udc_view_A = UDO(MyClass(0, ds))
+udc_view_B = UDO(MyClass(0, ds))
+
+print(udc_view_A)
 
 # g: Dict[str, List[int]] = {'MyClassB'}
@@ -36,17 +46,33 @@ state_dict = {
     'a': 0, 'b': 0, 'j': 0,
     'k': (0, 0), 'q': (0, 0),
     'hydra_state': hydra_state_view,
-    'policies': {'hydra_B': udc_view_B, 'hydra_C': udc_view_C},
-    'timestamp': '2019-01-01 00:00:00'
+    'policies': {'hydra_A': udc_view_A, 'hydra_B': udc_view_B},
+    'timestamp': '2019-01-01 00:00:00',
+    'c': {"ds1": None, "ds2": None, "ds3": None, "timestep": None}
 }
 
 def p1(_g, step, sL, s):
-    s['policies']['hydra_B'].update()
-    return {'hydra_B': next_udc_view(s['policies']['hydra_B'])}
+    s['policies']['hydra_A'].update()
+    return {'hydra_A': udoPipe(s['policies']['hydra_A'])}
 
 def p2(_g, step, sL, s):
-    s['policies']['hydra_C'].update()
-    return {'hydra_C': next_udc_view(s['policies']['hydra_C'])}
+    s['policies']['hydra_B'].update()
+    # df = s['policies']['hydra_B'].ds
+    return {'hydra_B': udoPipe(s['policies']['hydra_B'])}
+
+# ToDo: SilentDF(df) wont work
+def C(_g, step, sL, s, _input):
+    y = 'c'
+    ds = _input['hydra_B'].ds
+    df = ds[(ds['run'] == s['run']) & (ds['substep'] == s['substep']) & (ds['timestep'] == s['timestep'])].drop(columns=['run', 'substep'])
+    def pop_if_not_empty(l):
+        if len(l) == 0:
+            return None
+        else:
+            return l.pop()
+
+    x = {k: pop_if_not_empty(list(v.values())) for k, v in df.to_dict().items()} # reomve idx
+    return (y, x)
 
 def policies(_g, step, sL, s, _input):
     y = 'policies'
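
Note: C looks up the row of the external DataFrame (carried on the UDO view as .ds) that matches the current run/substep/timestep and stores it in the 'c' state. Roughly, per the code above, with column names taken from the CSV written by the updated run script:

    # ds has columns like run, substep, timestep, ds1, ds2, ds3
    row = ds[(ds['run'] == 1) & (ds['substep'] == 1) & (ds['timestep'] == 1)]
    # after dropping run/substep, each remaining column collapses to a single
    # value, or None when the filter matches nothing:
    # {'ds1': ..., 'ds2': ..., 'ds3': ..., 'timestep': 1}
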
@@ -63,13 +89,8 @@ def time_model(_g, step, sL, s, _input):
 
 def HydraMembers(_g, step, sL, s, _input):
     y = 'hydra_state'
-    # PROBLEM:
-    # s['hydra_members'].update()
-    # x = s['hydra_members']
-
-    # SOLUTION:
     s['hydra_state'].update()
-    x = next_udc_view(s['hydra_state'])
+    x = udoPipe(s['hydra_state'])
     return (y, x)
 
 def repr(_g, step, sL, s, _input):
@@ -77,6 +98,9 @@ def repr(_g, step, sL, s, _input):
     x = s['hydra_members'].__repr__()
     return (y, x)
 
+def incriment(y, incr_val):
+    return lambda _g, step, sL, s, _input: (y, s[y] + incr_val)
+
 def A(_g, step, sL, s, _input):
     y = 'a'
     x = s['a'] + 1
@@ -87,7 +111,7 @@ def hydra_state_tracker(y):
 
 
 def hydra_policy_tracker(y):
-    return lambda _g, step, sL, s, _input: (y, tuple(v.x for k, v in s['policies'].items()))
+    return lambda _g, step, sL, s, _input: (y, tuple(val_switch(v) for k, v in s['policies'].items()))
 
 
 # needs M1&2 need behaviors
@@ -100,6 +124,7 @@ partial_state_update_blocks = {
         'states': {
             'a': A,
             'b': hydra_state_tracker('b'),
+            'c': C,
             'j': hydra_state_tracker('j'),
             'k': hydra_policy_tracker('k'),
             'q': hydra_policy_tracker('q'),
@@ -116,6 +141,7 @@ partial_state_update_blocks = {
         'states': {
             'a': A,
             'b': hydra_state_tracker('b'),
+            'c': C,
             'j': hydra_state_tracker('j'),
             'k': hydra_policy_tracker('k'),
             'q': hydra_policy_tracker('q'),
@@ -131,6 +157,7 @@ partial_state_update_blocks = {
         'states': {
             'a': A,
             'b': hydra_state_tracker('b'),
+            'c': C,
             'j': hydra_state_tracker('j'),
             'k': hydra_policy_tracker('k'),
             'q': hydra_policy_tracker('q'),
@@ -144,8 +171,16 @@ sim_config = config_sim({
     "N": 2,
     "T": range(4)
 })
+z = {'z': 1}
 
-append = lambda a, b: [a, b]
-update_dict = lambda a, b: a.update(b)
-take_first = lambda a, b: [a, b]
-append_configs(sim_config, state_dict, {}, {}, {}, partial_state_update_blocks)#, policy_ops=[foldr(dict_op(take_first))])
+def addZ(d, z):
+    d.update(z)
+    return d
+
+append_configs(
+    sim_config,
+    state_dict,
+    {}, {}, {},
+    partial_state_update_blocks,
+    policy_ops=[lambda a, b: {**a, **b}]
+)
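
Note: this file now passes policy_ops=[lambda a, b: {**a, **b}] explicitly. Under the per-key aggregation introduced in the engine above, that op only runs when two policies emit the same key; here p1 and p2 emit distinct keys ('hydra_A', 'hydra_B'), so the aggregated policy input is simply their union:

    # p1 -> {'hydra_A': <udo view>}, p2 -> {'hydra_B': <udo view>}
    # aggregated _input -> {'hydra_A': <udo view>, 'hydra_B': <udo view>}
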
@@ -0,0 +1,146 @@
+from datetime import timedelta
+
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import ep_time_step, config_sim
+from cadCAD.configuration.utils.policyAggregation import dict_op, dict_elemwise_sum
+from cadCAD.configuration.utils.udo import udcBroker, udoPipe, UDO
+
+
+# ToDo: Create member for past value
+class MyClass(object):
+    def __init__(self, x):
+        self.x = x
+
+    def update(self):
+        self.x += 1
+        return self
+
+    def getMemID(self):
+        return str(hex(id(self)))
+
+    pass
+
+
+# can be accessed after an update within the same substep and timestep
+
+hydra_state_view = UDO(MyClass(0))
+udc_view_B = UDO(MyClass(0))
+udc_view_C = UDO(MyClass(0))
+
+# g: Dict[str, List[int]] = {'MyClassB'}
+
+state_dict = {
+    'a': 0, 'b': 0, 'j': 0,
+    'k': (0, 0), 'q': (0, 0),
+    'hydra_state': hydra_state_view,
+    'policies': {'hydra_B': udc_view_B, 'hydra_C': udc_view_C},
+    'timestamp': '2019-01-01 00:00:00'
+}
+
+def p1(_g, step, sL, s):
+    s['policies']['hydra_B'].update()
+    return {'hydra_B': udoPipe(s['policies']['hydra_B'])}
+
+def p2(_g, step, sL, s):
+    s['policies']['hydra_C'].update()
+    return {'hydra_C': udoPipe(s['policies']['hydra_C'])}
+
+def policies(_g, step, sL, s, _input):
+    y = 'policies'
+    x = _input
+    return (y, x)
+
+timestep_duration = timedelta(minutes=1) # In this example, a timestep has a duration of 1 minute.
+ts_format = '%Y-%m-%d %H:%M:%S'
+def time_model(_g, step, sL, s, _input):
+    y = 'timestamp'
+    x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=timestep_duration)
+    return (y, x)
+
+
+def HydraMembers(_g, step, sL, s, _input):
+    y = 'hydra_state'
+    s['hydra_state'].update()
+    x = udoPipe(s['hydra_state'])
+    return (y, x)
+
+def repr(_g, step, sL, s, _input):
+    y = 'z'
+    x = s['hydra_members'].__repr__()
+    return (y, x)
+
+def incriment(y, incr_val):
+    return lambda _g, step, sL, s, _input: (y, s[y] + incr_val)
+
+def A(_g, step, sL, s, _input):
+    y = 'a'
+    x = s['a'] + 1
+    return (y, x)
+
+def hydra_state_tracker(y):
+    return lambda _g, step, sL, s, _input: (y, s['hydra_state'].x)
+
+
+def hydra_policy_tracker(y):
+    return lambda _g, step, sL, s, _input: (y, tuple(v.x for k, v in s['policies'].items()))
+
+
+# needs M1&2 need behaviors
+partial_state_update_blocks = {
+    'PSUB1': {
+        'policies': {
+            "b1": p1,
+            "b2": p2
+        },
+        'states': {
+            'a': A,
+            'b': hydra_state_tracker('b'),
+            'j': hydra_state_tracker('j'),
+            'k': hydra_policy_tracker('k'),
+            'q': hydra_policy_tracker('q'),
+            'hydra_state': HydraMembers,
+            'timestamp': time_model,
+            'policies': policies
+        }
+    },
+    'PSUB2': {
+        'policies': {
+            "b1": p1,
+            "b2": p2
+        },
+        'states': {
+            'a': A,
+            'b': hydra_state_tracker('b'),
+            'j': hydra_state_tracker('j'),
+            'k': hydra_policy_tracker('k'),
+            'q': hydra_policy_tracker('q'),
+            'hydra_state': HydraMembers,
+            'timestamp': time_model,
+            'policies': policies
+        }
+    },
+    'PSUB3': {
+        'policies': {
+            "b1": p1,
+            "b2": p2
+        },
+        'states': {
+            'a': A,
+            'b': hydra_state_tracker('b'),
+            'j': hydra_state_tracker('j'),
+            'k': hydra_policy_tracker('k'),
+            'q': hydra_policy_tracker('q'),
+            'hydra_state': HydraMembers,
+            'timestamp': time_model,
+            'policies': policies
+        }
+    }
+}
+
+sim_config = config_sim({
+    "N": 2,
+    "T": range(4)
+})
+
+append = lambda a, b: [a, b]
+update_dict = lambda a, b: a.update(b)
+take_first = lambda a, b: [a, b]
+append_configs(sim_config, state_dict, {}, {}, {}, partial_state_update_blocks)#, policy_ops=[foldr(dict_op(take_first))])
@@ -0,0 +1,118 @@
+from decimal import Decimal
+
+import numpy as np
+
+from cadCAD.configuration import append_configs
+from cadCAD.configuration.utils import bound_norm_random, config_sim
+
+seeds = {
+    'z': np.random.RandomState(1),
+    'a': np.random.RandomState(2),
+    'b': np.random.RandomState(3),
+    'c': np.random.RandomState(3)
+}
+
+
+# Policies per Mechanism
+def p1(_g, step, sL, s):
+    return {'param1': 10}
+def p2(_g, step, sL, s):
+    return {'param1': 10, 'param2': 40}
+
+
+# Internal States per Mechanism
+def s1(_g, step, sL, s, _input):
+    y = 'ds1'
+    x = s['ds1'] + 1
+    return (y, x)
+def s2(_g, step, sL, s, _input):
+    y = 'ds2'
+    x = _input['param2']
+    return (y, x)
+
+
+# Exogenous States
+proc_one_coef_A = 0.7
+proc_one_coef_B = 1.3
+
+def es(_g, step, sL, s, _input):
+    y = 'ds3'
+    x = s['ds3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
+    return (y, x)
+
+
+# Environment States
+def env_a(x):
+    return 5
+def env_b(x):
+    return 10
+
+
+# Genesis States
+genesis_states = {
+    'ds1': Decimal(0.0),
+    'ds2': Decimal(0.0),
+    'ds3': Decimal(1.0)
+}
+
+
+raw_exogenous_states = {
+    "ds3": es
+}
+
+
+env_processes = {
+    "ds3": env_a
+}
+
+
+partial_state_update_block = {
+    "m1": {
+        "policies": {
+            "p1": p1,
+            "p2": p2
+        },
+        "variables": {
+            "ds1": s1,
+            "ds2": s2
+        }
+    },
+    "m2": {
+        "policies": {
+            "p1": p1,
+            "p2": p2
+        },
+        "variables": {
+            "ds1": s1,
+            "ds2": s2
+        }
+    },
+    "m3": {
+        "policies": {
+            "p1": p1,
+            "p2": p2
+        },
+        "variables": {
+            "ds1": s1,
+            "ds2": s2
+        }
+    }
+}
+
+
+sim_config = config_sim(
+    {
+        "N": 2,
+        "T": range(4),
+    }
+)
+
+append_configs(
+    sim_configs=sim_config,
+    initial_state=genesis_states,
+    seeds=seeds,
+    raw_exogenous_states=raw_exogenous_states,
+    env_processes=env_processes,
+    partial_state_update_blocks=partial_state_update_block,
+    policy_ops=[lambda a, b: a + b]
+)