Refactor Pt. 6: changed project structure / pip install
parent f1a9694470
commit 13d8d26ed4
@@ -1,63 +1,59 @@
 from pathos.multiprocessing import ProcessingPool as Pool
 import pandas as pd
 from tabulate import tabulate

 from SimCAD.utils import flatten
 from SimCAD.utils.ui import create_tensor_field
 from SimCAD.utils.configProcessor import generate_config
+from SimCAD.engine.simulation import Executor as SimExecutor


-class ExecutionContext(object):
+class ExecutionContext:

-    def __init__(self):
-        def parallelize_simulations(fs, states_list, configs, env_processes, Ts, Ns):
-            l = list(zip(fs, states_list, configs, env_processes, Ts, Ns))
-            with Pool(len(configs)) as p:
-                results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5]), l)
-
-            return results
-
-        self.parallelize_simulations = parallelize_simulations
+    def parallelize_simulations(self, fs, states_list, configs, env_processes, Ts, Ns):
+        l = list(zip(fs, states_list, configs, env_processes, Ts, Ns))
+        with Pool(len(configs)) as p:
+            results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5]), l)
+
+        return results


-class Executor(object):
+class Executor:

     def __init__(self, ExecutionContext, configs):
-        from SimCAD.engine.simulation import Executor
-
-        def execute():
-            ec = ExecutionContext()
-            print(configs)
-            states_lists, Ts, Ns, eps, configs_structs, env_processes_list, mechanisms, simulation_execs = \
-                [], [], [], [], [], [], [], []
-            config_idx = 0
-            for x in configs:
-                states_lists.append([x.state_dict])
-                Ts.append(x.sim_config['T'])
-                Ns.append(x.sim_config['N'])
-                eps.append(list(x.exogenous_states.values()))
-                configs_structs.append(generate_config(x.state_dict, x.mechanisms, eps[config_idx]))
-                env_processes_list.append(x.env_processes)
-                mechanisms.append(x.mechanisms)
-                simulation_execs.append(Executor(x.behavior_ops).simulation)
-
-                config_idx += 1
-
-            # Dimensions: N x r x mechs
-
-            if len(configs) > 1:
-                simulations = ec.parallelize_simulations(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
-                results = []
-                for result, mechanism, ep in list(zip(simulations, mechanisms, eps)):
-                    print(tabulate(create_tensor_field(mechanism, ep), headers='keys', tablefmt='psql'))
-                    results.append(flatten(result))
-                return results
-            else:
-                simulation, states_list, config = simulation_execs.pop(), states_lists.pop(), configs_structs.pop()
-                env_processes, T, N = env_processes_list.pop(), Ts.pop(), Ns.pop()
-                result = simulation(states_list, config, env_processes, T, N)
-                # print(flatten(result))
-                return flatten(result)
-
-        self.main = execute
+        self.SimExecutor = SimExecutor
+        self.ExecutionContext = ExecutionContext
+        self.configs = configs
+        self.main = self.execute
+
+    def execute(self):
+        ec = ExecutionContext()
+        print(self.configs)
+        states_lists, Ts, Ns, eps, configs_structs, env_processes_list, mechanisms, simulation_execs = \
+            [], [], [], [], [], [], [], []
+        config_idx = 0
+        for x in self.configs:
+            states_lists.append([x.state_dict])
+            Ts.append(x.sim_config['T'])
+            Ns.append(x.sim_config['N'])
+            eps.append(list(x.exogenous_states.values()))
+            configs_structs.append(generate_config(x.state_dict, x.mechanisms, eps[config_idx]))
+            env_processes_list.append(x.env_processes)
+            mechanisms.append(x.mechanisms)
+            simulation_execs.append(SimExecutor(x.behavior_ops).simulation)
+
+            config_idx += 1
+
+        # Dimensions: N x r x mechs
+
+        if len(self.configs) > 1:
+            simulations = ec.parallelize_simulations(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
+            results = []
+            for result, mechanism, ep in list(zip(simulations, mechanisms, eps)):
+                print(tabulate(create_tensor_field(mechanism, ep), headers='keys', tablefmt='psql'))
+                results.append(flatten(result))
+            return results
+        else:
+            simulation, states_list, config = simulation_execs.pop(), states_lists.pop(), configs_structs.pop()
+            env_processes, T, N = env_processes_list.pop(), Ts.pop(), Ns.pop()
+            result = simulation(states_list, config, env_processes, T, N)
+            return flatten(result)
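Net effect of this hunk: ExecutionContext.parallelize_simulations and Executor.execute become ordinary methods instead of closures stored on the instance in __init__, and the simulation executor is now imported once at module level as SimExecutor. A minimal usage sketch, assuming the two classes are importable from SimCAD.engine and that configs is the list of configuration objects iterated over above (the import path and variable names here are assumptions for illustration):

    from SimCAD.engine import ExecutionContext, Executor

    run = Executor(ExecutionContext, configs)
    raw_results = run.main()   # public entry point, now bound to Executor.execute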
@@ -25,7 +25,7 @@ class Executor(object):
             state_dict[state] = env_processes[state](step)(state_dict[state])

     # remove / modify
-    def exception_handler(f, m_step, sL, last_mut_obj, _input):
+    def exception_handler(self, f, m_step, sL, last_mut_obj, _input):
         try:
             return f(m_step, sL, last_mut_obj, _input)
         except KeyError:
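This hunk, like the ones below it, swaps an explicit Executor.method(self, ...) call and a self-less signature for an ordinary bound-method call. Both spellings invoke the same function; calling through the class just passes self by hand. A toy illustration of the equivalence (the class and names are made up for the example):

    class Demo:
        def scale(self, x):
            return 2 * x

    d = Demo()
    assert Demo.scale(d, 3) == d.scale(3) == 6   # unbound-style and bound calls agree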
@@ -36,12 +36,12 @@ class Executor(object):
     def mech_step(self, m_step, sL, state_funcs, behavior_funcs, env_processes, t_step, run):
         last_in_obj = sL[-1]

-        _input = Executor.getBehaviorInput(self, m_step, sL, last_in_obj, behavior_funcs)
+        _input = self.getBehaviorInput(m_step, sL, last_in_obj, behavior_funcs)

         # print(sL)

         # *** add env_proc value here as wrapper function ***
-        last_in_copy = dict([ Executor.exception_handler(f, m_step, sL, last_in_obj, _input) for f in state_funcs ])
+        last_in_copy = dict([ self.exception_handler(f, m_step, sL, last_in_obj, _input) for f in state_funcs ])

         for k in last_in_obj:
             if k not in last_in_copy:
@@ -71,7 +71,7 @@ class Executor(object):
             m_step += 1
             for config in configs:
                 s_conf, b_conf = config[0], config[1]
-                states_list = Executor.mech_step(self, m_step, states_list, s_conf, b_conf, env_processes, t_step, run)
+                states_list = self.mech_step(m_step, states_list, s_conf, b_conf, env_processes, t_step, run)
                 m_step += 1

             t_step += 1
@@ -84,7 +84,7 @@ class Executor(object):
         time_seq = [x + 1 for x in time_seq]
         simulation_list = [states_list]
         for time_step in time_seq:
-            pipe_run = Executor.block_gen(self, simulation_list[-1], configs, env_processes, time_step, run)
+            pipe_run = self.block_gen(simulation_list[-1], configs, env_processes, time_step, run)
             _, *pipe_run = pipe_run
             simulation_list.append(pipe_run)

@@ -97,13 +97,13 @@ class Executor(object):
         for run in range(runs):
             run += 1
             if run == 1:
-                head, *tail = Executor.pipe(self, states_list, configs, env_processes, time_seq, run)
+                head, *tail = self.pipe(states_list, configs, env_processes, time_seq, run)
                 head[-1]['mech_step'], head[-1]['time_step'], head[-1]['run'] = 0, 0, 0
                 simulation_list = [head] + tail
                 pipe_run += simulation_list
             else:
                 transient_states_list = [pipe_run[-1][-1]]
-                _, *tail = Executor.pipe(self, transient_states_list, configs, env_processes, time_seq, run)
+                _, *tail = self.pipe(transient_states_list, configs, env_processes, time_seq, run)
                 pipe_run += tail

         return pipe_run
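The star-unpacking used in these run/pipe hunks (head, *tail = ..., _, *pipe_run = pipe_run) splits off the seed states-list so it is not duplicated between blocks and runs. A one-line illustration with made-up data:

    head, *tail = [[{'s': 0}], [{'s': 1}], [{'s': 2}]]
    # head == [{'s': 0}]; tail == [[{'s': 1}], [{'s': 2}]]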
@@ -118,7 +118,7 @@ env_processes = {

 # [1, 2] = {'b1': ['a'], 'b2', [1]} =
 # behavior_ops = [ behavior_to_dict, print_fwd, sum_dict_values ]
-behavior_ops = [ foldr(dict_elemwise_sum()) ]
+behavior_ops = [foldr(dict_elemwise_sum())]
 # behavior_ops = []

 # need at least 1 behaviour and 1 state function for the 1st mech with behaviors
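The aggregator kept on this line folds the per-behavior output dicts into a single dict, by key-wise summation as the name dict_elemwise_sum suggests. A rough plain-Python sketch of that presumed semantics (an illustration only, not the SimCAD implementation of foldr or dict_elemwise_sum):

    from functools import reduce

    def dict_elemwise_sum(a, b):
        # key-wise sum of two behavior-output dicts (illustrative)
        return {k: a.get(k, 0) + b.get(k, 0) for k in set(a) | set(b)}

    behavior_outputs = [{'s1': 1, 's2': 2}, {'s1': 3}]
    aggregate = reduce(dict_elemwise_sum, behavior_outputs)   # {'s1': 4, 's2': 2}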
@@ -21,7 +21,4 @@ run2_raw_results = run2.main()
 for raw_result in run2_raw_results:
     result = pd.DataFrame(raw_result)
     print(tabulate(result, headers='keys', tablefmt='psql'))
     print()
-
-
-print()