test partially done

Joshua E. Jodesty 2019-06-07 10:40:45 -04:00
parent 5877d20fc6
commit 964e3f7bc1
15 changed files with 517 additions and 12 deletions

View File

@@ -202,19 +202,20 @@ def genereate_psubs(policy_grid, states_grid, policies, state_updates):
     return PSUBS

-def access_block(sH, y, psu_block_offset, exculsion_list=[]):
-    exculsion_list += [y]
+# ToDo: DO NOT filter sH for every state/policy update. Requires a consumable sH (new sH)
+def access_block(state_history, target_field, psu_block_offset, exculsion_list=[]):
+    exculsion_list += [target_field]
     def filter_history(key_list, sH):
         filter = lambda key_list: \
             lambda d: {k: v for k, v in d.items() if k not in key_list}
         return list(map(filter(key_list), sH))

     if psu_block_offset < -1:
-        if len(sH) >= abs(psu_block_offset):
-            return filter_history(exculsion_list, sH[psu_block_offset])
+        if len(state_history) >= abs(psu_block_offset):
+            return filter_history(exculsion_list, state_history[psu_block_offset])
         else:
             return []
-    elif psu_block_offset < 0:
-        return filter_history(exculsion_list, sH[psu_block_offset])
+    elif psu_block_offset == -1:
+        return filter_history(exculsion_list, state_history[psu_block_offset])
     else:
         return []
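For orientation, here is a minimal sketch of how the renamed signature behaves, assuming a hypothetical two-block state history (the import path matches the system model added later in this commit; the data below is illustrative, not from the repository). Note that `exculsion_list += [target_field]` mutates the caller's list in place, so a shared exclusion list accumulates target fields across calls.

from cadCAD.configuration.utils import access_block

# Hypothetical state history: a list of partial-state-update blocks,
# each block a list of state-record dicts.
history = [
    [{'x': 0, 'last_x': [], 'substep': 0, 'timestep': 0}],
    [{'x': 1, 'last_x': [], 'substep': 1, 'timestep': 1},
     {'x': 2, 'last_x': [], 'substep': 2, 'timestep': 1}],
]

# Previous block, with the target field (plus anything in exculsion_list) filtered out of each record:
access_block(history, 'last_x', -1, [])
# -> [{'x': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'substep': 2, 'timestep': 1}]

# Offsets that reach past the available history, or non-negative offsets, fall back to []:
access_block(history, 'last_x', -3, [])
# -> []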

View File

@@ -7,9 +7,15 @@ exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4
 # Policies per Mechanism
 # WARNING: DO NOT delete elements from sH
 # state_history, target_field, psu_block_offset, exculsion_list
 def last_update(_g, substep, sH, s):
-    return {"last_x": access_block(sH, "last_x", -1, exclusion_list)}
+    return {"last_x": access_block(
+        state_history=sH,
+        target_field="last_x",
+        psu_block_offset=-1,
+        exculsion_list=exclusion_list
+        )
+    }
 policies["last_x"] = last_update

 def second2last_update(_g, substep, sH, s):

View File

@@ -1,4 +1,3 @@
-import numpy as np
 from cadCAD.configuration import append_configs
 from cadCAD.configuration.utils import config_sim
@@ -73,14 +72,12 @@ sim_config = config_sim(
    }
)

# Aggregation == Reduce Map / Reduce Map Aggregation
# ToDo: subsequent functions should accept the entire datastructure
# using env functions (include in reg test using / for env proc)
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=partial_state_update_block,
    # ToDo: subsequent functions should include policy dict for access to each policy (i.e shouldnt be a map)
    policy_ops=[lambda a, b: a + b, lambda y: y * 2]  # Default: lambda a, b: a + b ToDO: reduction function requires high lvl explanation
)
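The `policy_ops` ToDo above asks for a high-level explanation of the reduction. Judging from the expected results added later in this commit, the policy dicts returned within a partial state update block appear to be folded key-by-key with the first (binary) op and then passed through the remaining unary ops. A rough standalone sketch (not cadCAD's internal code) that reproduces those numbers:

from collections import defaultdict
from functools import reduce

def aggregate(policy_dicts, reduction, *maps):
    # Fold colliding keys with the binary reduction, then apply each unary map.
    collected = defaultdict(list)
    for d in policy_dicts:
        for k, v in d.items():
            collected[k].append(v)
    out = {k: reduce(reduction, vs) for k, vs in collected.items()}
    for m in maps:
        out = {k: m(v) for k, v in out.items()}
    return out

# PSU block m1: p1m1 -> {'policy1': 1}, p2m1 -> {'policy2': 2}
assert aggregate([{'policy1': 1}, {'policy2': 2}],
                 lambda a, b: a + b, lambda y: y * 2) == {'policy1': 2, 'policy2': 4}
# PSU block m3: both policies return {'policy1': 1, 'policy2': 2, 'policy3': 3}
assert aggregate([{'policy1': 1, 'policy2': 2, 'policy3': 3}] * 2,
                 lambda a, b: a + b, lambda y: y * 2) == {'policy1': 4, 'policy2': 8, 'policy3': 12}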

View File

@@ -0,0 +1,36 @@
import unittest

import pandas as pd
# from tabulate import tabulate

from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import policy_aggregation
from cadCAD import configs

exec_mode = ExecutionMode()

first_config = configs  # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)


class TestStringMethods(unittest.TestCase):
    def __init__(self, methodName='runTest',
                 result: pd.DataFrame = result,
                 tensor_field: pd.DataFrame = tensor_field) -> None:
        # unittest instantiates test cases with a methodName argument, so the
        # override has to accept it and forward it to the base class.
        super().__init__(methodName)
        self.result = result
        self.tensor_field = tensor_field

    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')

    def test_isupper(self):
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        s = 'hello world'
        self.assertEqual(s.split(), ['hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(2)


if __name__ == '__main__':
    unittest.main()
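If constructor injection stays awkward, a common alternative (a sketch, not part of this commit) is to attach the module-level DataFrames as class attributes in `setUpClass`:

class TestSimulationOutput(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Reuse the module-level result / tensor_field computed above.
        cls.result = result
        cls.tensor_field = tensor_field

    def test_result_not_empty(self):
        self.assertFalse(self.result.empty)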

View File

@@ -1,9 +1,12 @@
from pprint import pprint
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.regression_tests import policy_aggregation
from cadCAD import configs
from testing.utils import generate_assertions
exec_mode = ExecutionMode()
@@ -15,6 +18,7 @@ run = Executor(exec_context=single_proc_ctx, configs=first_config)
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))

testing/__init__.py Normal file (+0)
View File

testing/example.py Normal file (+20)
View File

@@ -0,0 +1,20 @@
import unittest


class TestStringMethods(unittest.TestCase):

    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')

    def test_isupper(self):
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        s = 'hello world'
        self.assertEqual(s.split(), ['hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(2)


if __name__ == '__main__':
    unittest.main()

testing/example2.py Normal file (+71)
View File

@@ -0,0 +1,71 @@
from functools import reduce

import pandas as pd
import unittest
from parameterized import parameterized
from tabulate import tabulate

from testing.system_models.policy_aggregation import run
from testing.generic_test import make_generic_test
from testing.utils import generate_assertions_df

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)

expected_results = {
    (1, 0, 0): {'policies': {}, 's1': 0},
    (1, 1, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 500},
    (1, 1, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 2},
    (1, 1, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 3},
    (1, 2, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 4},
    (1, 2, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 5},
    (1, 2, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 6},
    (1, 3, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 7},
    (1, 3, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 8},
    (1, 3, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 9}
}

params = [["policy_aggregation", result, expected_results, ['policies', 's1']]]


class TestSequence(unittest.TestCase):
    @parameterized.expand(params)
    def test_validate_results(self, name, result_df, expected_reults, target_cols):
        # alt for (*) Exec Debug mode
        tested_df = generate_assertions_df(result_df, expected_reults, target_cols)
        erroneous = tested_df[(tested_df['test'] == False)]
        for index, row in erroneous.iterrows():
            expected = expected_reults[(row['run'], row['timestep'], row['substep'])]
            unexpected = {k: expected[k] for k in expected if k in row and expected[k] != row[k]}
            for key in unexpected.keys():
                erroneous[f"invalid_{key}"] = unexpected[key]
                # etc.
        # def etc.

        print()
        print(tabulate(erroneous, headers='keys', tablefmt='psql'))
        self.assertEqual(reduce(lambda a, b: a and b, tested_df['test']), True)

        s = 'hello world'
        # self.assertEqual(s.split(), 1)
        # # check that s.split fails when the separator is not a string
        # with self.assertRaises(AssertionError):
        #     tested_df[(tested_df['test'] == False)]
        #     erroneous = tested_df[(tested_df['test'] == False)]
        #     for index, row in erroneous.iterrows():
        #         expected = expected_reults[(row['run'], row['timestep'], row['substep'])]
        #         unexpected = {k: expected[k] for k in expected if k in row and expected[k] != row[k]}
        #         for key in unexpected.keys():
        #             erroneous[f"invalid_{key}"] = unexpected[key]
        #             # etc.
        #
        #     # def etc.
        #
        #     print()
        #     print(tabulate(erroneous, headers='keys', tablefmt='psql'))


if __name__ == '__main__':
    unittest.main()

testing/generic_test.py Normal file (+39)
View File

@@ -0,0 +1,39 @@
import unittest
from parameterized import parameterized
from functools import reduce
from tabulate import tabulate

from testing.utils import generate_assertions_df

# ToDo: Exec Debug mode (*) for which state and policy updates are validated during runtime using `expected_results`
# EXAMPLE: ('state_test' T/F, 'policy_test' T/F)
# ToDo: (Sys Model Config) give `expected_results` to `Configuration` for Exec Debug mode (*)
# ToDo: (expected_results) Function to generate sys metrics keys using system model config
# ToDo: (expected_results) Function to generate target_vals given user input (apply fancy validation lib later on)
# ToDo: Use self.assertRaises(AssertionError)


def make_generic_test(params):
    class TestSequence(unittest.TestCase):
        @parameterized.expand(params)
        def test_validate_results(self, name, result_df, expected_reults, target_cols):
            # alt for (*) Exec Debug mode
            tested_df = generate_assertions_df(result_df, expected_reults, target_cols)
            erroneous = tested_df[(tested_df['test'] == False)]
            if erroneous.empty is False:
                for index, row in erroneous.iterrows():
                    expected = expected_reults[(row['run'], row['timestep'], row['substep'])]
                    unexpected = {k: expected[k] for k in expected if k in row and expected[k] != row[k]}
                    for key in unexpected.keys():
                        erroneous[f"invalid_{key}"] = unexpected[key]
                        # etc.
                print()
                print(tabulate(erroneous, headers='keys', tablefmt='psql'))
            self.assertTrue(reduce(lambda a, b: a and b, tested_df['test']))
        # def etc.

    return TestSequence
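As the test modules added later in this commit show, the returned class only needs to be bound to a module-level name for unittest discovery to pick it up:

# Usage pattern (mirrors the testing/ test modules added in this commit);
# `result` and `expected_results` are built per system model.
params = [["policy_aggregation", result, expected_results, ['policies', 's1']]]

class GenericTest(make_generic_test(params)):
    pass

if __name__ == '__main__':
    unittest.main()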

View File

@@ -0,0 +1,97 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim, access_block
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs

policies, variables = {}, {}
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']


# Policies per Mechanism
# WARNING: DO NOT delete elements from sH
# state_history, target_field, psu_block_offset, exculsion_list
def last_update(_g, substep, sH, s):
    return {"last_x": access_block(
        state_history=sH,
        target_field="last_x",
        psu_block_offset=-1,
        exculsion_list=exclusion_list
        )
    }
policies["last_x"] = last_update

def second2last_update(_g, substep, sH, s):
    return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
policies["2nd_to_last_x"] = second2last_update


# Internal States per Mechanism
# WARNING: DO NOT delete elements from sH
def add(y, x):
    return lambda _g, substep, sH, s, _input: (y, s[y] + x)
variables['x'] = add('x', 1)

# last_partial_state_update_block
def nonexsistant(_g, substep, sH, s, _input):
    return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)
variables['nonexsistant'] = nonexsistant

# last_partial_state_update_block
def last_x(_g, substep, sH, s, _input):
    return 'last_x', _input["last_x"]
variables['last_x'] = last_x

# 2nd to last partial state update block
def second_to_last_x(_g, substep, sH, s, _input):
    return '2nd_to_last_x', _input["2nd_to_last_x"]
variables['2nd_to_last_x'] = second_to_last_x

# 3rd to last partial state update block
def third_to_last_x(_g, substep, sH, s, _input):
    return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
variables['3rd_to_last_x'] = third_to_last_x

# 4th to last partial state update block
def fourth_to_last_x(_g, substep, sH, s, _input):
    return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
variables['4th_to_last_x'] = fourth_to_last_x

genesis_states = {
    'x': 0,
    'nonexsistant': [],
    'last_x': [],
    '2nd_to_last_x': [],
    '3rd_to_last_x': [],
    '4th_to_last_x': []
}

PSUB = {
    "policies": policies,
    "variables": variables
}

partial_state_update_block = {
    "PSUB1": PSUB,
    "PSUB2": PSUB,
    "PSUB3": PSUB
}

sim_config = config_sim(
    {
        "N": 1,
        "T": range(3),
    }
)

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=partial_state_update_block
)

exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=configs)
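The test modules added below execute this `run` object and wrap the raw output for inspection, along the lines of:

import pandas as pd
from tabulate import tabulate

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))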

View File

@@ -0,0 +1,90 @@
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs


# Policies per Mechanism
def p1m1(_g, step, sL, s):
    return {'policy1': 1}
def p2m1(_g, step, sL, s):
    return {'policy2': 2}

def p1m2(_g, step, sL, s):
    return {'policy1': 2, 'policy2': 2}
def p2m2(_g, step, sL, s):
    return {'policy1': 2, 'policy2': 2}

def p1m3(_g, step, sL, s):
    return {'policy1': 1, 'policy2': 2, 'policy3': 3}
def p2m3(_g, step, sL, s):
    return {'policy1': 1, 'policy2': 2, 'policy3': 3}


# Internal States per Mechanism
def add(y, x):
    return lambda _g, step, sH, s, _input: (y, s[y] + x)

def policies(_g, step, sH, s, _input):
    y = 'policies'
    x = _input
    return (y, x)


# Genesis States
genesis_states = {
    'policies': {},
    's1': 0
}

variables = {
    's1': add('s1', 1),
    "policies": policies
}

partial_state_update_block = {
    "m1": {
        "policies": {
            "p1": p1m1,
            "p2": p2m1
        },
        "variables": variables
    },
    "m2": {
        "policies": {
            "p1": p1m2,
            "p2": p2m2
        },
        "variables": variables
    },
    "m3": {
        "policies": {
            "p1": p1m3,
            "p2": p2m3
        },
        "variables": variables
    }
}

sim_config = config_sim(
    {
        "N": 1,
        "T": range(3),
    }
)

# Aggregation == Reduce Map / Reduce Map Aggregation
# ToDo: subsequent functions should accept the entire datastructure
# using env functions (include in reg test using / for env proc)
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=partial_state_update_block,
    policy_ops=[lambda a, b: a + b, lambda y: y * 2]  # Default: lambda a, b: a + b ToDO: reduction function requires high lvl explanation
)

exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=configs)

View File

View File

@@ -0,0 +1,84 @@
import unittest
import pandas as pd
from tabulate import tabulate
from testing.generic_test import make_generic_test
from testing.system_models.historical_state_access import run
from testing.utils import generate_assertions_df
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
expected_results = {
(1, 0, 0): {'x': 0, 'nonexsistant': [], 'last_x': [], '2nd_to_last_x': [], '3rd_to_last_x': [], '4th_to_last_x': []},
(1, 1, 1): {'x': 1,
'nonexsistant': [],
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'2nd_to_last_x': [],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 1, 2): {'x': 2,
'nonexsistant': [],
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'2nd_to_last_x': [],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 1, 3): {'x': 3,
'nonexsistant': [],
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'2nd_to_last_x': [],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 2, 1): {'x': 4,
'nonexsistant': [],
'last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'2nd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 2, 2): {'x': 5,
'nonexsistant': [],
'last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'2nd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 2, 3): {'x': 6,
'nonexsistant': [],
'last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'2nd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'3rd_to_last_x': [],
'4th_to_last_x': []},
(1, 3, 1): {'x': 7,
'nonexsistant': [],
'last_x': [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}],
'2nd_to_last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'4th_to_last_x': []},
(1, 3, 2): {'x': 8,
'nonexsistant': [],
'last_x': [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}],
'2nd_to_last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'4th_to_last_x': []},
(1, 3, 3): {'x': 9,
'nonexsistant': [],
'last_x': [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}],
'2nd_to_last_x': [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}],
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
'4th_to_last_x': []}
}
params = [["historical_state_access", result, expected_results,
           ['x', 'nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']]
          ]

# df = generate_assertions_df(result, expected_results,
#     ['x', 'nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']
# )
# print(tabulate(df, headers='keys', tablefmt='psql'))


class GenericTest(make_generic_test(params)):
    pass


if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,32 @@
import unittest
import pandas as pd
from testing.generic_test import make_generic_test
from testing.system_models.policy_aggregation import run
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
expected_results = {
(1, 0, 0): {'policies': {}, 's1': 0},
(1, 1, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 500},
(1, 1, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 2},
(1, 1, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 3},
(1, 2, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 4},
(1, 2, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 5},
(1, 2, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 6},
(1, 3, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 7},
(1, 3, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 8},
(1, 3, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 9}
}
params = [["policy_aggregation", result, expected_results, ['policies', 's1']]]

# df = generate_assertions_df(result, expected_results, ['policies', 's1'])
# print(tabulate(df, headers='keys', tablefmt='psql'))


class GenericTest(make_generic_test(params)):
    pass


if __name__ == '__main__':
    unittest.main()

testing/utils.py Normal file (+28)
View File

@@ -0,0 +1,28 @@
def gen_metric_row(row):
    return ((row['run'], row['timestep'], row['substep']), {'s1': row['s1'], 'policies': row['policies']})

def gen_metric_row(row):
    return {
        'run': row['run'],
        'timestep': row['timestep'],
        'substep': row['substep'],
        's1': row['s1'],
        'policies': row['policies']
    }

def gen_metric_dict(df):
    return [gen_metric_row(row) for index, row in df.iterrows()]


def generate_assertions_df(df, expected_results, target_cols):
    def df_filter(run, timestep, substep):
        return df[
            (df['run'] == run) & (df['timestep'] == timestep) & (df['substep'] == substep)
        ][target_cols].to_dict(orient='records')[0]

    df['test'] = df.apply(
        lambda x: \
            df_filter(x['run'], x['timestep'], x['substep']) == expected_results[(x['run'], x['timestep'], x['substep'])]
        , axis=1
    )
    return df
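A minimal self-contained sketch of how `generate_assertions_df` is meant to be used (the frame below is hypothetical; the real test modules build it from `run.execute()`):

import pandas as pd
from testing.utils import generate_assertions_df

# Hypothetical two-row result frame with the same shape the cadCAD runs produce.
result = pd.DataFrame([
    {'run': 1, 'timestep': 0, 'substep': 0, 's1': 0, 'policies': {}},
    {'run': 1, 'timestep': 1, 'substep': 1, 's1': 1, 'policies': {'policy1': 2}},
])
expected_results = {
    (1, 0, 0): {'s1': 0, 'policies': {}},
    (1, 1, 1): {'s1': 1, 'policies': {'policy1': 2}},
}

# Adds a boolean 'test' column: True where the target columns match expected_results.
tested = generate_assertions_df(result, expected_results, ['s1', 'policies'])
assert tested['test'].all()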