Compare commits
278 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
f5ca0857f3 | |
|
|
d9ff2997a1 | |
|
|
ddabdbb381 | |
|
|
17316277ff | |
|
|
c326f3c8c0 | |
|
|
422b0fb671 | |
|
|
319f74a89c | |
|
|
154a653c7f | |
|
|
3683aa30dc | |
|
|
837aad310e | |
|
|
4e5dca0cf9 | |
|
|
4fe3419b23 | |
|
|
7c870e584b | |
|
|
b515dd3fdb | |
|
|
5048976f71 | |
|
|
4e1f730c27 | |
|
|
9f96821b89 | |
|
|
460b1ff67c | |
|
|
b8fa090222 | |
|
|
01285e6320 | |
|
|
d3ef3d23f5 | |
|
|
ee8b3de331 | |
|
|
ce3eacd971 | |
|
|
86e683b268 | |
|
|
a2346046f3 | |
|
|
0619764aef | |
|
|
faae27f21e | |
|
|
dd872c3878 | |
|
|
130f85f0ef | |
|
|
bc4ab3113d | |
|
|
c57e2d9840 | |
|
|
81d666ce3e | |
|
|
f00b14d52e | |
|
|
5d0b1c4aec | |
|
|
8e76f3323b | |
|
|
99495d08dc | |
|
|
9a12b5d0d6 | |
|
|
429d2c9e0f | |
|
|
f249814aa9 | |
|
|
4f491bc8c9 | |
|
|
56c38dfd44 | |
|
|
411975913c | |
|
|
f931945eaf | |
|
|
d260754dc1 | |
|
|
d56b5c1c5f | |
|
|
792c62c213 | |
|
|
4f58a169c5 | |
|
|
db7de4fe4f | |
|
|
8768819790 | |
|
|
de9a708d43 | |
|
|
6489a75f1e | |
|
|
ac6e6eebda | |
|
|
342f3a519c | |
|
|
fc655d3741 | |
|
|
f9996163d0 | |
|
|
3c584a05bd | |
|
|
50c830db38 | |
|
|
4b381f81d7 | |
|
|
0d74ec5285 | |
|
|
8ad580e0fb | |
|
|
4faba2a37c | |
|
|
fe9c5f6caa | |
|
|
4dc06f581d | |
|
|
f5d5b28292 | |
|
|
063e56dc76 | |
|
|
2bfd37fecd | |
|
|
d7e6c1ba0d | |
|
|
104da824a2 | |
|
|
ef8e23481a | |
|
|
2a37eb5c02 | |
|
|
7b428ddb81 | |
|
|
7d0a14efbf | |
|
|
9ac9e238bb | |
|
|
747ec36e50 | |
|
|
67c46cfe09 | |
|
|
9399c6b728 | |
|
|
176593ae0f | |
|
|
715e6f9a74 | |
|
|
c55e433920 | |
|
|
bfdc7d0ad3 | |
|
|
d7fe3331f8 | |
|
|
964e3f7bc1 | |
|
|
fe8d9a1eac | |
|
|
5877d20fc6 | |
|
|
45b684f2bb | |
|
|
d892d74e31 | |
|
|
16aa71664d | |
|
|
3019715d83 | |
|
|
0a0d85c257 | |
|
|
4870f2db92 | |
|
|
f224df3ed4 | |
|
|
9f181e6b3f | |
|
|
7f28bae21a | |
|
|
2acf33d1f3 | |
|
|
b020d9e23f | |
|
|
2de989db0a | |
|
|
1c89d28ab5 | |
|
|
01c5945724 | |
|
|
71264c1c8f | |
|
|
3c91040401 | |
|
|
30e1c336e6 | |
|
|
9dbb866bd0 | |
|
|
c4863a838d | |
|
|
875f370c5e | |
|
|
a57e9d5ea3 | |
|
|
30127989c9 | |
|
|
2387dc071b | |
|
|
c05cb7ad05 | |
|
|
c9ecf54d0d | |
|
|
b2b466493b | |
|
|
295968b71f | |
|
|
d56d60d7a3 | |
|
|
ac44a7bee8 | |
|
|
b3b0356a8f | |
|
|
3cf3f45c08 | |
|
|
5f2d0801ca | |
|
|
e37601ae22 | |
|
|
d56e843fcc | |
|
|
7fc2e6503c | |
|
|
9e9f7be17e | |
|
|
cb6acce3d9 | |
|
|
9d9e33b766 | |
|
|
b0934b70aa | |
|
|
2fb0dcf754 | |
|
|
26d4a2d398 | |
|
|
fe1960797e | |
|
|
c0e7f821a2 | |
|
|
d932332fcc | |
|
|
516b77d693 | |
|
|
9c848b5cb9 | |
|
|
398565ecff | |
|
|
a2453e8adf | |
|
|
92559494d3 | |
|
|
dbf8e11d0b | |
|
|
ca81e4c2e2 | |
|
|
6b064707fc | |
|
|
03b53b59af | |
|
|
80e51f6c8c | |
|
|
7f4f6ddd77 | |
|
|
7fb764056f | |
|
|
d9002d4950 | |
|
|
2b9ab7cd46 | |
|
|
1862416b86 | |
|
|
19feab55e0 | |
|
|
e30388ff6b | |
|
|
5863188617 | |
|
|
ef9d73a32c | |
|
|
0c234e2f00 | |
|
|
d04e2bb7e4 | |
|
|
fe730c3e6c | |
|
|
e00605c073 | |
|
|
59ba3d9f21 | |
|
|
69dfaf391a | |
|
|
0895019991 | |
|
|
129b11fa4c | |
|
|
2c4b775d86 | |
|
|
ffd90b9ecd | |
|
|
00f5d53888 | |
|
|
9697ed488a | |
|
|
e2d68a0587 | |
|
|
b910c38ad9 | |
|
|
47cfc12560 | |
|
|
ed2ccf5421 | |
|
|
1988558fd4 | |
|
|
50e4a38df7 | |
|
|
2d4b7b612c | |
|
|
11f394cd8f | |
|
|
2310d5042c | |
|
|
dfb9c433b1 | |
|
|
7cecd7d534 | |
|
|
fcda21d513 | |
|
|
76fb452508 | |
|
|
ef4c0c1968 | |
|
|
06367f0573 | |
|
|
ed2f31cffc | |
|
|
8d56cf2939 | |
|
|
11d7ba7cf1 | |
|
|
2989dc2554 | |
|
|
6064647e4c | |
|
|
df4b7ce747 | |
|
|
20d7d620b7 | |
|
|
e73113754f | |
|
|
e78bfa3c8a | |
|
|
368fcaa13d | |
|
|
522d6dd343 | |
|
|
ddc67531bd | |
|
|
cccb491f2c | |
|
|
eaf2f4d291 | |
|
|
011e322706 | |
|
|
36512142fb | |
|
|
ccdf7ba80d | |
|
|
f7955b78fd | |
|
|
893f1d280a | |
|
|
53c8764563 | |
|
|
ef7b42a39a | |
|
|
b19819bd7d | |
|
|
25aa912c2b | |
|
|
a4c04ee20c | |
|
|
45f8fffe83 | |
|
|
2d752176eb | |
|
|
c58f2d65a6 | |
|
|
6b4ed2dfce | |
|
|
f9b3b1ea18 | |
|
|
52fbac381c | |
|
|
17362884dc | |
|
|
20a8bd3026 | |
|
|
3719ead0b1 | |
|
|
eaf9cf21ff | |
|
|
5729ffc0ed | |
|
|
a9c97467ae | |
|
|
9e277d3cf0 | |
|
|
cd729bf0a1 | |
|
|
e06cb00536 | |
|
|
fae948d885 | |
|
|
0e5daaf723 | |
|
|
20977436ec | |
|
|
2065287a5b | |
|
|
e81801c4cb | |
|
|
b2ae2ded30 | |
|
|
421dc7f184 | |
|
|
02d848e617 | |
|
|
ea56c55049 | |
|
|
d8e911adc0 | |
|
|
311219ca70 | |
|
|
be15871fe9 | |
|
|
5ed273450f | |
|
|
06fd76d096 | |
|
|
a7d79c6806 | |
|
|
18aa5e6da6 | |
|
|
c1da72c1d2 | |
|
|
0116dc49d4 | |
|
|
c6f5e5cce2 | |
|
|
9a7af89691 | |
|
|
06de968a60 | |
|
|
460bbbacd7 | |
|
|
796bf023ec | |
|
|
19503e3d32 | |
|
|
141680e3a1 | |
|
|
f9f945c20f | |
|
|
ae25a9ff04 | |
|
|
609e40ac40 | |
|
|
45530ae91f | |
|
|
43e8b8cfab | |
|
|
df57071821 | |
|
|
16fc324773 | |
|
|
73c6d21f12 | |
|
|
b3b50d0189 | |
|
|
7325980159 | |
|
|
0eeed616e0 | |
|
|
c8634c5331 | |
|
|
54a06a671b | |
|
|
061e60e98c | |
|
|
fe7c5a53fc | |
|
|
6bd54bd9d8 | |
|
|
80ec1a36b9 | |
|
|
cdcc207871 | |
|
|
81a200fe9c | |
|
|
d39bca6700 | |
|
|
1b52a4bebf | |
|
|
ab3a9e370d | |
|
|
4f9e320109 | |
|
|
2bb378fbf2 | |
|
|
9201f9f20e | |
|
|
40f24f0909 | |
|
|
b394f2be46 | |
|
|
a5623cc621 | |
|
|
b7f6d284a7 | |
|
|
7285449242 | |
|
|
f0f7456a76 | |
|
|
427a6a93cc | |
|
|
8a04f670b3 | |
|
|
e2752161c3 | |
|
|
d7a25176ec | |
|
|
a0266641f7 | |
|
|
181b7cf986 | |
|
|
f55124fbb0 | |
|
|
980bba081a | |
|
|
0014700208 |
|
|
@ -1,11 +1,30 @@
|
|||
.idea
|
||||
jupyter notebook
|
||||
.ipynb_checkpoints
|
||||
.DS_Store
|
||||
.idea
|
||||
notebooks/.ipynb_checkpoints
|
||||
SimCAD.egg-info
|
||||
.pytest_cache/
|
||||
notebooks
|
||||
*.egg-info
|
||||
__pycache__
|
||||
Pipfile
|
||||
Pipfile.lock
|
||||
results
|
||||
.mypy_cache
|
||||
simulations/scrapbox
|
||||
*.csv
|
||||
simulations/.ipynb_checkpoints
|
||||
simulations/validation/config3.py
|
||||
cadCAD.egg-info
|
||||
|
||||
build
|
||||
cadCAD.egg-info
|
||||
|
||||
testing/example.py
|
||||
testing/example2.py
|
||||
testing/multi_config_test.py
|
||||
testing/udo.py
|
||||
testing/udo_test.py
|
||||
|
||||
Simulation.md
|
||||
|
||||
monkeytype.sqlite3
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
Authors
|
||||
=======
|
||||
|
||||
cadCAD was originally implemented by Joshua E. Jodesty and designed by Michael Zargham, Markus B. Koch, and
|
||||
Matthew V. Barlin from 2018 to 2019.
|
||||
|
||||
|
||||
Project Maintainers:
|
||||
- Joshua E. Jodesty <joshua@block.science, joshua.jodesty@gmail.com>
|
||||
- Markus B. Koch <markus@block.science>
|
||||
|
||||
|
||||
Contributors:
|
||||
- Joshua E. Jodesty
|
||||
- Markus B. Koch
|
||||
- Matthew V. Barlin
|
||||
- Michael Zargham
|
||||
- Zixuan Zhang
|
||||
- Charles Rice
|
||||
|
||||
|
||||
We’d also like to thank:
|
||||
- Andrew Clark
|
||||
- Nikhil Jamdade
|
||||
- Nick Hirannet
|
||||
- Jonathan Gabler
|
||||
- Chris Frazier
|
||||
- Harry Goodnight
|
||||
- Charlie Hoppes
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
# Contributing to cadCAD (Draft)
|
||||
|
||||
:+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
|
||||
|
||||
The following is a set of guidelines for contributing to cadCAD. These are mostly guidelines, not rules.
|
||||
Use your best judgment, and feel free to propose changes to this document in a pull request.
|
||||
|
||||
### Pull Requests:
|
||||
|
||||
Pull Request (PR) presented as "->".
|
||||
|
||||
General Template:
|
||||
fork/branch -> BlockScience/staging
|
||||
|
||||
Contributing a new feature:
|
||||
fork/feature -> BlockScience/staging
|
||||
|
||||
Contributing to an existing feature:
|
||||
fork/feature -> BlockScience/feature
|
||||
|
||||
Thanks! :heart:
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018-2019 BlockScience
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
118
README.md
118
README.md
|
|
@ -1,86 +1,50 @@
|
|||
# SimCad
|
||||
```
|
||||
__________ ____
|
||||
________ __ _____/ ____/ | / __ \
|
||||
/ ___/ __` / __ / / / /| | / / / /
|
||||
/ /__/ /_/ / /_/ / /___/ ___ |/ /_/ /
|
||||
\___/\__,_/\__,_/\____/_/ |_/_____/
|
||||
by BlockScience
|
||||
======================================
|
||||
Complex Adaptive Dynamics
|
||||
o i e
|
||||
m d s
|
||||
p e i
|
||||
u d g
|
||||
t n
|
||||
e
|
||||
r
|
||||
```
|
||||
***cadCAD*** is a Python package that assists in the processes of designing, testing and validating complex systems through simulation, with support for Monte Carlo methods, A/B testing and parameter sweeping.
|
||||
|
||||
**Dependencies:**
|
||||
# Getting Started
|
||||
## 1. Installation:
|
||||
Requires [Python 3](https://www.python.org/downloads/)
|
||||
|
||||
**Option A: Install Using [pip](https://pypi.org/project/cadCAD/)**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip3 install cadCAD
|
||||
```
|
||||
|
||||
**Project:**
|
||||
|
||||
Example Runs:
|
||||
`/simulations/sim_test.py`
|
||||
|
||||
Example Configurations:
|
||||
`/simulations/validation/`
|
||||
|
||||
**User Interface: Simulation Configuration**
|
||||
|
||||
Configurations:
|
||||
```bash
|
||||
/DiffyQ-SimCAD/ui/config.py
|
||||
**Option B:** Build From Source
|
||||
```
|
||||
pip3 install -r requirements.txt
|
||||
python3 setup.py sdist bdist_wheel
|
||||
pip3 install dist/*.whl
|
||||
```
|
||||
|
||||
**Build Tool & Package Import:**
|
||||
|
||||
## 2. Learn the basics
|
||||
**Tutorials:** available both as [Jupyter Notebooks](tutorials)
|
||||
and [videos](https://www.youtube.com/watch?v=uJEiYHRWA9g&list=PLmWm8ksQq4YKtdRV-SoinhV6LbQMgX1we)
|
||||
|
||||
Step 1. Build & Install Package locally:
|
||||
```bash
|
||||
pip install .
|
||||
pip install -e .
|
||||
```
|
||||
* [Package Creation Tutorial](https://python-packaging.readthedocs.io/en/latest/minimal.html)
|
||||
Familiarize yourself with some system modelling concepts and cadCAD terminology.
|
||||
|
||||
Step 2. Import Package & Run:
|
||||
```python
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
## 3. Documentation:
|
||||
* [System Model Configuration](documentation)
|
||||
* [System Simulation Execution](documentation/Simulation_Execution.md)
|
||||
* [Policy Aggregation](documentation/Policy_Aggregation.md)
|
||||
* [System Model Parameter Sweep](documentation/System_Model_Parameter_Sweep.md)
|
||||
|
||||
# The following imports NEED to be in the exact same order
|
||||
from SimCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.validation import config1, config2
|
||||
from SimCAD import configs
|
||||
|
||||
# ToDo: pass ExecutionContext with execution method as ExecutionContext input
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
|
||||
print("Simulation Execution 1")
|
||||
print()
|
||||
first_config = [configs[0]] # from config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run1 = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
run1_raw_result, tensor_field = run1.main()
|
||||
result = pd.DataFrame(run1_raw_result)
|
||||
# result.to_csv('~/Projects/DiffyQ-SimCAD/results/config4.csv', sep=',')
|
||||
print()
|
||||
print("Tensor Field:")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
|
||||
print("Simulation Execution 2: Pairwise Execution")
|
||||
print()
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
run2 = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
for raw_result, tensor_field in run2.main():
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print("Tensor Field:")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
```
|
||||
|
||||
Same can be run in Jupyter .
|
||||
```bash
|
||||
jupyter notebook
|
||||
```
|
||||
|
||||
Notebooks Directory:
|
||||
`/DiffyQ-SimCAD/notebooks/`
|
||||
|
||||
|
||||
**Warning**:
|
||||
**Do Not** publish this package / software to **Any** software repository **except** [DiffyQ-SimCAD's staging branch](https://github.com/BlockScience/DiffyQ-SimCAD/tree/staging) or its **Fork**
|
||||
## 4. Connect
|
||||
Find other cadCAD users at our [Discourse](https://community.cadcad.org/). We are a small but rapidly growing community.
|
||||
|
|
|
|||
|
|
@ -1 +0,0 @@
|
|||
configs = []
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
from functools import reduce
|
||||
from fn.op import foldr
|
||||
import pandas as pd
|
||||
|
||||
from SimCAD.utils import key_filter
|
||||
from SimCAD.configuration.utils.behaviorAggregation import dict_elemwise_sum
|
||||
|
||||
|
||||
class Configuration:
|
||||
def __init__(self, sim_config, state_dict, seed, exogenous_states, env_processes, mechanisms, behavior_ops=[foldr(dict_elemwise_sum())]):
|
||||
self.sim_config = sim_config
|
||||
self.state_dict = state_dict
|
||||
self.seed = seed
|
||||
self.exogenous_states = exogenous_states
|
||||
self.env_processes = env_processes
|
||||
self.behavior_ops = behavior_ops
|
||||
self.mechanisms = mechanisms
|
||||
|
||||
|
||||
class Identity:
|
||||
def __init__(self, behavior_id={'identity': 0}):
|
||||
self.beh_id_return_val = behavior_id
|
||||
|
||||
def b_identity(self, step, sL, s):
|
||||
return self.beh_id_return_val
|
||||
|
||||
def behavior_identity(self, k):
|
||||
return self.b_identity
|
||||
|
||||
def no_state_identity(self, step, sL, s, _input):
|
||||
return None
|
||||
|
||||
def state_identity(self, k):
|
||||
return lambda step, sL, s, _input: (k, s[k])
|
||||
|
||||
def apply_identity_funcs(self, identity, df, cols):
|
||||
def fillna_with_id_func(identity, df, col):
|
||||
return df[[col]].fillna(value=identity(col))
|
||||
|
||||
return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))
|
||||
|
||||
|
||||
class Processor:
|
||||
def __init__(self, id=Identity()):
|
||||
self.id = id
|
||||
self.b_identity = id.b_identity
|
||||
self.behavior_identity = id.behavior_identity
|
||||
self.no_state_identity = id.no_state_identity
|
||||
self.state_identity = id.state_identity
|
||||
self.apply_identity_funcs = id.apply_identity_funcs
|
||||
|
||||
# Make returntype chosen by user.
|
||||
def create_matrix_field(self, mechanisms, key):
|
||||
if key == 'states':
|
||||
identity = self.state_identity
|
||||
elif key == 'behaviors':
|
||||
identity = self.behavior_identity
|
||||
df = pd.DataFrame(key_filter(mechanisms, key))
|
||||
col_list = self.apply_identity_funcs(identity, df, list(df.columns))
|
||||
if len(col_list) != 0:
|
||||
return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
|
||||
else:
|
||||
return pd.DataFrame({'empty': []})
|
||||
|
||||
# Maybe Refactor to only use dictionary BUT I used dfs to fill NAs. Perhaps fill
|
||||
def generate_config(self, state_dict, mechanisms, exo_proc):
|
||||
|
||||
# ToDo: include False / False case
|
||||
# ToDo: Use Range multiplier instead for loop iterator
|
||||
def no_update_handler(bdf, sdf):
|
||||
if (bdf.empty == False) and (sdf.empty == True):
|
||||
bdf_values = bdf.values.tolist()
|
||||
sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(mechanisms))]
|
||||
return sdf_values, bdf_values
|
||||
elif (bdf.empty == True) and (sdf.empty == False):
|
||||
sdf_values = sdf.values.tolist()
|
||||
bdf_values = [[self.b_identity] * len(sdf_values) for m in range(len(mechanisms))]
|
||||
return sdf_values, bdf_values
|
||||
else:
|
||||
sdf_values = sdf.values.tolist()
|
||||
bdf_values = bdf.values.tolist()
|
||||
return sdf_values, bdf_values
|
||||
|
||||
def only_ep_handler(state_dict):
|
||||
sdf_functions = [
|
||||
lambda step, sL, s, _input: (k, v) for k, v in zip(state_dict.keys(), state_dict.values())
|
||||
]
|
||||
sdf_values = [sdf_functions]
|
||||
bdf_values = [[self.b_identity] * len(sdf_values)]
|
||||
return sdf_values, bdf_values
|
||||
|
||||
if len(mechanisms) != 0:
|
||||
bdf = self.create_matrix_field(mechanisms, 'behaviors')
|
||||
sdf = self.create_matrix_field(mechanisms, 'states')
|
||||
sdf_values, bdf_values = no_update_handler(bdf, sdf)
|
||||
zipped_list = list(zip(sdf_values, bdf_values))
|
||||
else:
|
||||
sdf_values, bdf_values = only_ep_handler(state_dict)
|
||||
zipped_list = list(zip(sdf_values, bdf_values))
|
||||
|
||||
return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
|
||||
|
|
@ -1,61 +0,0 @@
|
|||
from datetime import datetime, timedelta
|
||||
from decimal import Decimal
|
||||
from fn.func import curried
|
||||
import pandas as pd
|
||||
|
||||
|
||||
class TensorFieldReport:
|
||||
def __init__(self, config_proc):
|
||||
self.config_proc = config_proc
|
||||
|
||||
# ??? dont for-loop to apply exo_procs, use exo_proc struct
|
||||
def create_tensor_field(self, mechanisms, exo_proc, keys=['behaviors', 'states']):
|
||||
dfs = [self.config_proc.create_matrix_field(mechanisms, k) for k in keys]
|
||||
df = pd.concat(dfs, axis=1)
|
||||
for es, i in zip(exo_proc, range(len(exo_proc))):
|
||||
df['es' + str(i + 1)] = es
|
||||
df['m'] = df.index + 1
|
||||
return df
|
||||
|
||||
|
||||
def bound_norm_random(rng, low, high):
|
||||
# Add RNG Seed
|
||||
res = rng.normal((high+low)/2,(high-low)/6)
|
||||
if (res<low or res>high):
|
||||
res = bound_norm_random(rng, low, high)
|
||||
return Decimal(res)
|
||||
|
||||
|
||||
@curried
|
||||
def proc_trigger(trigger_step, update_f, step):
|
||||
if step == trigger_step:
|
||||
return update_f
|
||||
else:
|
||||
return lambda x: x
|
||||
|
||||
|
||||
# accept timedelta instead of timedelta params
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=30)
|
||||
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = t_delta):
|
||||
dt = datetime.strptime(dt_str, dt_format)
|
||||
t = dt + _timedelta
|
||||
return t.strftime(dt_format)
|
||||
|
||||
|
||||
# accept timedelta instead of timedelta params
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def ep_time_step(s, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = t_delta):
|
||||
if s['mech_step'] == 0:
|
||||
return time_step(dt_str, fromat_str, _timedelta)
|
||||
else:
|
||||
return dt_str
|
||||
|
||||
|
||||
def exo_update_per_ts(ep):
|
||||
@curried
|
||||
def ep_decorator(f, y, step, sL, s, _input):
|
||||
if s['mech_step'] + 1 == 1: # inside f body to reduce performance costs
|
||||
return f(step, sL, s, _input)
|
||||
else:
|
||||
return (y, s[y])
|
||||
return {es: ep_decorator(f, es) for es, f in ep.items()}
|
||||
|
|
@ -1,82 +0,0 @@
|
|||
from pathos.multiprocessing import ProcessingPool as Pool
|
||||
from tabulate import tabulate
|
||||
|
||||
from SimCAD.utils import flatten
|
||||
from SimCAD.configuration import Processor
|
||||
from SimCAD.configuration.utils import TensorFieldReport
|
||||
from SimCAD.engine.simulation import Executor as SimExecutor
|
||||
|
||||
|
||||
class ExecutionMode:
|
||||
single_proc = 'single_proc'
|
||||
multi_proc = 'multi_proc'
|
||||
|
||||
|
||||
class ExecutionContext:
|
||||
|
||||
def __init__(self, context=ExecutionMode.multi_proc):
|
||||
self.name = context
|
||||
self.method = None
|
||||
|
||||
def single_proc_exec(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns):
|
||||
l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
|
||||
simulation, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
|
||||
result = simulation(states_list, config, env_processes, T, N)
|
||||
return flatten(result)
|
||||
|
||||
def parallelize_simulations(fs, states_list, configs, env_processes, Ts, Ns):
|
||||
l = list(zip(fs, states_list, configs, env_processes, Ts, Ns))
|
||||
with Pool(len(configs)) as p:
|
||||
results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5]), l)
|
||||
return results
|
||||
|
||||
if context == 'single_proc':
|
||||
self.method = single_proc_exec
|
||||
elif context == 'multi_proc':
|
||||
self.method = parallelize_simulations
|
||||
|
||||
|
||||
class Executor:
|
||||
|
||||
def __init__(self, exec_context, configs):
|
||||
self.SimExecutor = SimExecutor
|
||||
self.exec_method = exec_context.method
|
||||
self.exec_context = exec_context.name
|
||||
self.configs = configs
|
||||
self.main = self.execute
|
||||
|
||||
|
||||
def execute(self):
|
||||
|
||||
config_proc = Processor()
|
||||
create_tensor_field = TensorFieldReport(config_proc).create_tensor_field
|
||||
|
||||
print(self.exec_context+": "+str(self.configs))
|
||||
states_lists, Ts, Ns, eps, configs_structs, env_processes_list, mechanisms, simulation_execs = \
|
||||
[], [], [], [], [], [], [], []
|
||||
config_idx = 0
|
||||
for x in self.configs:
|
||||
states_lists.append([x.state_dict])
|
||||
Ts.append(x.sim_config['T'])
|
||||
Ns.append(x.sim_config['N'])
|
||||
eps.append(list(x.exogenous_states.values()))
|
||||
configs_structs.append(config_proc.generate_config(x.state_dict, x.mechanisms, eps[config_idx]))
|
||||
env_processes_list.append(x.env_processes)
|
||||
mechanisms.append(x.mechanisms)
|
||||
simulation_execs.append(SimExecutor(x.behavior_ops).simulation)
|
||||
|
||||
config_idx += 1
|
||||
|
||||
# Dimensions: N x r x mechs
|
||||
|
||||
if self.exec_context == ExecutionMode.single_proc:
|
||||
tensor_field = create_tensor_field(mechanisms.pop(), eps.pop())
|
||||
result = self.exec_method(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
|
||||
return (result, tensor_field)
|
||||
elif self.exec_context == ExecutionMode.multi_proc:
|
||||
if len(self.configs) > 1:
|
||||
simulations = self.exec_method(simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns)
|
||||
results = []
|
||||
for result, mechanism, ep in list(zip(simulations, mechanisms, eps)):
|
||||
results.append((flatten(result), create_tensor_field(mechanism, ep)))
|
||||
return results
|
||||
|
|
@ -1,102 +0,0 @@
|
|||
from copy import deepcopy
|
||||
from fn.op import foldr, call
|
||||
|
||||
from SimCAD.utils import rename
|
||||
from SimCAD.engine.utils import engine_exception
|
||||
|
||||
|
||||
id_exception = engine_exception(KeyError, KeyError, None)
|
||||
|
||||
|
||||
class Executor:
|
||||
def __init__(self, behavior_ops, behavior_update_exception=id_exception, state_update_exception=id_exception):
|
||||
self.behavior_ops = behavior_ops
|
||||
self.state_update_exception = state_update_exception
|
||||
self.behavior_update_exception = behavior_update_exception
|
||||
|
||||
# Data Type reduction
|
||||
def get_behavior_input(self, step, sL, s, funcs):
|
||||
ops = self.behavior_ops[::-1]
|
||||
|
||||
def get_col_results(step, sL, s, funcs):
|
||||
return list(map(lambda f: f(step, sL, s), funcs))
|
||||
|
||||
return foldr(call, get_col_results(step, sL, s, funcs))(ops)
|
||||
|
||||
def apply_env_proc(self, env_processes, state_dict, step):
|
||||
for state in state_dict.keys():
|
||||
if state in list(env_processes.keys()):
|
||||
env_state = env_processes[state]
|
||||
if env_state.__name__ == '_curried': # might want to change
|
||||
state_dict[state] = env_state(step)(state_dict[state])
|
||||
else:
|
||||
state_dict[state] = env_state(state_dict[state])
|
||||
|
||||
|
||||
def mech_step(self, m_step, sL, state_funcs, behavior_funcs, env_processes, t_step, run):
|
||||
last_in_obj = sL[-1]
|
||||
|
||||
_input = self.state_update_exception(self.get_behavior_input(m_step, sL, last_in_obj, behavior_funcs))
|
||||
|
||||
# ToDo: add env_proc generator to `last_in_copy` iterator as wrapper function
|
||||
last_in_copy = dict([self.behavior_update_exception(f(m_step, sL, last_in_obj, _input)) for f in state_funcs])
|
||||
|
||||
for k in last_in_obj:
|
||||
if k not in last_in_copy:
|
||||
last_in_copy[k] = last_in_obj[k]
|
||||
|
||||
del last_in_obj
|
||||
|
||||
# make env proc trigger field agnostic
|
||||
self.apply_env_proc(env_processes, last_in_copy, last_in_copy['timestamp']) # mutating last_in_copy
|
||||
|
||||
last_in_copy["mech_step"], last_in_copy["time_step"], last_in_copy['run'] = m_step, t_step, run
|
||||
sL.append(last_in_copy)
|
||||
del last_in_copy
|
||||
|
||||
return sL
|
||||
|
||||
def mech_pipeline(self, states_list, configs, env_processes, t_step, run):
|
||||
m_step = 0
|
||||
states_list_copy = deepcopy(states_list)
|
||||
# print(states_list_copy)
|
||||
# remove copy
|
||||
genesis_states = states_list_copy[-1]
|
||||
genesis_states['mech_step'], genesis_states['time_step'] = m_step, t_step
|
||||
states_list = [genesis_states]
|
||||
|
||||
m_step += 1
|
||||
for config in configs:
|
||||
s_conf, b_conf = config[0], config[1]
|
||||
states_list = self.mech_step(m_step, states_list, s_conf, b_conf, env_processes, t_step, run)
|
||||
m_step += 1
|
||||
|
||||
t_step += 1
|
||||
|
||||
return states_list
|
||||
|
||||
# rename pipe
|
||||
def block_pipeline(self, states_list, configs, env_processes, time_seq, run):
|
||||
time_seq = [x + 1 for x in time_seq]
|
||||
simulation_list = [states_list]
|
||||
for time_step in time_seq:
|
||||
pipe_run = self.mech_pipeline(simulation_list[-1], configs, env_processes, time_step, run)
|
||||
_, *pipe_run = pipe_run
|
||||
simulation_list.append(pipe_run)
|
||||
|
||||
return simulation_list
|
||||
|
||||
# Del _ / head
|
||||
def simulation(self, states_list, configs, env_processes, time_seq, runs):
|
||||
pipe_run = []
|
||||
for run in range(runs):
|
||||
run += 1
|
||||
states_list_copy = deepcopy(states_list) # WHY ???
|
||||
head, *tail = self.block_pipeline(states_list_copy, configs, env_processes, time_seq, run)
|
||||
genesis = head.pop()
|
||||
genesis['mech_step'], genesis['time_step'], genesis['run'] = 0, 0, run
|
||||
first_timestep_per_run = [genesis] + tail.pop(0)
|
||||
pipe_run += [first_timestep_per_run] + tail
|
||||
del states_list_copy
|
||||
|
||||
return pipe_run
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
# from fn.func import curried
|
||||
|
||||
def pipe(x):
|
||||
return x
|
||||
|
||||
|
||||
def print_pipe(x):
|
||||
print(x)
|
||||
return x
|
||||
|
||||
|
||||
def flatten(l):
|
||||
return [item for sublist in l for item in sublist]
|
||||
|
||||
|
||||
def flatmap(f, items):
|
||||
return list(map(f, items))
|
||||
|
||||
|
||||
def key_filter(l, keyname):
|
||||
return [v[keyname] for k, v in l.items()]
|
||||
|
||||
# @curried
|
||||
def rename(new_name, f):
|
||||
f.__name__ = new_name
|
||||
return f
|
||||
#
|
||||
# def rename(newname):
|
||||
# def decorator(f):
|
||||
# f.__name__ = newname
|
||||
# return f
|
||||
# return decorator
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
Complex Adaptive Dynamics
|
||||
o i e
|
||||
m d s
|
||||
p e i
|
||||
u d g
|
||||
t n
|
||||
e
|
||||
r
|
||||
|
||||
__________ ____
|
||||
________ __ _____/ ____/ | / __ \
|
||||
/ ___/ __` / __ / / / /| | / / / /
|
||||
/ /__/ /_/ / /_/ / /___/ ___ |/ /_/ /
|
||||
\___/\__,_/\__,_/\____/_/ |_/_____/
|
||||
by BlockScience
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
name = "cadCAD"
|
||||
configs = []
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
from typing import Dict, Callable, List, Tuple
|
||||
from functools import reduce
|
||||
import pandas as pd
|
||||
from pandas.core.frame import DataFrame
|
||||
|
||||
from cadCAD import configs
|
||||
from cadCAD.utils import key_filter
|
||||
from cadCAD.configuration.utils import exo_update_per_ts
|
||||
from cadCAD.configuration.utils.policyAggregation import dict_elemwise_sum
|
||||
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates, sanitize_config
|
||||
|
||||
|
||||
class Configuration(object):
|
||||
def __init__(self, sim_config={}, initial_state={}, seeds={}, env_processes={},
|
||||
exogenous_states={}, partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b],
|
||||
**kwargs) -> None:
|
||||
# print(exogenous_states)
|
||||
self.sim_config = sim_config
|
||||
self.initial_state = initial_state
|
||||
self.seeds = seeds
|
||||
self.env_processes = env_processes
|
||||
self.exogenous_states = exogenous_states
|
||||
self.partial_state_updates = partial_state_update_blocks
|
||||
self.policy_ops = policy_ops
|
||||
self.kwargs = kwargs
|
||||
|
||||
sanitize_config(self)
|
||||
|
||||
|
||||
def append_configs(sim_configs={}, initial_state={}, seeds={}, raw_exogenous_states={}, env_processes={},
|
||||
partial_state_update_blocks={}, policy_ops=[lambda a, b: a + b], _exo_update_per_ts: bool = True) -> None:
|
||||
if _exo_update_per_ts is True:
|
||||
exogenous_states = exo_update_per_ts(raw_exogenous_states)
|
||||
else:
|
||||
exogenous_states = raw_exogenous_states
|
||||
|
||||
if isinstance(sim_configs, dict):
|
||||
sim_configs = [sim_configs]
|
||||
|
||||
for sim_config in sim_configs:
|
||||
config = Configuration(
|
||||
sim_config=sim_config,
|
||||
initial_state=initial_state,
|
||||
seeds=seeds,
|
||||
exogenous_states=exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_blocks,
|
||||
policy_ops=policy_ops
|
||||
)
|
||||
print(sim_configs)
|
||||
#for each sim config create new config
|
||||
configs.append(config)
|
||||
|
||||
|
||||
class Identity:
|
||||
def __init__(self, policy_id: Dict[str, int] = {'identity': 0}) -> None:
|
||||
self.beh_id_return_val = policy_id
|
||||
|
||||
def p_identity(self, var_dict, sub_step, sL, s):
|
||||
return self.beh_id_return_val
|
||||
|
||||
def policy_identity(self, k: str) -> Callable:
|
||||
return self.p_identity
|
||||
|
||||
def no_state_identity(self, var_dict, sub_step, sL, s, _input):
|
||||
return None
|
||||
|
||||
def state_identity(self, k: str) -> Callable:
|
||||
return lambda var_dict, sub_step, sL, s, _input: (k, s[k])
|
||||
|
||||
def apply_identity_funcs(self, identity: Callable, df: DataFrame, cols: List[str]) -> List[DataFrame]:
|
||||
def fillna_with_id_func(identity, df, col):
|
||||
return df[[col]].fillna(value=identity(col))
|
||||
|
||||
return list(map(lambda col: fillna_with_id_func(identity, df, col), cols))
|
||||
|
||||
|
||||
class Processor:
    """Builds the per-substep (state-update, policy) function matrices that the
    engine executes, padding missing cells with identity functions."""

    def __init__(self, id: Identity = Identity()) -> None:
        # NOTE(review): the default `Identity()` is created once at definition
        # time and shared by every call that omits `id` — Identity is treated
        # as stateless here, but confirm.
        self.id = id
        self.p_identity = id.p_identity
        self.policy_identity = id.policy_identity
        self.no_state_identity = id.no_state_identity
        self.state_identity = id.state_identity
        self.apply_identity_funcs = id.apply_identity_funcs

    def create_matrix_field(self, partial_state_updates, key: str) -> DataFrame:
        """Build the function matrix for *key* ('variables' or 'policies'),
        filling missing cells with the matching identity function."""
        if key == 'variables':
            identity = self.state_identity
        elif key == 'policies':
            identity = self.policy_identity
        # NOTE(review): any other `key` leaves `identity` unbound and raises
        # UnboundLocalError below — callers only pass these two keys.

        df = pd.DataFrame(key_filter(partial_state_updates, key))
        col_list = self.apply_identity_funcs(identity, df, list(df.columns))
        if len(col_list) != 0:
            return reduce((lambda x, y: pd.concat([x, y], axis=1)), col_list)
        else:
            return pd.DataFrame({'empty': []})

    def generate_config(self, initial_state, partial_state_updates, exo_proc
                        ) -> List[Tuple[List[Callable], List[Callable]]]:
        """Return one (state_update_funcs + exo_proc, policy_funcs) pair per
        partial state update block."""

        def no_update_handler(bdf, sdf):
            # Pad whichever side (policies / state updates) is empty with
            # identity functions so both matrices stay aligned.
            if (bdf.empty == False) and (sdf.empty == True):
                bdf_values = bdf.values.tolist()
                sdf_values = [[self.no_state_identity] * len(bdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            elif (bdf.empty == True) and (sdf.empty == False):
                sdf_values = sdf.values.tolist()
                bdf_values = [[self.p_identity] * len(sdf_values) for m in range(len(partial_state_updates))]
                return sdf_values, bdf_values
            else:
                sdf_values = sdf.values.tolist()
                bdf_values = bdf.values.tolist()
                return sdf_values, bdf_values

        def only_ep_handler(state_dict):
            # No partial state updates configured: one pass-through state
            # update per state key, plus identity policies.
            # Bug fix: the lambdas previously captured the loop variables k, v
            # late-bound, so EVERY generated function returned the LAST
            # (key, value) pair; binding them as keyword defaults freezes each
            # pair. The extra defaulted parameters are invisible to callers,
            # which pass exactly five positional arguments.
            sdf_functions = [
                lambda var_dict, sub_step, sL, s, _input, k=k, v=v: (k, v)
                for k, v in zip(state_dict.keys(), state_dict.values())
            ]
            sdf_values = [sdf_functions]
            bdf_values = [[self.p_identity] * len(sdf_values)]
            return sdf_values, bdf_values

        if len(partial_state_updates) != 0:
            # backwards compatibility # ToDo: Move this
            partial_state_updates = sanitize_partial_state_updates(partial_state_updates)

            bdf = self.create_matrix_field(partial_state_updates, 'policies')
            sdf = self.create_matrix_field(partial_state_updates, 'variables')
            sdf_values, bdf_values = no_update_handler(bdf, sdf)
            zipped_list = list(zip(sdf_values, bdf_values))
        else:
            sdf_values, bdf_values = only_ep_handler(initial_state)
            zipped_list = list(zip(sdf_values, bdf_values))

        return list(map(lambda x: (x[0] + exo_proc, x[1]), zipped_list))
|
||||
|
|
@ -0,0 +1,223 @@
|
|||
from datetime import datetime, timedelta
|
||||
from copy import deepcopy
|
||||
from functools import reduce
|
||||
from fn.func import curried
|
||||
from funcy import curry
|
||||
import pandas as pd
|
||||
|
||||
from cadCAD.configuration.utils.depreciationHandler import sanitize_partial_state_updates
|
||||
from cadCAD.utils import dict_filter, contains_type, flatten_tabulated_dict, tabulate_dict
|
||||
|
||||
|
||||
class TensorFieldReport:
    """Produces a human-readable 'tensor field' DataFrame describing which
    policy / state-update / exogenous functions run at each substep."""

    def __init__(self, config_proc):
        # config_proc: a Processor providing create_matrix_field().
        self.config_proc = config_proc

    # ToDo: backwards compatibility
    def create_tensor_field(self, partial_state_updates, exo_proc, keys = ['policies', 'variables']):
        """One row per substep; policy/variable columns, exogenous-process
        columns es1..esN, and the 1-based substep index column 'm'.

        The default `keys` list is shared across calls but never mutated here.
        """
        partial_state_updates = sanitize_partial_state_updates(partial_state_updates) # Temporary

        dfs = [self.config_proc.create_matrix_field(partial_state_updates, k) for k in keys]
        df = pd.concat(dfs, axis=1)
        for es, i in zip(exo_proc, range(len(exo_proc))):
            df['es' + str(i + 1)] = es
        df['m'] = df.index + 1
        return df
|
||||
|
||||
|
||||
def state_update(y, x):
    """Return a state-update function that always sets state key *y* to *x*."""
    def _update(var_dict, sub_step, sL, s, _input):
        return y, x
    return _update
|
||||
|
||||
|
||||
def bound_norm_random(rng, low, high):
    """Draw from a normal centred on the midpoint of [low, high] with sigma
    (high-low)/6, redrawing until the sample falls inside the bounds."""
    mean = (high + low) / 2
    std = (high - low) / 6
    res = rng.normal(mean, std)
    while res < low or res > high:
        res = rng.normal(mean, std)
    # return Decimal(res)
    return float(res)
|
||||
|
||||
|
||||
@curried
def env_proc_trigger(timestep, f, time):
    """Curried gate for environmental processes: return *f* when *time*
    equals the configured *timestep*, otherwise the identity function."""
    if time == timestep:
        return f
    else:
        return lambda x: x
|
||||
|
||||
|
||||
# Default increments used by the timestamp helpers below.
tstep_delta = timedelta(days=0, minutes=0, seconds=30)
def time_step(dt_str, dt_format='%Y-%m-%d %H:%M:%S', _timedelta = tstep_delta):
    """Parse *dt_str* with *dt_format*, advance it by *_timedelta*, and
    return it re-formatted with the same format."""
    parsed = datetime.strptime(dt_str, dt_format)
    advanced = parsed + _timedelta
    return advanced.strftime(dt_format)


ep_t_delta = timedelta(days=0, minutes=0, seconds=1)
def ep_time_step(s_condition, dt_str, fromat_str='%Y-%m-%d %H:%M:%S', _timedelta = ep_t_delta):
    """Advance *dt_str* by *_timedelta* only when *s_condition* is truthy;
    otherwise return it unchanged. (The `fromat_str` spelling is kept for
    keyword-compatibility with existing callers.)"""
    if not s_condition:
        return dt_str
    return time_step(dt_str, fromat_str, _timedelta)
|
||||
|
||||
|
||||
def partial_state_sweep_filter(state_field, partial_state_updates):
    """For each partial state update, keep only the *state_field* entries
    whose value is a list (sweep candidates); PSUBs with no list-valued
    entry are dropped entirely."""
    field_by_psu = {k: v[state_field] for k, v in partial_state_updates.items()}
    return {
        psu_key: dict_filter(fields, lambda value: isinstance(value, list))
        for psu_key, fields in field_by_psu.items()
        if contains_type(list(fields.values()), list)
    }
|
||||
|
||||
|
||||
def state_sweep_filter(raw_exogenous_states):
    """Keep only the entries whose value is a list (i.e. sweep candidates)."""
    return {k: v for k, v in raw_exogenous_states.items() if isinstance(v, list)}
|
||||
|
||||
|
||||
@curried
def sweep_partial_states(_type, in_config):
    """Expand *in_config* into one config per swept function found in its
    partial state updates of kind *_type* ('policies' or 'variables').

    Returns [in_config] unchanged when nothing is swept."""
    configs = []
    # filtered_mech_states
    filtered_partial_states = partial_state_sweep_filter(_type, in_config.partial_state_updates)
    if len(filtered_partial_states) > 0:
        for partial_state, state_dict in filtered_partial_states.items():
            for state, state_funcs in state_dict.items():
                for f in state_funcs:
                    # One new config per candidate function, with the sweep
                    # list collapsed to that single function.
                    config = deepcopy(in_config)
                    config.partial_state_updates[partial_state][_type][state] = f
                    configs.append(config)
                    del config
    else:
        configs = [in_config]

    return configs
|
||||
|
||||
|
||||
@curried
def sweep_states(state_type, states, in_config):
    """Expand *in_config* into one config per swept function in *states*.

    *state_type* selects where the exploded states are attached:
    'exogenous' -> config.exogenous_states, 'environmental' ->
    config.env_processes. Returns [in_config] when nothing is swept."""
    configs = []
    filtered_states = state_sweep_filter(states)
    if len(filtered_states) > 0:
        for state, state_funcs in filtered_states.items():
            for f in state_funcs:
                config = deepcopy(in_config)
                exploded_states = deepcopy(states)
                # Collapse this state's sweep list to the single function f.
                exploded_states[state] = f
                if state_type == 'exogenous':
                    config.exogenous_states = exploded_states
                elif state_type == 'environmental':
                    config.env_processes = exploded_states
                configs.append(config)
                del config, exploded_states
    else:
        configs = [in_config]

    return configs
|
||||
|
||||
|
||||
def exo_update_per_ts(ep):
    """Wrap each exogenous process in *ep* so it only fires on the first
    substep of a timestep; on later substeps the previous value is carried
    forward unchanged."""
    @curried
    def ep_decorator(f, y, var_dict, sub_step, sL, s, _input):
        # s['substep'] + 1 == 1 means we are on substep 0, i.e. the first
        # update of the current timestep.
        if s['substep'] + 1 == 1:
            return f(var_dict, sub_step, sL, s, _input)
        else:
            return y, s[y]

    return {es: ep_decorator(f, es) for es, f in ep.items()}
|
||||
|
||||
|
||||
def trigger_condition(s, pre_conditions, cond_opp):
    """Check each precondition (state field's value must be in its allowed
    list) and combine the per-field booleans with *cond_opp*."""
    checks = [s[field] in allowed for field, allowed in pre_conditions.items()]
    return reduce(cond_opp, checks)
|
||||
|
||||
|
||||
def apply_state_condition(pre_conditions, cond_opp, y, f, _g, step, sL, s, _input):
    """Run update *f* when the preconditions hold; otherwise leave state key
    *y* at its current value."""
    if not trigger_condition(s, pre_conditions, cond_opp):
        return y, s[y]
    return f(_g, step, sL, s, _input)
|
||||
|
||||
|
||||
def var_trigger(y, f, pre_conditions, cond_op):
    """Wrap update *f* for key *y* so it only fires when the preconditions
    (combined with *cond_op*) hold on the current state."""
    def _conditional(_g, step, sL, s, _input):
        return apply_state_condition(pre_conditions, cond_op, y, f, _g, step, sL, s, _input)
    return _conditional
|
||||
|
||||
|
||||
def var_substep_trigger(substeps):
    """Return a builder (y, f) -> triggered update that fires *f* for key *y*
    only when the current substep is in *substeps*."""
    def trigger(end_substep, y, f):
        # NOTE(review): `end_substep` is accepted but unused — the curried
        # call below passes `substeps` into this slot; confirm intent.
        pre_conditions = {'substep': substeps}
        cond_opp = lambda a, b: a and b
        return var_trigger(y, f, pre_conditions, cond_opp)

    return lambda y, f: curry(trigger)(substeps)(y)(f)
|
||||
|
||||
|
||||
def env_trigger(end_substep):
    """Build environmental-update triggers.

    The returned factory (trigger_field, trigger_vals, funct_list) creates an
    env_update(state_dict, sweep_dict, target_value) that applies every
    function in funct_list when the (projected) trigger_field value is in
    trigger_vals."""
    def trigger(end_substep, trigger_field, trigger_vals, funct_list):
        def env_update(state_dict, sweep_dict, target_value):
            state_dict_copy = deepcopy(state_dict)
            # Use substep to simulate the current system metrics: if we are on
            # the last substep, the next timestep is about to begin.
            if state_dict_copy['substep'] == end_substep:
                state_dict_copy['timestep'] = state_dict_copy['timestep'] + 1

            if state_dict_copy[trigger_field] in trigger_vals:
                for g in funct_list:
                    target_value = g(sweep_dict, target_value)

            del state_dict_copy
            return target_value

        return env_update

    return lambda trigger_field, trigger_vals, funct_list: \
        curry(trigger)(end_substep)(trigger_field)(trigger_vals)(funct_list)
|
||||
|
||||
|
||||
def config_sim(d):
    """Build simulation-config dict(s) from {'N': …, 'T': …, 'M': …}.

    With 'M' present, returns a LIST of configs (one per swept M parameter
    combination); without it, mutates *d* in place (adding 'M': [{}]) and
    returns the DICT itself.
    NOTE(review): the inconsistent return type (list vs dict) and the
    in-place mutation are preserved — callers appear to rely on both shapes.
    """
    def process_variables(d):
        # Expand each M parameter list into the cartesian sweep of param dicts.
        return flatten_tabulated_dict(tabulate_dict(d))

    if "M" in d:
        return [{"N": d["N"], "T": d["T"], "M": M} for M in process_variables(d["M"])]
    else:
        d["M"] = [{}]
        return d
|
||||
|
||||
|
||||
def psub_list(psu_block, psu_steps):
    """Order the partial-state-update blocks of *psu_block* by the keys
    listed in *psu_steps*."""
    ordered = []
    for step_key in psu_steps:
        ordered.append(psu_block[step_key])
    return ordered
|
||||
|
||||
|
||||
def psub(policies, state_updates):
    """Package policies and state updates into one Partial State Update Block."""
    block = {'policies': policies}
    block['states'] = state_updates
    return block
|
||||
|
||||
|
||||
def genereate_psubs(policy_grid, states_grid, policies, state_updates):
    """Build one PSUB per (policy_ids, state_list) pair, selecting only the
    named policies and state updates for each block.

    (The misspelled name is kept for backward compatibility.)"""
    psubs = []
    for policy_ids, state_list in zip(policy_grid, states_grid):
        chosen_policies = {k: v for k, v in policies.items() if k in policy_ids}
        chosen_updates = {k: v for k, v in state_updates.items() if k in state_list}
        psubs.append(psub(chosen_policies, chosen_updates))

    return psubs
|
||||
|
||||
|
||||
def access_block(state_history, target_field, psu_block_offset, exculsion_list=None):
    """Return the past PSUB (list of state dicts) at *psu_block_offset* in
    *state_history*, with *target_field* and every key in *exculsion_list*
    dropped from each dict. Offsets >= 0 or beyond history yield [].

    Bug fix: the previous default `exculsion_list=[]` was a shared mutable
    default that was mutated (`+=`) on every call, so exclusions accumulated
    across calls — and into any caller-supplied list. A None sentinel plus a
    local copy preserves the interface without the leak.
    (The `exculsion_list` spelling is kept for keyword-compatibility.)
    """
    if exculsion_list is None:
        exculsion_list = []
    # Copy so the caller's list is never mutated.
    excluded = list(exculsion_list) + [target_field]

    def filter_history(key_list, sH):
        # Drop the excluded keys from every state dict in the PSUB.
        drop_keys = lambda d: {k: v for k, v in d.items() if k not in key_list}
        return [drop_keys(d) for d in sH]

    if psu_block_offset < -1:
        if len(state_history) >= abs(psu_block_offset):
            return filter_history(excluded, state_history[psu_block_offset])
        else:
            return []
    elif psu_block_offset == -1:
        return filter_history(excluded, state_history[psu_block_offset])
    else:
        return []
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
from copy import deepcopy
|
||||
|
||||
|
||||
def sanitize_config(config):
    """Map legacy constructor kwargs onto their modern attribute names and
    verify that initial conditions were provided.

    Raises Exception when the resulting initial state is empty."""
    legacy_to_attr = {
        'state_dict': 'initial_state',
        'seed': 'seeds',
        'mechanisms': 'partial_state_updates',
    }
    for key, value in config.kwargs.items():
        attr = legacy_to_attr.get(key)
        if attr is not None:
            setattr(config, attr, value)

    if config.initial_state == {}:
        raise Exception('The initial conditions of the system have not been set')
|
||||
|
||||
|
||||
def sanitize_partial_state_updates(partial_state_updates):
    """Return a deep copy of *partial_state_updates* with legacy keys renamed:
    'behaviors' -> 'policies' and 'states' -> 'variables'.

    Accepts either the list form or the legacy dict form; the input is left
    untouched."""
    sanitized = deepcopy(partial_state_updates)

    def rename_keys(block):
        if 'behaviors' in block:
            block['policies'] = block.pop('behaviors')
        if 'states' in block:
            block['variables'] = block.pop('states')

    if isinstance(sanitized, list):
        blocks = sanitized
    elif isinstance(sanitized, dict):
        blocks = sanitized.values()
    else:
        blocks = []
    for block in blocks:
        rename_keys(block)

    return sanitized
|
||||
|
|
@ -2,18 +2,19 @@ from fn.op import foldr
|
|||
from fn.func import curried
|
||||
|
||||
|
||||
def get_base_value(x):
    """Return the additive identity matching the type of *x*: '' for str,
    0 for int, [] for list, and 0 for anything else."""
    if isinstance(x, str):
        return ''
    if isinstance(x, int):
        return 0
    if isinstance(x, list):
        return []
    return 0
|
||||
|
||||
|
||||
def behavior_to_dict(v):
|
||||
return dict(list(zip(map(lambda n: 'b' + str(n + 1), list(range(len(v)))), v)))
|
||||
def policy_to_dict(v):
    """Label a sequence of policy results as {'p1': v[0], 'p2': v[1], ...}."""
    return {'p' + str(i + 1): item for i, item in enumerate(v)}


# Plain two-argument addition, used as the default reduction operator.
add = lambda a, b: a + b
|
||||
|
|
@ -27,13 +28,12 @@ def foldr_dict_vals(f, d):
|
|||
def sum_dict_values():
    """Return a reducer that folds `add` over a dict's values (via
    foldr_dict_vals, defined above this block)."""
    return foldr_dict_vals(add)
|
||||
|
||||
# AttributeError: 'int' object has no attribute 'keys'
|
||||
# config7c
|
||||
|
||||
@curried
|
||||
def dict_op(f, d1, d2):
|
||||
def set_base_value(target_dict, source_dict, key):
|
||||
if key not in target_dict:
|
||||
return get_base_value(type(source_dict[key]))
|
||||
return get_base_value(source_dict[key])
|
||||
else:
|
||||
return target_dict[key]
|
||||
|
||||
|
|
@ -41,9 +41,5 @@ def dict_op(f, d1, d2):
|
|||
|
||||
return {k: f(set_base_value(d1, d2, k), set_base_value(d2, d1, k)) for k in key_set}
|
||||
|
||||
|
||||
def dict_elemwise_sum():
    """Element-wise dict addition: dict_op specialised with `add`."""
    return dict_op(add)
|
||||
|
||||
|
||||
# class BehaviorAggregation:
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
from collections import namedtuple
|
||||
from inspect import getmembers, ismethod
|
||||
from pandas.core.frame import DataFrame
|
||||
|
||||
from cadCAD.utils import SilentDF
|
||||
|
||||
|
||||
def val_switch(v):
    """Wrap DataFrames in SilentDF so reprs stay short; pass everything
    else through unchanged.

    (Cleaned up the redundant `isinstance(...) is True` comparison —
    isinstance already returns a bool.)"""
    if isinstance(v, DataFrame):
        return SilentDF(v)
    return v
|
||||
|
||||
|
||||
class udcView(object):
    """View over a user-defined class's member dict: attribute access goes
    straight to the shared dict, and repr hides methods and masked members."""

    def __init__(self, d, masked_members):
        # Adopt the member dict directly as this object's attribute namespace
        # (the dict is shared with the broker, not copied).
        self.__dict__ = d
        self.masked_members = masked_members

    def __repr__(self):
        """Summarise non-method, non-masked members plus a list of method names."""
        members = {}
        # NOTE(review): methods are detected by comparing str(type(v)) to
        # "<class 'method'>" — inspect.ismethod would be the idiomatic check,
        # kept as-is here to avoid behavior drift.
        variables = {
            k: val_switch(v) for k, v in self.__dict__.items()
            if str(type(v)) != "<class 'method'>" and k not in self.masked_members # and isinstance(v, DataFrame) is not True
        }
        members['methods'] = [k for k, v in self.__dict__.items() if str(type(v)) == "<class 'method'>"]

        members.update(variables)
        return f"{members}"
|
||||
|
||||
|
||||
class udcBroker(object):
    """Collects an object's attributes and bound methods (minus filtered
    names) into one member dict, exposable as a view or namedtuple."""

    def __init__(self, obj, function_filter=['__init__']):
        bound_methods = dict(getmembers(obj, ismethod))
        exposed_methods = {k: v for k, v in bound_methods.items() if k not in function_filter}
        members = {'obj': obj}
        # d.update(deepcopy(vars(obj))) # somehow is enough
        members.update(vars(obj))
        members.update(exposed_methods)

        self.members_dict = members

    def get_members(self):
        """Return the raw member dict (object, attributes, exposed methods)."""
        return self.members_dict

    def get_view(self, masked_members):
        """Return a udcView over the member dict, hiding *masked_members*."""
        return udcView(self.members_dict, masked_members)

    def get_namedtuple(self):
        """Return the members as an immutable namedtuple named 'Hydra'."""
        fields = self.members_dict.keys()
        values = self.members_dict.values()
        return namedtuple("Hydra", fields)(*values)
|
||||
|
||||
|
||||
def UDO(udo, masked_members=['obj']):
    """Wrap a user-defined object in a udcView, hiding *masked_members*
    from its repr."""
    broker = udcBroker(udo)
    return broker.get_view(masked_members)


def udoPipe(obj_view):
    """Re-wrap the underlying object of an existing view in a fresh view,
    preserving its masked members."""
    return UDO(obj_view.obj, obj_view.masked_members)
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
from typing import Callable, Dict, List, Any, Tuple
|
||||
from pathos.multiprocessing import ProcessingPool as PPool
|
||||
from pandas.core.frame import DataFrame
|
||||
|
||||
from cadCAD.utils import flatten
|
||||
from cadCAD.configuration import Configuration, Processor
|
||||
from cadCAD.configuration.utils import TensorFieldReport
|
||||
from cadCAD.engine.simulation import Executor as SimExecutor
|
||||
|
||||
VarDictType = Dict[str, List[Any]]
|
||||
StatesListsType = List[Dict[str, Any]]
|
||||
ConfigsType = List[Tuple[List[Callable], List[Callable]]]
|
||||
EnvProcessesType = Dict[str, Callable]
|
||||
|
||||
|
||||
class ExecutionMode:
    """String constants naming the supported execution back-ends."""
    single_proc = 'single_proc'  # run the single configuration sequentially in-process
    multi_proc = 'multi_proc'    # fan configurations out across a process pool
|
||||
|
||||
|
||||
def single_proc_exec(
    simulation_execs: List[Callable],
    var_dict_list: List[VarDictType],
    states_lists: List[StatesListsType],
    configs_structs: List[ConfigsType],
    env_processes_list: List[EnvProcessesType],
    Ts: List[range],
    Ns: List[int]
):
    """Run the single (last) configured simulation in-process and return the
    flattened result."""
    # Pop the sole element off each per-configuration list.
    l = [simulation_execs, states_lists, configs_structs, env_processes_list, Ts, Ns]
    simulation_exec, states_list, config, env_processes, T, N = list(map(lambda x: x.pop(), l))
    # NOTE(review): var_dict_list is passed WHOLE (not popped) unlike every
    # other argument — confirm the simulation callable expects the full list.
    result = simulation_exec(var_dict_list, states_list, config, env_processes, T, N)
    return flatten(result)
|
||||
|
||||
|
||||
def parallelize_simulations(
    simulation_execs: List[Callable],
    var_dict_list: List[VarDictType],
    states_lists: List[StatesListsType],
    configs_structs: List[ConfigsType],
    env_processes_list: List[EnvProcessesType],
    Ts: List[range],
    Ns: List[int]
):
    """Run every configured simulation in parallel on a pathos process pool
    (one worker per configuration) and return the per-config results in order."""
    l = list(zip(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns))
    with PPool(len(configs_structs)) as p:
        # Each tuple t carries one configuration's (exec, args...) bundle.
        results = p.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6]), l)
    return results
|
||||
|
||||
|
||||
class ExecutionContext:
    """Resolves an execution-mode name to the executor function that
    implements it; `method` stays None for unrecognised modes."""

    def __init__(self, context: str = ExecutionMode.multi_proc) -> None:
        self.name = context
        self.method = None

        # Consistency fix: compare against the ExecutionMode constants rather
        # than repeating the string literals, so the mode names stay defined
        # in exactly one place.
        if context == ExecutionMode.single_proc:
            self.method = single_proc_exec
        elif context == ExecutionMode.multi_proc:
            self.method = parallelize_simulations
|
||||
|
||||
|
||||
class Executor:
    """Top-level driver: prepares per-Configuration simulation inputs and
    dispatches them through the method chosen by the ExecutionContext."""

    def __init__(self, exec_context: ExecutionContext, configs: List[Configuration]) -> None:
        self.SimExecutor = SimExecutor
        self.exec_method = exec_context.method
        self.exec_context = exec_context.name
        self.configs = configs

    def execute(self) -> Tuple[List[Dict[str, Any]], DataFrame]:
        """Run every configuration.

        single_proc: returns (flat result, tensor_field DataFrame).
        multi_proc: returns a list of (flat result, tensor_field) pairs,
        one per configuration."""
        config_proc = Processor()
        create_tensor_field = TensorFieldReport(config_proc).create_tensor_field

        print(r'''
 __________ ____
________ __ _____/ ____/ | / __ \
/ ___/ __` / __ / / / /| | / / / /
/ /__/ /_/ / /_/ / /___/ ___ |/ /_/ /
\___/\__,_/\__,_/\____/_/ |_/_____/
by BlockScience
''')
        print(f'Execution Mode: {self.exec_context + ": " + str(self.configs)}')
        print(f'Configurations: {self.configs}')

        # Per-configuration input lists, all kept in the same order.
        var_dict_list, states_lists, Ts, Ns, eps, configs_structs, env_processes_list, partial_state_updates, simulation_execs = \
            [], [], [], [], [], [], [], [], []
        config_idx = 0

        for x in self.configs:

            Ts.append(x.sim_config['T'])
            Ns.append(x.sim_config['N'])
            var_dict_list.append(x.sim_config['M'])
            states_lists.append([x.initial_state])
            eps.append(list(x.exogenous_states.values()))
            configs_structs.append(config_proc.generate_config(x.initial_state, x.partial_state_updates, eps[config_idx]))
            # print(env_processes_list)
            env_processes_list.append(x.env_processes)
            partial_state_updates.append(x.partial_state_updates)
            simulation_execs.append(SimExecutor(x.policy_ops).simulation)

            config_idx += 1

        final_result = None

        if self.exec_context == ExecutionMode.single_proc:
            # Single config: pair the flat result with its tensor-field report.
            tensor_field = create_tensor_field(partial_state_updates.pop(), eps.pop())
            result = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
            final_result = result, tensor_field
        elif self.exec_context == ExecutionMode.multi_proc:
            # if len(self.configs) > 1:
            simulations = self.exec_method(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, Ns)
            results = []
            # NOTE(review): the loop variable below shadows the outer
            # `partial_state_updates` list; it works because the list is not
            # used after this loop, but it is fragile.
            for result, partial_state_updates, ep in list(zip(simulations, partial_state_updates, eps)):
                results.append((flatten(result), create_tensor_field(partial_state_updates, ep)))

            final_result = results

        return final_result
|
||||
|
|
@ -0,0 +1,234 @@
|
|||
from typing import Any, Callable, Dict, List, Tuple
|
||||
from pathos.pools import ThreadPool as TPool
|
||||
from copy import deepcopy
|
||||
from functools import reduce
|
||||
|
||||
from cadCAD.engine.utils import engine_exception
|
||||
from cadCAD.utils import flatten
|
||||
|
||||
id_exception: Callable = engine_exception(KeyError, KeyError, None)
|
||||
|
||||
|
||||
class Executor:
    """Runs one simulation: aggregates policy inputs, applies state updates
    and environmental processes across substeps, timesteps and threaded runs."""

    def __init__(
        self,
        policy_ops: List[Callable],
        policy_update_exception: Callable = id_exception,
        state_update_exception: Callable = id_exception
    ) -> None:
        # policy_ops: operators that aggregate multiple policies' outputs;
        # the *_exception callables wrap updates with error handling.
        self.policy_ops = policy_ops
        self.state_update_exception = state_update_exception
        self.policy_update_exception = policy_update_exception

    def get_policy_input(
        self,
        sweep_dict: Dict[str, List[Any]],
        sub_step: int,
        sL: List[Dict[str, Any]],
        s: Dict[str, Any],
        funcs: List[Callable]
    ) -> Dict[str, Any]:
        """Evaluate every policy function and aggregate their dict outputs
        key-wise: the first policy_op reduces each key's value list, the
        remaining ops are applied to the result in sequence."""

        ops = self.policy_ops

        def get_col_results(sweep_dict, sub_step, sL, s, funcs):
            # One result dict per policy function.
            return list(map(lambda f: f(sweep_dict, sub_step, sL, s), funcs))

        def compose(init_reduction_funct, funct_list, val_list):
            # Reduce val_list with the first operator, then thread the result
            # through the remaining operators left-to-right.
            result, i = None, 0
            composition = lambda x: [reduce(init_reduction_funct, x)] + funct_list
            for g in composition(val_list):
                if i == 0:
                    result = g
                    i = 1
                else:
                    result = g(result)
            return result

        col_results = get_col_results(sweep_dict, sub_step, sL, s, funcs)
        # Union of all keys produced by any policy.
        key_set = list(set(list(reduce(lambda a, b: a + b, list(map(lambda x: list(x.keys()), col_results))))))
        new_dict = {k: [] for k in key_set}
        for d in col_results:
            for k in d.keys():
                new_dict[k].append(d[k])

        ops_head, *ops_tail = ops
        return {
            k: compose(
                init_reduction_funct=ops_head, # func executed on value list
                funct_list=ops_tail,
                val_list=val_list
            ) for k, val_list in new_dict.items()
        }


    def apply_env_proc(
        self,
        sweep_dict,
        env_processes: Dict[str, Callable],
        state_dict: Dict[str, Any],
    ) -> Dict[str, Any]:
        """Apply each environmental process to its matching state field,
        mutating and returning *state_dict*."""

        def env_composition(target_field, state_dict, target_value):
            function_type = type(lambda x: x)
            env_update = env_processes[target_field]
            if isinstance(env_update, list):
                # A list of processes is applied in sequence.
                for f in env_update:
                    target_value = f(sweep_dict, target_value)
            elif isinstance(env_update, function_type):
                target_value = env_update(state_dict, sweep_dict, target_value)
            else:
                # A bare (non-callable) value replaces the field outright.
                target_value = env_update

            return target_value

        filtered_state_dict = {k: v for k, v in state_dict.items() if k in env_processes.keys()}
        env_proc_dict = {
            target_field: env_composition(target_field, state_dict, target_value)
            for target_field, target_value in filtered_state_dict.items()
        }

        for k, v in env_proc_dict.items():
            state_dict[k] = v

        return state_dict

    # mech_step
    def partial_state_update(
        self,
        sweep_dict: Dict[str, List[Any]],
        sub_step: int,
        sL: Any,
        sH: Any,
        state_funcs: List[Callable],
        policy_funcs: List[Callable],
        env_processes: Dict[str, Callable],
        time_step: int,
        run: int
    ) -> List[Dict[str, Any]]:
        """Execute one substep: compute the aggregated policy input, apply all
        state-update functions then environmental processes, stamp system
        metrics, and append the new state to *sL* (which is returned)."""

        last_in_obj: Dict[str, Any] = deepcopy(sL[-1])
        _input: Dict[str, Any] = self.policy_update_exception(
            self.get_policy_input(sweep_dict, sub_step, sH, last_in_obj, policy_funcs)
        )

        def generate_record(state_funcs):
            # Each state function yields a (key, value) pair for the new state.
            for f in state_funcs:
                yield self.state_update_exception(f(sweep_dict, sub_step, sH, last_in_obj, _input))

        def transfer_missing_fields(source, destination):
            # Carry forward, unchanged, any field no state function touched.
            for k in source:
                if k not in destination:
                    destination[k] = source[k]
            del source # last_in_obj
            return destination

        last_in_copy: Dict[str, Any] = transfer_missing_fields(last_in_obj, dict(generate_record(state_funcs)))
        last_in_copy: Dict[str, Any] = self.apply_env_proc(sweep_dict, env_processes, last_in_copy)
        last_in_copy['substep'], last_in_copy['timestep'], last_in_copy['run'] = sub_step, time_step, run

        sL.append(last_in_copy)
        del last_in_copy

        return sL

    # mech_pipeline - state_update_block
    def state_update_pipeline(
        self,
        sweep_dict: Dict[str, List[Any]],
        simulation_list, #states_list: List[Dict[str, Any]],
        configs: List[Tuple[List[Callable], List[Callable]]],
        env_processes: Dict[str, Callable],
        time_step: int,
        run: int
    ) -> List[Dict[str, Any]]:
        """Run every configured PSUB once for a single timestep, starting from
        the last state of the previous timestep."""

        sub_step = 0
        states_list_copy: List[Dict[str, Any]] = deepcopy(simulation_list[-1])
        genesis_states: Dict[str, Any] = states_list_copy[-1]

        if len(states_list_copy) == 1:
            genesis_states['substep'] = sub_step
            # genesis_states['timestep'] = 0
        # else:
        #     genesis_states['timestep'] = time_step

        del states_list_copy
        states_list: List[Dict[str, Any]] = [genesis_states]

        sub_step += 1

        for [s_conf, p_conf] in configs: # tensor field
            states_list: List[Dict[str, Any]] = self.partial_state_update(
                sweep_dict, sub_step, states_list, simulation_list, s_conf, p_conf, env_processes, time_step, run
            )
            sub_step += 1

        time_step += 1

        return states_list

    # state_update_pipeline
    def run_pipeline(
        self,
        sweep_dict: Dict[str, List[Any]],
        states_list: List[Dict[str, Any]],
        configs: List[Tuple[List[Callable], List[Callable]]],
        env_processes: Dict[str, Callable],
        time_seq: range,
        run: int
    ) -> List[List[Dict[str, Any]]]:
        """Advance the simulation through every timestep of *time_seq*,
        accumulating one state list per timestep."""

        time_seq: List[int] = [x + 1 for x in time_seq]
        simulation_list: List[List[Dict[str, Any]]] = [states_list]

        for time_step in time_seq:
            pipe_run: List[Dict[str, Any]] = self.state_update_pipeline(
                sweep_dict, simulation_list, configs, env_processes, time_step, run
            )
            # Drop the carried-over genesis state before recording this timestep.
            _, *pipe_run = pipe_run
            simulation_list.append(pipe_run)

        return simulation_list

    def simulation(
        self,
        sweep_dict: Dict[str, List[Any]],
        states_list: List[Dict[str, Any]],
        configs: List[Tuple[List[Callable], List[Callable]]],
        env_processes: Dict[str, Callable],
        time_seq: range,
        runs: int
    ) -> List[List[Dict[str, Any]]]:
        """Execute *runs* independent runs of the pipeline on a thread pool
        and return the flattened, per-run results."""

        def execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run) -> List[Dict[str, Any]]:
            run += 1  # runs are reported 1-based

            def generate_init_sys_metrics(genesis_states_list):
                # Stamp initial system metrics onto each genesis state.
                for d in genesis_states_list:
                    d['run'], d['substep'], d['timestep'] = run, 0, 0
                    yield d

            states_list_copy: List[Dict[str, Any]] = list(generate_init_sys_metrics(deepcopy(states_list)))

            first_timestep_per_run: List[Dict[str, Any]] = self.run_pipeline(
                sweep_dict, states_list_copy, configs, env_processes, time_seq, run
            )
            del states_list_copy

            return first_timestep_per_run

        tp = TPool(runs)
        pipe_run: List[List[Dict[str, Any]]] = flatten(
            tp.map(
                lambda run: execute_run(sweep_dict, states_list, configs, env_processes, time_seq, run),
                list(range(runs))
            )
        )

        tp.clear()
        return pipe_run
|
||||
|
|
@ -33,9 +33,6 @@ def engine_exception(ErrorType, error_message, exception_function, try_function)
|
|||
return exception_function
|
||||
|
||||
|
||||
# def exception_handler(f, m_step, sL, last_mut_obj, _input):
|
||||
# try:
|
||||
# return f(m_step, sL, last_mut_obj, _input)
|
||||
# except KeyError:
|
||||
# print("Exception")
|
||||
# return f(m_step, sL, sL[-2], _input)
|
||||
@curried
def fit_param(param, x):
    """Curried addition: shift *x* by a fixed *param*."""
    return x + param
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
from functools import reduce
|
||||
from typing import Dict, List
|
||||
from collections import defaultdict, Counter
|
||||
from itertools import product
|
||||
import warnings
|
||||
|
||||
from pandas import DataFrame
|
||||
|
||||
|
||||
class SilentDF(DataFrame):
    """DataFrame subclass whose repr is a short hex id instead of the full
    table dump (keeps member listings readable)."""
    def __repr__(self):
        # NOTE(review): this prints id(DataFrame) — the id of the *class*,
        # identical for every instance. id(self) may have been intended;
        # confirm before changing.
        return str(hex(id(DataFrame))) #"pandas.core.frame.DataFrame"
|
||||
|
||||
|
||||
def append_dict(dict, new_dict):
    """Merge *new_dict* into *dict* in place and return the mutated dict.

    NOTE(review): the first parameter shadows the builtin `dict`; renaming it
    would change the keyword interface, so it is left as-is.
    """
    dict.update(new_dict)
    return dict
|
||||
|
||||
|
||||
class IndexCounter:
    """Stateful 1-based counter: each call returns the next index."""

    def __init__(self):
        self.i = 0

    def __call__(self):
        next_index = self.i + 1
        self.i = next_index
        return next_index
|
||||
|
||||
|
||||
def compose(*functions):
    """Right-to-left function composition: compose(f, g)(x) == f(g(x)).
    With no arguments, returns the identity function."""
    def compose_pair(outer, inner):
        return lambda x: outer(inner(x))
    identity = lambda x: x
    return reduce(compose_pair, functions, identity)
|
||||
|
||||
def pipe(x):
    """Identity passthrough (pipeline placeholder)."""
    return x


def print_pipe(x):
    """Passthrough that prints *x*, for debugging pipelines."""
    print(x)
    return x
|
||||
|
||||
|
||||
def flattenDict(l):
    """Expand a dict whose values may be lists into the cartesian product of
    scalar dicts: {'a': [1, 2], 'b': 3} -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]."""
    def as_pairs(key, values):
        # Each key contributes one (key, value) pair per candidate value.
        if isinstance(values, list):
            return [(key, value) for value in values]
        return [(key, values)]

    pair_groups = [as_pairs(k, vs) for k, vs in l.items()]
    return [dict(combo) for combo in product(*pair_groups)]
|
||||
|
||||
|
||||
def flatten(l):
    """Flatten one level of a list of lists, or expand a dict via flattenDict.

    Any other input type returns None (original behavior preserved)."""
    if isinstance(l, dict):
        return flattenDict(l)
    if isinstance(l, list):
        flat = []
        for sub in l:
            flat.extend(sub)
        return flat
|
||||
|
||||
|
||||
def flatMap(f, collection):
    """Map *f* over *collection*, then flatten the results one level."""
    mapped = [f(item) for item in collection]
    return flatten(mapped)
|
||||
|
||||
|
||||
def dict_filter(dictionary, condition):
    """Keep only the entries whose *value* satisfies *condition*."""
    return {k: v for k, v in dictionary.items() if condition(v)}
|
||||
|
||||
|
||||
def get_max_dict_val_len(g: Dict[str, List[int]]) -> int:
    """Length of the longest value list in *g*."""
    return max(len(values) for values in g.values())
|
||||
|
||||
|
||||
def tabulate_dict(d: Dict[str, List[int]]) -> Dict[str, List[int]]:
    """Pad every value list in *d* to the length of the longest one by
    repeating its last element, so all lists can be zipped column-wise.

    Bug fix: the pad count was previously hard-coded as (max_len - 1)
    instead of (max_len - len(vl)), so any list of length > 1 but < max_len
    came out longer than max_len.
    """
    # Inlined max-length computation (same as get_max_dict_val_len) so this
    # helper is self-contained.
    max_len = len(max(d.values(), key=len))
    padded = {}
    for k, vl in d.items():
        if len(vl) != max_len:
            padded[k] = vl + [vl[-1]] * (max_len - len(vl))
        else:
            padded[k] = vl

    return padded
|
||||
|
||||
|
||||
def flatten_tabulated_dict(d: Dict[str, List[int]]) -> List[Dict[str, int]]:
    """Transpose a dict of equal-length lists into a list of per-index dicts:
    {'a': [1, 2], 'b': [3, 4]} -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}]."""
    max_len = get_max_dict_val_len(d)
    rows = [{} for _ in range(max_len)]

    for k, vl in d.items():
        for i, v in enumerate(vl):
            rows[i][k] = v

    return rows
|
||||
|
||||
|
||||
def contains_type(_collection, type):
    """True when any element of *_collection* is an instance of *type*.

    (The second parameter shadows the builtin `type`; kept for
    keyword-compatibility.)"""
    for element in _collection:
        if isinstance(element, type):
            return True
    return False
|
||||
|
||||
|
||||
def drop_right(l, n):
    """Return a copy of *l* without its last *n* elements."""
    keep = len(l) - n
    return l[:keep]
|
||||
|
||||
|
||||
def key_filter(l, keyname):
    """Project *keyname* out of every partial-state-update block in *l*.

    *l* is normally a list of dicts; the legacy dict-of-dicts form is still
    accepted but emits a FutureWarning.
    (Idiom fix: `type(l) == list` replaced with `isinstance`, and the dict
    branch iterates `.values()` since the keys were unused.)
    """
    if isinstance(l, list):
        return [v[keyname] for v in l]
    # Keeping support to dictionaries for backwards compatibility
    # Should be removed in the future
    warnings.warn(
        "The use of a dictionary to describe Partial State Update Blocks will be deprecated. Use a list instead.",
        FutureWarning)
    return [v[keyname] for v in l.values()]
|
||||
|
||||
|
||||
def groupByKey(l):
    """Group (key, value) pairs by key, then return the LAST (key, values)
    pair in insertion order (note: only one group is returned)."""
    grouped = defaultdict(list)
    for key, value in l:
        grouped[key].append(value)
    pairs = list(dict(grouped).items())
    return pairs.pop()
|
||||
|
||||
|
||||
# @curried
def rename(new_name, f):
    """Set f.__name__ to *new_name* in place and return *f*."""
    f.__name__ = new_name
    return f
|
||||
|
||||
|
||||
def curry_pot(f, *argv):
    """Call *f* with 3 or 4 positional arguments, using curried application
    (f(a)(b)(c)...) when f's name starts with 'sweep', plain application
    otherwise. Raises TypeError for any other arity."""
    is_sweep = f.__name__.startswith('sweep')
    n_args = len(argv)

    if n_args not in (3, 4):
        raise TypeError('curry_pot() needs 3 or 4 positional arguments')

    if is_sweep:
        result = f
        for arg in argv:
            result = result(arg)
        return result
    return f(*argv)
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
from funcy import curry
|
||||
from cadCAD.configuration.utils import ep_time_step, time_step
|
||||
|
||||
|
||||
def increment(y, incr_by):
    """State update adding *incr_by* to state key *y*."""
    def _update(_g, step, sL, s, _input):
        return y, s[y] + incr_by
    return _update


def track(y):
    """State update recording the `.x` attribute of the object stored at *y*."""
    def _update(_g, step, sL, s, _input):
        return y, s[y].x
    return _update


def simple_state_update(y, x):
    """State update setting key *y* to the constant *x*."""
    def _update(_g, step, sH, s, _input):
        return y, x
    return _update


def simple_policy_update(y):
    """Policy that always returns the constant *y*."""
    def _policy(_g, step, sH, s):
        return y
    return _policy
|
||||
|
||||
|
||||
def update_timestamp(y, timedelta, format):
    """State update advancing timestamp field *y* by *timedelta* (delegates
    to ep_time_step; the `format` parameter shadows nothing imported here).

    NOTE(review): ep_time_step's first argument is a truthiness condition;
    passing the whole state dict `s` means the update fires whenever the
    state is non-empty — confirm this is intended.
    """
    return lambda _g, step, sL, s, _input: (
        y,
        ep_time_step(s, dt_str=s[y], fromat_str=format, _timedelta=timedelta)
    )
|
||||
|
||||
|
||||
def apply(f, y: str, incr_by: int):
    """Build a state update function that sets *y* to ``f(s[y], incr_by)``.

    The original routed the call through ``funcy.curry`` —
    ``curry(f)(s[y])(incr_by)`` — which, for the binary *f* this builder is
    designed for, is exactly ``f(s[y], incr_by)``; the direct call removes
    the needless third-party indirection.
    """
    def _update(_g, step, sL, s, _input):
        return y, f(s[y], incr_by)
    return _update
|
||||
|
||||
|
||||
def add(y: str, incr_by):
    """Build a state update function that increments state variable *y*
    by *incr_by* (delegates to :func:`apply` with addition)."""
    def _plus(a, b):
        return a + b
    return apply(_plus, y, incr_by)
|
||||
|
||||
|
||||
def increment_state_by_int(y: str, incr_by: int):
    """Build a state update function adding the integer *incr_by* to state
    variable *y*."""
    def _update(_g, step, sL, s, _input):
        return y, s[y] + incr_by
    return _update
|
||||
|
||||
|
||||
def s(y, x):
    """Build a state update function that constantly assigns *x* to state
    variable *y* (shorthand for :func:`simple_state_update`)."""
    def _update(_g, step, sH, s, _input):
        # Inner `s` (current state) deliberately shadows the builder's name.
        return y, x
    return _update
|
||||
|
||||
|
||||
def time_model(y, substeps, time_delta, ts_format='%Y-%m-%d %H:%M:%S'):
    """Build a state update function that advances timestamp *y* once per
    timestep: the clock moves only on substep 0 or the final substep
    (*substeps*); every other substep carries the current value through.
    """
    def _update(_g, step, sL, s, _input):
        substep = s['substep']
        if substep in (0, substeps):
            return y, time_step(dt_str=s[y], dt_format=ts_format, _timedelta=time_delta)
        return y, s[y]
    return _update
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 502 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 528 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 578 KiB |
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
Binary file not shown.
|
Before Width: | Height: | Size: 280 KiB |
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
|
|
@ -0,0 +1,91 @@
|
|||
Historical State Access
|
||||
==
|
||||
#### Motivation
|
||||
The current state (values of state variables) is accessed through the `s` list. When the user requires previous state variable values, they may be accessed through the state history list, `sH`. Accessing the state history should be implemented without creating unintended feedback loops on the current state.
|
||||
|
||||
The 3rd parameter of state and policy update functions (labeled as `sH` of type `List[List[dict]]`) provides access to past Partial State Update Block (PSUB) given a negative offset number. `access_block` is used to access past PSUBs (`List[dict]`) from `sH`. For example, an offset of `-2` denotes the second to last PSUB.
|
||||
|
||||
#### Exclusion List
|
||||
Create a list of states to exclude from the reported PSU.
|
||||
```python
|
||||
exclusion_list = [
|
||||
'nonexistent', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x'
|
||||
]
|
||||
```
|
||||
##### Example Policy Updates
|
||||
###### Last partial state update
|
||||
```python
|
||||
def last_update(_params, substep, sH, s):
|
||||
return {"last_x": access_block(
|
||||
state_history=sH,
|
||||
target_field="last_x", # Add a field to the exclusion list
|
||||
psu_block_offset=-1,
|
||||
exculsion_list=exclusion_list
|
||||
)
|
||||
}
|
||||
```
|
||||
* Note: Although `target_field` adding a field to the exclusion may seem redundant, it is useful in the case of the exclusion list being empty while the `target_field` is assigned to a state or a policy key.
|
||||
##### Define State Updates
|
||||
###### 2nd to last partial state update
|
||||
```python
|
||||
def second2last_update(_params, substep, sH, s):
|
||||
return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
|
||||
```
|
||||
|
||||
|
||||
###### 3rd to last partial state update
|
||||
```python
|
||||
def third_to_last_x(_params, substep, sH, s, _input):
|
||||
return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
|
||||
```
|
||||
###### 4th to last partial state update
|
||||
```python
|
||||
def fourth_to_last_x(_params, substep, sH, s, _input):
|
||||
return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
|
||||
```
|
||||
###### Non-existent partial state update
|
||||
* `psu_block_offset >= 0` doesn't exist
|
||||
```python
|
||||
def nonexistent(_params, substep, sH, s, _input):
|
||||
return 'nonexistent', access_block(sH, "nonexistent", 0, exclusion_list)
|
||||
```
|
||||
|
||||
#### [Example Simulation:](examples/historical_state_access.py)
|
||||
|
||||
|
||||
#### Example Output:
|
||||
###### State History
|
||||
```
|
||||
+----+-------+-----------+------------+-----+
|
||||
| | run | substep | timestep | x |
|
||||
|----+-------+-----------+------------+-----|
|
||||
| 0 | 1 | 0 | 0 | 0 |
|
||||
| 1 | 1 | 1 | 1 | 1 |
|
||||
| 2 | 1 | 2 | 1 | 2 |
|
||||
| 3 | 1 | 3 | 1 | 3 |
|
||||
| 4 | 1 | 1 | 2 | 4 |
|
||||
| 5 | 1 | 2 | 2 | 5 |
|
||||
| 6 | 1 | 3 | 2 | 6 |
|
||||
| 7 | 1 | 1 | 3 | 7 |
|
||||
| 8 | 1 | 2 | 3 | 8 |
|
||||
| 9 | 1 | 3 | 3 | 9 |
|
||||
+----+-------+-----------+------------+-----+
|
||||
```
|
||||
###### Accessed State History:
|
||||
Example: `last_x`
|
||||
```
|
||||
+----+-----------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
| | last_x |
|
||||
|----+-----------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| 0 | [] |
|
||||
| 1 | [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}] |
|
||||
| 2 | [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}] |
|
||||
| 3 | [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}] |
|
||||
| 4 | [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}] |
|
||||
| 5 | [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}] |
|
||||
| 6 | [{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1}, {'x': 2, 'run': 1, 'substep': 2, 'timestep': 1}, {'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}] |
|
||||
| 7 | [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}] |
|
||||
| 8 | [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}] |
|
||||
| 9 | [{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2}, {'x': 5, 'run': 1, 'substep': 2, 'timestep': 2}, {'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}] |
|
||||
+----+-----------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
```
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
Policy Aggregation
|
||||
==
|
||||
|
||||
For each Partial State Update, multiple policy dictionaries are aggregated into a single dictionary to be passed as input into
|
||||
all state functions using an initial reduction function and optional subsequent map functions.
|
||||
|
||||
#### Aggregate Function Composition:
|
||||
```python
|
||||
# Reduce Function
|
||||
add = lambda a, b: a + b # Used to add policy values of the same key
|
||||
# Map Function
|
||||
mult_by_2 = lambda y: y * 2 # Used to multiply all policy values by 2
|
||||
policy_ops=[add, mult_by_2]
|
||||
```
|
||||
|
||||
##### Example Policy Updates per Partial State Update (PSU)
|
||||
```python
|
||||
def p1_psu1(_params, step, sH, s):
|
||||
return {'policy1': 1}
|
||||
def p2_psu1(_params, step, sH, s):
|
||||
return {'policy2': 2}
|
||||
```
|
||||
* `add` not applicable due to lack of redundant policies
|
||||
* `mult_by_2` applied to all policies
|
||||
* Result: `{'policy1': 2, 'policy2': 4}`
|
||||
|
||||
```python
|
||||
def p1_psu2(_params, step, sH, s):
|
||||
return {'policy1': 2, 'policy2': 2}
|
||||
def p2_psu2(_params, step, sH, s):
|
||||
return {'policy1': 2, 'policy2': 2}
|
||||
```
|
||||
* `add` applicable due to redundant policies
|
||||
* `mult_by_2` applied to all policies
|
||||
* Result: `{'policy1': 8, 'policy2': 8}`
|
||||
|
||||
```python
|
||||
def p1_psu3(_params, step, sH, s):
|
||||
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
|
||||
def p2_psu3(_params, step, sH, s):
|
||||
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
|
||||
```
|
||||
* `add` applicable due to redundant policies
|
||||
* `mult_by_2` applied to all policies
|
||||
* Result: `{'policy1': 4, 'policy2': 8, 'policy3': 12}`
|
||||
|
||||
#### Aggregate Policies using functions
|
||||
```python
|
||||
from cadCAD.configuration import append_configs
|
||||
|
||||
append_configs(
|
||||
sim_configs=???,
|
||||
initial_state=???,
|
||||
partial_state_update_blocks=???,
|
||||
policy_ops=[add, mult_by_2] # Default: [lambda a, b: a + b]
|
||||
)
|
||||
```
|
||||
|
||||
#### Example
|
||||
##### * [System Model Configuration](examples/policy_aggregation.py)
|
||||
##### * Simulation Results:
|
||||
```
|
||||
+----+---------------------------------------------+-------+------+-----------+------------+
|
||||
| | policies | run | s1 | substep | timestep |
|
||||
|----+---------------------------------------------+-------+------+-----------+------------|
|
||||
| 0 | {} | 1 | 0 | 0 | 0 |
|
||||
| 1 | {'policy1': 2, 'policy2': 4} | 1 | 1 | 1 | 1 |
|
||||
| 2 | {'policy1': 8, 'policy2': 8} | 1 | 2 | 2 | 1 |
|
||||
| 3 | {'policy3': 12, 'policy1': 4, 'policy2': 8} | 1 | 3 | 3 | 1 |
|
||||
| 4 | {'policy1': 2, 'policy2': 4} | 1 | 4 | 1 | 2 |
|
||||
| 5 | {'policy1': 8, 'policy2': 8} | 1 | 5 | 2 | 2 |
|
||||
| 6 | {'policy3': 12, 'policy1': 4, 'policy2': 8} | 1 | 6 | 3 | 2 |
|
||||
| 7 | {'policy1': 2, 'policy2': 4} | 1 | 7 | 1 | 3 |
|
||||
| 8 | {'policy1': 8, 'policy2': 8} | 1 | 8 | 2 | 3 |
|
||||
| 9 | {'policy3': 12, 'policy1': 4, 'policy2': 8} | 1 | 9 | 3 | 3 |
|
||||
+----+---------------------------------------------+-------+------+-----------+------------+
|
||||
```
|
||||
|
|
@ -0,0 +1,241 @@
|
|||
Simulation Configuration
|
||||
==
|
||||
|
||||
## Introduction
|
||||
|
||||
Given a **Simulation Configuration**, cadCAD produces datasets that represent the evolution of the state of a system
|
||||
over [discrete time](https://en.wikipedia.org/wiki/Discrete_time_and_continuous_time#Discrete_time). The state of the
|
||||
system is described by a set of [State Variables](#State-Variables). The dynamic of the system is described by
|
||||
[Policy Functions](#Policy-Functions) and [State Update Functions](#State-Update-Functions), which are evaluated by
|
||||
cadCAD according to the definitions set by the user in [Partial State Update Blocks](#Partial-State-Update-Blocks).
|
||||
|
||||
A Simulation Configuration is comprised of a [System Model](#System-Model) and a set of
|
||||
[Simulation Properties](#Simulation-Properties)
|
||||
|
||||
`append_configs`, stores a **Simulation Configuration** to be [Executed](/JS4Q9oayQASihxHBJzz4Ug) by cadCAD
|
||||
|
||||
```python
|
||||
from cadCAD.configuration import append_configs
|
||||
|
||||
append_configs(
|
||||
initial_state = ..., # System Model
|
||||
partial_state_update_blocks = .., # System Model
|
||||
policy_ops = ..., # System Model
|
||||
sim_configs = ... # Simulation Properties
|
||||
)
|
||||
```
|
||||
Parameters:
|
||||
* **initial_state** : _dict_ - [State Variables](#State-Variables) and their initial values
|
||||
* **partial_state_update_blocks** : List[dict[dict]] - List of [Partial State Update Blocks](#Partial-State-Update-Blocks)
|
||||
* **policy_ops** : List[functions] - See [Policy Aggregation](/63k2ncjITuqOPCUHzK7Viw)
|
||||
* **sim_configs** - See [System Model Parameter Sweep](/4oJ_GT6zRWW8AO3yMhFKrg)
|
||||
|
||||
## Simulation Properties
|
||||
|
||||
Simulation properties are passed to `append_configs` in the `sim_configs` parameter. To construct this parameter, we
|
||||
use the `config_sim` function in `cadCAD.configuration.utils`
|
||||
|
||||
```python
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
|
||||
c = config_sim({
|
||||
"N": ...,
|
||||
"T": range(...),
|
||||
"M": ...
|
||||
})
|
||||
|
||||
append_configs(
|
||||
...
|
||||
sim_configs = c # Simulation Properties
|
||||
)
|
||||
```
|
||||
|
||||
### T - Simulation Length
|
||||
Computer simulations run in discrete time:
|
||||
|
||||
>Discrete time views values of variables as occurring at distinct, separate "points in time", or equivalently as being
|
||||
unchanged throughout each non-zero region of time ("time period")—that is, time is viewed as a discrete variable. (...)
|
||||
This view of time corresponds to a digital clock that gives a fixed reading of 10:37 for a while, and then jumps to a
|
||||
new fixed reading of 10:38, etc.
|
||||
([source: Wikipedia](https://en.wikipedia.org/wiki/Discrete_time_and_continuous_time#Discrete_time))
|
||||
|
||||
As is common in many simulation tools, in cadCAD too we refer to each discrete unit of time as a **timestep**. cadCAD
|
||||
increments a "time counter", and at each step it updates the state variables according to the equations that describe
|
||||
the system.
|
||||
|
||||
The main simulation property that the user must set when creating a Simulation Configuration is the number of timesteps
|
||||
in the simulation. In other words, for how long do they want to simulate the system that has been modeled.
|
||||
|
||||
### N - Number of Runs
|
||||
|
||||
cadCAD facilitates running multiple simulations of the same system sequentially, reporting the results of all those
|
||||
runs in a single dataset. This is especially helpful for running
|
||||
[Monte Carlo Simulations](../tutorials/robot-marbles-part-4/robot-marbles-part-4.ipynb).
|
||||
|
||||
### M - Parameters of the System
|
||||
|
||||
Parameters of the system, passed to the state update functions and the policy functions in the `params` parameter are
|
||||
defined here. See [System Model Parameter Sweep](/4oJ_GT6zRWW8AO3yMhFKrg) for more information.
|
||||
|
||||
## System Model
|
||||
The System Model describes the system that will be simulated in cadCAD. It is comprised of a set of
|
||||
[State Variables](#State-Variables) and the [State Update Functions](#State-Update-Functions) that determine the
|
||||
evolution of the state of the system over time. [Policy Functions](#Policy-Functions) (representations of user policies
|
||||
or internal system control policies) may also be part of a System Model.
|
||||
|
||||
### State Variables
|
||||
>A state variable is one of the set of variables that are used to describe the mathematical "state" of a dynamical
|
||||
system. Intuitively, the state of a system describes enough about the system to determine its future behaviour in the
|
||||
absence of any external forces affecting the system. ([source: Wikipedia](https://en.wikipedia.org/wiki/State_variable))
|
||||
|
||||
cadCAD can handle state variables of any Python data type, including custom classes. It is up to the user of cadCAD to
|
||||
determine the state variables needed to **sufficiently and accurately** describe the system they are interested in.
|
||||
|
||||
State Variables are passed to `append_configs` along with its initial values, as a Python `dict` where the `dict_keys`
|
||||
are the names of the variables and the `dict_values` are their initial values.
|
||||
|
||||
```python
|
||||
from cadCAD.configuration import append_configs
|
||||
|
||||
genesis_states = {
|
||||
'state_variable_1': 0,
|
||||
'state_variable_2': 0,
|
||||
'state_variable_3': 1.5,
|
||||
'timestamp': '2019-01-01 00:00:00'
|
||||
}
|
||||
|
||||
append_configs(
|
||||
initial_state = genesis_states,
|
||||
...
|
||||
)
|
||||
```
|
||||
### State Update Functions
|
||||
State Update Functions represent equations according to which the state variables change over time. Each state update
|
||||
function must return a tuple containing a string with the name of the state variable being updated and its new value.
|
||||
Each state update function can only modify a single state variable. The general structure of a state update function is:
|
||||
```python
|
||||
def state_update_function_A(_params, substep, sH, s, _input):
|
||||
...
|
||||
return 'state_variable_name', new_value
|
||||
```
|
||||
Parameters:
|
||||
* **_params** : _dict_ - [System parameters](/4oJ_GT6zRWW8AO3yMhFKrg)
|
||||
* **substep** : _int_ - Current [substep](#Substep)
|
||||
* **sH** : _list[list[dict_]] - Historical values of all state variables for the simulation. See
|
||||
[Historical State Access](/smiyQTnATtC9xPwvF8KbBQ) for details
|
||||
* **s** : _dict_ - Current state of the system, where the `dict_keys` are the names of the state variables and the
|
||||
`dict_values` are their current values.
|
||||
* **_input** : _dict_ - Aggregation of the signals of all policy functions in the current
|
||||
[Partial State Update Block](#Partial-State-Update-Block)
|
||||
|
||||
Return:
|
||||
* _tuple_ containing a string with the name of the state variable being updated and its new value.
|
||||
|
||||
State update functions should not modify any of the parameters passed to it, as those are mutable Python objects that
|
||||
cadCAD relies on in order to run the simulation according to the specifications.
|
||||
|
||||
### Policy Functions
|
||||
A Policy Function computes one or more signals to be passed to [State Update Functions](#State-Update-Functions)
|
||||
(via the _\_input_ parameter). Read
|
||||
[this article](../tutorials/robot-marbles-part-2/robot-marbles-part-2.ipynb)
|
||||
for details on why and when to use policy functions.
|
||||
|
||||
<!-- We would then expand the tutorials with these kind of concepts
|
||||
#### Policies
|
||||
Policies consist of the potential action made available through mechanisms. The action taken is expected to be the
|
||||
result of a conditional determination of the past state.
|
||||
|
||||
While executed the same, the modeller can approach policies dependent on the availability of a mechanism to a population.
|
||||
|
||||
- ***Control Policy***
|
||||
When the controlling or deploying entity has the ability to act in order to affect some aspect of the system, this is a
|
||||
control policy.
|
||||
- ***User Policy*** models agent behaviors in reaction to state variables and exogenous variables. The resulting user
|
||||
action will become an input to PSUs. Note that user behaviors should not directly update value of state variables.
|
||||
The action taken, as well as the potential to act, through a mechanism is a behavior. -->
|
||||
|
||||
The general structure of a policy function is:
|
||||
```python
|
||||
def policy_function_1(_params, substep, sH, s):
|
||||
...
|
||||
return {'signal_1': value_1, ..., 'signal_N': value_N}
|
||||
```
|
||||
Parameters:
|
||||
* **_params** : _dict_ - [System parameters](/4oJ_GT6zRWW8AO3yMhFKrg)
|
||||
* **substep** : _int_ - Current [substep](#Substep)
|
||||
* **sH** : _list[list[dict_]] - Historical values of all state variables for the simulation. See
|
||||
[Historical State Access](/smiyQTnATtC9xPwvF8KbBQ) for details
|
||||
* **s** : _dict_ - Current state of the system, where the `dict_keys` are the names of the state variables and the
|
||||
`dict_values` are their current values.
|
||||
|
||||
Return:
|
||||
* _dict_ of signals to be passed to the state update functions in the same
|
||||
[Partial State Update Block](#Partial-State-Update-Blocks)
|
||||
|
||||
Policy functions should not modify any of the parameters passed to it, as those are mutable Python objects that cadCAD
|
||||
relies on in order to run the simulation according to the specifications.
|
||||
|
||||
At each [Partial State Update Block](#Partial-State-Update-Blocks) (PSUB), the `dicts` returned by all policy functions
|
||||
within that PSUB are aggregated into a single `dict` using an initial reduction function
|
||||
(a key-wise operation, default: `dic1['keyA'] + dic2['keyA']`) and optional subsequent map functions. The resulting
|
||||
aggregated `dict` is then passed as the `_input` parameter to the state update functions in that PSUB. For more
|
||||
information on how to modify the aggregation method, see [Policy Aggregation](/63k2ncjITuqOPCUHzK7Viw).
|
||||
|
||||
### Partial State Update Blocks
|
||||
|
||||
A **Partial State Update Block** (PSUB) is a set of State Update Functions and Policy Functions such that State Update
|
||||
Functions in the set are independent from each other and Policies in the set are independent from each other and from
|
||||
the State Update Functions in the set. In other words, if a state variable is updated in a PSUB, its new value cannot
|
||||
impact the State Update Functions and Policy Functions in that PSUB - only those in the next PSUB.
|
||||
|
||||

|
||||
|
||||
Partial State Update Blocks are passed to `append_configs` as a List of Python `dicts` where the `dict_keys` are named
|
||||
`"policies"` and `"variables"` and the values are also Python `dicts` where the keys are the names of the policy and
|
||||
state update functions and the values are the functions.
|
||||
|
||||
```python
|
||||
PSUBs = [
|
||||
{
|
||||
"policies": {
|
||||
"b_1": policy_function_1,
|
||||
...
|
||||
"b_J": policy_function_J
|
||||
},
|
||||
"variables": {
|
||||
"s_1": state_update_function_1,
|
||||
...
|
||||
"s_K": state_update_function_K
|
||||
}
|
||||
}, #PSUB_1,
|
||||
{...}, #PSUB_2,
|
||||
...
|
||||
{...} #PSUB_M
|
||||
]
|
||||
|
||||
append_configs(
|
||||
...
|
||||
partial_state_update_blocks = PSUBs,
|
||||
...
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
#### Substep
|
||||
At each timestep, cadCAD iterates over the `partial_state_update_blocks` list. For each Partial State Update Block,
|
||||
cadCAD returns a record containing the state of the system at the end of that PSUB. We refer to that subdivision of a
|
||||
timestep as a `substep`.
|
||||
|
||||
## Result Dataset
|
||||
|
||||
cadCAD returns a dataset containing the evolution of the state variables defined by the user over time, with three `int`
|
||||
indexes:
|
||||
* `run` - id of the [run](#N-Number-of-Runs)
|
||||
* `timestep` - discrete unit of time (the total number of timesteps is defined by the user in the
|
||||
[T Simulation Parameter](#T-Simulation-Length))
|
||||
* `substep` - subdivision of timestep (the number of [substeps](#Substeps) is the same as the number of Partial State
|
||||
Update Blocks)
|
||||
|
||||
Therefore, the total number of records in the resulting dataset is `N` x `T` x `len(partial_state_update_blocks)`
|
||||
|
||||
#### [System Simulation Execution](Simulation_Execution.md)
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
Simulation Execution
|
||||
==
|
||||
System Simulations are executed with the execution engine executor (`cadCAD.engine.Executor`) given System Model
|
||||
Configurations. There are multiple simulation Execution Modes and Execution Contexts.
|
||||
|
||||
### Steps:
|
||||
1. #### *Choose Execution Mode*:
|
||||
* ##### Simulation Execution Modes:
|
||||
`cadCAD` executes a process per System Model Configuration and a thread per System Simulation.
|
||||
##### Class: `cadCAD.engine.ExecutionMode`
|
||||
##### Attributes:
|
||||
* **Single Process:** A single process Execution Mode for a single System Model Configuration (Example:
|
||||
`cadCAD.engine.ExecutionMode().single_proc`).
|
||||
* **Multi-Process:** Multiple process Execution Mode for System Model Simulations which executes on a thread per
|
||||
given System Model Configuration (Example: `cadCAD.engine.ExecutionMode().multi_proc`).
|
||||
2. #### *Create Execution Context using Execution Mode:*
|
||||
```python
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext
|
||||
exec_mode = ExecutionMode()
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
```
|
||||
3. #### *Create Simulation Executor*
|
||||
```python
|
||||
from cadCAD.engine import Executor
|
||||
from cadCAD import configs
|
||||
simulation = Executor(exec_context=single_proc_ctx, configs=configs)
|
||||
```
|
||||
4. #### *Execute Simulation: Produce System Event Dataset*
|
||||
A Simulation execution produces a System Event Dataset and the Tensor Field applied to initial states used to create it.
|
||||
```python
|
||||
import pandas as pd
|
||||
raw_system_events, tensor_field = simulation.execute()
|
||||
|
||||
# Simulation Result Types:
|
||||
# raw_system_events: List[dict]
|
||||
# tensor_field: pd.DataFrame
|
||||
|
||||
# Result System Events DataFrame
|
||||
simulation_result = pd.DataFrame(raw_system_events)
|
||||
```
|
||||
|
||||
##### Example Tensor Field
|
||||
```
|
||||
+----+-----+--------------------------------+--------------------------------+
|
||||
| | m | b1 | s1 |
|
||||
|----+-----+--------------------------------+--------------------------------|
|
||||
| 0 | 1 | <function p1m1 at 0x10c458ea0> | <function s1m1 at 0x10c464510> |
|
||||
| 1 | 2 | <function p1m2 at 0x10c464048> | <function s1m2 at 0x10c464620> |
|
||||
| 2 | 3 | <function p1m3 at 0x10c464400> | <function s1m3 at 0x10c464730> |
|
||||
+----+-----+--------------------------------+--------------------------------+
|
||||
```
|
||||
|
||||
##### Example Result: System Events DataFrame
|
||||
```
|
||||
+----+-------+------------+-----------+------+-----------+
|
||||
| | run | timestep | substep | s1 | s2 |
|
||||
|----+-------+------------+-----------+------+-----------|
|
||||
| 0 | 1 | 0 | 0 | 0 | 0.0 |
|
||||
| 1 | 1 | 1 | 1 | 1 | 4 |
|
||||
| 2 | 1 | 1 | 2 | 2 | 6 |
|
||||
| 3 | 1 | 1 | 3 | 3 | [ 30 300] |
|
||||
| 4 | 2 | 0 | 0 | 0 | 0.0 |
|
||||
| 5 | 2 | 1 | 1 | 1 | 4 |
|
||||
| 6 | 2 | 1 | 2 | 2 | 6 |
|
||||
| 7 | 2 | 1 | 3 | 3 | [ 30 300] |
|
||||
+----+-------+------------+-----------+------+-----------+
|
||||
```
|
||||
|
||||
### Execution Examples:
|
||||
##### Single Simulation Execution (Single Process Execution)
|
||||
Example System Model Configurations:
|
||||
* [System Model A](examples/sys_model_A.py): `/documentation/examples/sys_model_A.py`
|
||||
* [System Model B](examples/sys_model_B.py): `/documentation/examples/sys_model_B.py`
|
||||
Example Simulation Executions:
|
||||
* [System Model A](examples/sys_model_A_exec.py): `/documentation/examples/sys_model_A_exec.py`
|
||||
* [System Model B](examples/sys_model_B_exec.py): `/documentation/examples/sys_model_B_exec.py`
|
||||
```python
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from documentation.examples import sys_model_A
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
# Single Process Execution using a Single System Model Configuration:
|
||||
# sys_model_A
|
||||
sys_model_A = [configs[0]] # sys_model_A
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
sys_model_A_simulation = Executor(exec_context=single_proc_ctx, configs=sys_model_A)
|
||||
|
||||
sys_model_A_raw_result, sys_model_A_tensor_field = sys_model_A_simulation.execute()
|
||||
sys_model_A_result = pd.DataFrame(sys_model_A_raw_result)
|
||||
print()
|
||||
print("Tensor Field: sys_model_A")
|
||||
print(tabulate(sys_model_A_tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Result: System Events DataFrame")
|
||||
print(tabulate(sys_model_A_result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
```
|
||||
|
||||
##### Multiple Simulation Execution
|
||||
|
||||
* ##### *Multi Process Execution*
|
||||
Documentation: Simulation Execution
|
||||
[Example Simulation Executions::](examples/sys_model_AB_exec.py) `/documentation/examples/sys_model_AB_exec.py`
|
||||
Example System Model Configurations:
|
||||
* [System Model A](examples/sys_model_A.py): `/documentation/examples/sys_model_A.py`
|
||||
* [System Model B](examples/sys_model_B.py): `/documentation/examples/sys_model_B.py`
|
||||
```python
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from documentation.examples import sys_model_A, sys_model_B
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
# # Multiple Processes Execution using Multiple System Model Configurations:
|
||||
# # sys_model_A & sys_model_B
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
sys_model_AB_simulation = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
|
||||
i = 0
|
||||
config_names = ['sys_model_A', 'sys_model_B']
|
||||
for sys_model_AB_raw_result, sys_model_AB_tensor_field in sys_model_AB_simulation.execute():
|
||||
sys_model_AB_result = pd.DataFrame(sys_model_AB_raw_result)
|
||||
print()
|
||||
print(f"Tensor Field: {config_names[i]}")
|
||||
print(tabulate(sys_model_AB_tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Result: System Events DataFrame:")
|
||||
print(tabulate(sys_model_AB_result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
i += 1
|
||||
```
|
||||
|
||||
* ##### [*System Model Parameter Sweep*](System_Model_Parameter_Sweep.md)
|
||||
|
||||
[Example:](examples/param_sweep.py) `/documentation/examples/param_sweep.py`
|
||||
```python
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from documentation.examples import param_sweep
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
run = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
|
||||
for raw_result, tensor_field in run.execute():
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print("Tensor Field:")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
```
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
System Model Parameter Sweep
|
||||
==
|
||||
Parametrization of a System Model configuration that produces multiple configurations.
|
||||
|
||||
##### Set Parameters
|
||||
```python
|
||||
params = {
|
||||
'alpha': [1],
|
||||
'beta': [2, 5],
|
||||
'gamma': [3, 4],
|
||||
'omega': [7]
|
||||
}
|
||||
```
|
||||
The parameters above produce 2 simulations.
|
||||
* Simulation 1:
|
||||
* `alpha = 1`
|
||||
* `beta = 2`
|
||||
* `gamma = 3`
|
||||
* `omega = 7`
|
||||
* Simulation 2:
|
||||
* `alpha = 1`
|
||||
* `beta = 5`
|
||||
* `gamma = 4`
|
||||
* `omega = 7`
|
||||
|
||||
All parameters can also be set to include a single parameter each, which will result in a single simulation.
|
||||
|
||||
##### Example State Updates
|
||||
|
||||
Previous State:
|
||||
`y = 0`
|
||||
|
||||
```python
|
||||
def state_update(_params, step, sH, s, _input):
|
||||
y = 'state'
|
||||
x = s['state'] + _params['alpha'] + _params['gamma']
|
||||
return y, x
|
||||
```
|
||||
* Updated State:
|
||||
* Simulation 1: `y = 4 = 0 + 1 + 3`
|
||||
* Simulation 2: `y = 5 = 0 + 1 + 4`
|
||||
|
||||
##### Example Policy Updates
|
||||
```python
|
||||
# Internal States per Mechanism
|
||||
def policies(_params, step, sH, s):
|
||||
return {'beta': _params['beta'], 'gamma': _params['gamma']}
|
||||
```
|
||||
* Simulation 1: `{'beta': 2, 'gamma': 3}`
|
||||
* Simulation 2: `{'beta': 5, 'gamma': 4}`
|
||||
|
||||
##### Configure Simulation
|
||||
```python
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
|
||||
g = {
|
||||
'alpha': [1],
|
||||
'beta': [2, 5],
|
||||
'gamma': [3, 4],
|
||||
'omega': [7]
|
||||
}
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
"M": g,
|
||||
}
|
||||
)
|
||||
```
|
||||
#### Example
|
||||
##### * [System Model Configuration](examples/param_sweep.py)
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
from pprint import pprint

import pandas as pd
from tabulate import tabulate
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from documentation.examples import sys_model_A, sys_model_B
from cadCAD import configs

# Execution-mode flags shared by both runs below.
exec_mode = ExecutionMode()

# --- Run 1: the first registered configuration (sys_model_A), single process. ---
sys_model_A = [configs[0]]
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
sys_model_A_simulation = Executor(exec_context=single_proc_ctx, configs=sys_model_A)

sys_model_A_raw_result, sys_model_A_tensor_field = sys_model_A_simulation.execute()
sys_model_A_result = pd.DataFrame(sys_model_A_raw_result)
print()
print("Tensor Field: sys_model_A")
print(tabulate(sys_model_A_tensor_field, headers='keys', tablefmt='psql'))
print("Result: System Events DataFrame")
print(tabulate(sys_model_A_result, headers='keys', tablefmt='psql'))
print()

# --- Run 2: every registered configuration (sys_model_A & sys_model_B),
# executed concurrently; execute() yields one (events, tensor field) pair
# per configuration. ---
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
sys_model_AB_simulation = Executor(exec_context=multi_proc_ctx, configs=configs)

config_names = ['sys_model_A', 'sys_model_B']
for i, (raw_events, tensor_field) in enumerate(sys_model_AB_simulation.execute()):
    print()
    pprint(raw_events)
    print()
    print(f"Tensor Field: {config_names[i]}")
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
import pandas as pd
from tabulate import tabulate
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim, access_block
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs


policies, variables = {}, {}
# State keys access_block() must skip when copying a historical PSU block.
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']


# --- Policies: read previously recorded partial-state-update (PSU) blocks. ---
# access_block argument order: state_history, target_field, psu_block_offset, exculsion_list
def last_update(_g, substep, sH, s):
    """Expose the previous PSU block (offset -1) as policy input 'last_x'."""
    previous_block = access_block(
        state_history=sH,
        target_field="last_x",
        psu_block_offset=-1,
        exculsion_list=exclusion_list  # (sic) keyword spelling matches the cadCAD API
    )
    return {"last_x": previous_block}


def second2last_update(_g, substep, sH, s):
    """Expose the PSU block two steps back (offset -2) as '2nd_to_last_x'."""
    return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}


policies["last_x"] = last_update
policies["2nd_to_last_x"] = second2last_update


# --- State updates. ---
# WARNING: DO NOT delete elements from sH
def add(y, x):
    """Build an update that increments state *y* by the constant *x*."""
    return lambda _g, substep, sH, s, _input: (y, s[y] + x)


def nonexsistant(_g, substep, sH, s, _input):
    # Offset 0 targets the current, still-incomplete PSU block.
    return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)


def last_x(_g, substep, sH, s, _input):
    return 'last_x', _input["last_x"]


def second_to_last_x(_g, substep, sH, s, _input):
    return '2nd_to_last_x', _input["2nd_to_last_x"]


def third_to_last_x(_g, substep, sH, s, _input):
    return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)


def fourth_to_last_x(_g, substep, sH, s, _input):
    return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)


variables['x'] = add('x', 1)
variables['nonexsistant'] = nonexsistant
variables['last_x'] = last_x
variables['2nd_to_last_x'] = second_to_last_x
variables['3rd_to_last_x'] = third_to_last_x
variables['4th_to_last_x'] = fourth_to_last_x

# 'x' counts up from 0; every history-tracking state starts as an empty list.
genesis_states = {'x': 0, **{k: [] for k in exclusion_list}}

# The same policy/variable wiring is repeated as three substeps.
PSUB = {"policies": policies, "variables": variables}
psubs = {f"PSUB{i}": PSUB for i in range(1, 4)}

sim_config = config_sim({"N": 1, "T": range(3)})

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=psubs
)

exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=configs)

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
cols = ['run','substep','timestep','x','nonexsistant','last_x','2nd_to_last_x','3rd_to_last_x','4th_to_last_x']
result = result[cols]

print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
import pprint
from typing import Dict, List

import pandas as pd
from tabulate import tabulate

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs

pp = pprint.PrettyPrinter(indent=4)


def some_function(x):
    """Identity function; swept as the 'omega' parameter and applied in omega()."""
    return x


# Parameter sweep: multi-valued lists are paired positionally, producing two
# parameterizations here (beta/gamma = (2, 3) and (5, 4)); single-element
# lists apply to every run.
g: Dict[str, List[int]] = {
    'alpha': [1],
    'beta': [2, 5],
    'gamma': [3, 4],
    'omega': [some_function]
}

psu_steps = ['1', '2', '3']
system_substeps = len(psu_steps)
var_timestep_trigger = var_substep_trigger([0, system_substeps])
env_timestep_trigger = env_trigger(system_substeps)
env_process = {}


# Policies
def gamma(_params, step, sH, s):
    """Forward the swept 'gamma' parameter as a policy input."""
    return {'gamma': _params['gamma']}


def omega(_params, step, sH, s):
    """'omega' is swept as a callable; apply it to a sample value."""
    return {'omega': _params['omega'](7)}


# State updates
def alpha(_params, step, sH, s, _input):
    """Set state 'alpha' to the swept alpha parameter."""
    return 'alpha', _params['alpha']


def alpha_plus_gamma(_params, step, sH, s, _input):
    """Set state 'alpha_plus_gamma' to the sum of the swept parameters."""
    return 'alpha_plus_gamma', _params['alpha'] + _params['gamma']


def beta(_params, step, sH, s, _input):
    """Set state 'beta' to the swept beta parameter."""
    return 'beta', _params['beta']


def policies(_params, step, sH, s, _input):
    """Record the aggregated policy input on the 'policies' state."""
    return 'policies', _input


def sweeped(_params, step, sH, s, _input):
    """Snapshot the swept beta/gamma pair for this run."""
    return 'sweeped', {'beta': _params['beta'], 'gamma': _params['gamma']}


genesis_states = {
    'alpha_plus_gamma': 0,
    'alpha': 0,
    'beta': 0,
    'policies': {},
    'sweeped': {}
}

# Overwrite 'sweeped' with the run's beta once the timestep trigger fires at t=5.
env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']])

sim_config = config_sim(
    {
        "N": 2,
        "T": range(5),
        "M": g,
    }
)

psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
for m in psu_steps:
    psu_block[m]['policies']['gamma'] = gamma
    psu_block[m]['policies']['omega'] = omega
    # BUG FIX: the 'alpha' and 'alpha_plus_gamma' slots were swapped — each
    # variable key pointed at the updater for the *other* state (the updater
    # for 'alpha' was registered under 'alpha_plus_gamma' and vice versa).
    psu_block[m]["variables"]['alpha'] = alpha
    psu_block[m]["variables"]['alpha_plus_gamma'] = alpha_plus_gamma
    psu_block[m]["variables"]['beta'] = beta
    psu_block[m]['variables']['policies'] = policies
    # 'sweeped' only updates on the configured substeps (0 or the last one).
    psu_block[m]["variables"]['sweeped'] = var_timestep_trigger(y='sweeped', f=sweeped)

psubs = psub_list(psu_block, psu_steps)
print()
pp.pprint(psu_block)
print()

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes=env_process,
    partial_state_update_blocks=psubs
)

exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)

for raw_result, tensor_field in run.execute():
    result = pd.DataFrame(raw_result)
    print()
    print("Tensor Field:")
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print("Output:")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
import pandas as pd
from tabulate import tabulate

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs


# --- Policies per mechanism. ---
def p1m1(_g, step, sH, s):
    return {'policy1': 1}


def p2m1(_g, step, sH, s):
    return {'policy2': 2}


def p1m2(_g, step, sH, s):
    return {'policy1': 2, 'policy2': 2}


def p2m2(_g, step, sH, s):
    return {'policy1': 2, 'policy2': 2}


def p1m3(_g, step, sH, s):
    return {'policy1': 1, 'policy2': 2, 'policy3': 3}


def p2m3(_g, step, sH, s):
    return {'policy1': 1, 'policy2': 2, 'policy3': 3}


# --- State updates, shared by every mechanism. ---
def add(y, x):
    """Build an update that increments state *y* by the constant *x*."""
    return lambda _g, step, sH, s, _input: (y, s[y] + x)


def policies(_g, step, sH, s, _input):
    """Record the aggregated policy input on the 'policies' state."""
    return 'policies', _input


genesis_states = {
    'policies': {},
    's1': 0
}

variables = {
    's1': add('s1', 1),
    "policies": policies
}


def _psub(p1, p2):
    """One partial state update block wiring two policies to the shared variables."""
    return {"policies": {"p1": p1, "p2": p2}, "variables": variables}


psubs = {
    "m1": _psub(p1m1, p2m1),
    "m2": _psub(p1m2, p2m2),
    "m3": _psub(p1m3, p2m3),
}

sim_config = config_sim({"N": 1, "T": range(3)})

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=psubs,
    # Aggregate policies with a+b, then post-process with y*2.
    # (Default is just: lambda a, b: a + b)
    policy_ops=[lambda a, b: a + b, lambda y: y * 2]
)

exec_mode = ExecutionMode()
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=configs)

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)

print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
import numpy as np
from datetime import timedelta

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, config_sim, time_step, env_trigger

# Independent random streams for the stochastic exogenous processes.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(4)
}


# --- Policies per mechanism. ---
def p1m1(_g, step, sH, s):
    return {'param1': 1}


def p2m1(_g, step, sH, s):
    return {'param1': 1, 'param2': 4}


def p1m2(_g, step, sH, s):
    return {'param1': 'a', 'param2': 2}


def p2m2(_g, step, sH, s):
    return {'param1': 'b', 'param2': 4}


def p1m3(_g, step, sH, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}


def p2m3(_g, step, sH, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


# --- State updates per mechanism. ---
def s1m1(_g, step, sH, s, _input):
    return 's1', s['s1'] + 1


def s2m1(_g, step, sH, s, _input):
    return 's2', _input['param2']


def s1m2(_g, step, sH, s, _input):
    return 's1', s['s1'] + 1


def s2m2(_g, step, sH, s, _input):
    return 's2', _input['param2']


def s1m3(_g, step, sH, s, _input):
    return 's1', s['s1'] + 1


def s2m3(_g, step, sH, s, _input):
    return 's2', _input['param2']


def policies(_g, step, sH, s, _input):
    # Not wired into any block below; kept for importers of this module.
    return 'policies', _input


# --- Exogenous states: multiplicative bounded-normal random walks. ---
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3


def es3(_g, step, sH, s, _input):
    return 's3', s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)


def es4(_g, step, sH, s, _input):
    return 's4', s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)


def update_timestamp(_g, step, sH, s, _input):
    """Advance the 'timestamp' state by one second."""
    y = 'timestamp'
    return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))


genesis_states = {
    's1': 0.0,
    's2': 0.0,
    's3': 1.0,
    's4': 1.0,
    'timestamp': '2018-10-01 15:16:24'
}


# Environment processes
# ToDo: deprecation warning for the env_proc_trigger convention
trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29']
env_processes = {
    "s3": [lambda _g, x: 5],
    "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10])
}

# Exogenous states are only updated in the first block.
psubs = [
    {
        "policies": {"b1": p1m1, "b2": p2m1},
        "variables": {
            "s1": s1m1,
            "s2": s2m1,
            "s3": es3,
            "s4": es4,
            "timestamp": update_timestamp
        }
    },
    {
        "policies": {"b1": p1m2, "b2": p2m2},
        "variables": {"s1": s1m2, "s2": s2m2}
    },
    {
        "policies": {"b1": p1m3, "b2": p2m3},
        "variables": {"s1": s1m3, "s2": s2m3}
    }
]

sim_config = config_sim({"N": 2, "T": range(1)})

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes=env_processes,
    partial_state_update_blocks=psubs,
    policy_ops=[lambda a, b: a + b]
)
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
import pandas as pd
from tabulate import tabulate
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from documentation.examples import sys_model_A, sys_model_B
from cadCAD import configs

exec_mode = ExecutionMode()

# Run every registered configuration (sys_model_A & sys_model_B) concurrently;
# execute() yields one (events, tensor field) pair per configuration.
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
sys_model_AB_simulation = Executor(exec_context=multi_proc_ctx, configs=configs)

config_names = ['sys_model_A', 'sys_model_B']
for i, (raw_events, tensor_field) in enumerate(sys_model_AB_simulation.execute()):
    events_frame = pd.DataFrame(raw_events)
    print()
    print(f"Tensor Field: {config_names[i]}")
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print("Result: System Events DataFrame:")
    print(tabulate(events_frame, headers='keys', tablefmt='psql'))
    print()
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
import pandas as pd
from tabulate import tabulate
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from documentation.examples import sys_model_A
from cadCAD import configs

exec_mode = ExecutionMode()

# Run only the first registered configuration (sys_model_A) in a single process.
sys_model_A = [configs[0]]
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
sys_model_A_simulation = Executor(exec_context=single_proc_ctx, configs=sys_model_A)

raw_events, tensor_field = sys_model_A_simulation.execute()
events_frame = pd.DataFrame(raw_events)
print()
print("Tensor Field: sys_model_A")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Result: System Events DataFrame")
print(tabulate(events_frame, headers='keys', tablefmt='psql'))
print()
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
import numpy as np
from datetime import timedelta

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, config_sim, env_trigger, time_step

# Independent random streams for the stochastic exogenous processes.
# NOTE(review): 'b' and 'c' both use seed 3 — confirm that is intentional.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}


# --- Policies per mechanism. ---
def p1m1(_g, step, sH, s):
    return {'param1': 1}


def p2m1(_g, step, sH, s):
    return {'param2': 4}


def p1m2(_g, step, sH, s):
    return {'param1': 'a', 'param2': 2}


def p2m2(_g, step, sH, s):
    return {'param1': 'b', 'param2': 4}


def p1m3(_g, step, sH, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}


def p2m3(_g, step, sH, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


# --- State updates per mechanism: copy the aggregated policy input into state. ---
def s1m1(_g, step, sH, s, _input):
    return 's1', _input['param1']


def s2m1(_g, step, sH, s, _input):
    return 's2', _input['param2']


def s1m2(_g, step, sH, s, _input):
    return 's1', _input['param1']


def s2m2(_g, step, sH, s, _input):
    return 's2', _input['param2']


def s1m3(_g, step, sH, s, _input):
    return 's1', _input['param1']


def s2m3(_g, step, sH, s, _input):
    return 's2', _input['param2']


# --- Exogenous states: multiplicative bounded-normal random walks. ---
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3


def es3(_g, step, sH, s, _input):
    return 's3', s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)


def es4(_g, step, sH, s, _input):
    return 's4', s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)


def update_timestamp(_g, step, sH, s, _input):
    """Advance the 'timestamp' state by one second."""
    y = 'timestamp'
    return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))


genesis_states = {
    's1': 0,
    's2': 0,
    's3': 1,
    's4': 1,
    'timestamp': '2018-10-01 15:16:24'
}


# Environment processes
# ToDo: deprecation warning for the env_proc_trigger convention
trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29']
env_processes = {
    "s3": [lambda _g, x: 5],
    "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10])
}

# NOTE(review): sibling configs in this changeset use the key "variables"
# instead of "states" for the update maps below — confirm which spelling this
# cadCAD version expects before renaming.
psubs = [
    {
        "policies": {
            "b1": p1m1,
            # "b2": p2m1
        },
        "states": {
            "s1": s1m1,
            # "s2": s2m1
            "s3": es3,
            "s4": es4,
            # BUG FIX: this entry was keyed "timestep", but the state defined in
            # genesis_states (and returned by update_timestamp) is "timestamp".
            "timestamp": update_timestamp
        }
    },
    {
        "policies": {
            "b1": p1m2,
            # "b2": p2m2
        },
        "states": {
            "s1": s1m2,
            # "s2": s2m2
        }
    },
    {
        "policies": {
            "b1": p1m3,
            "b2": p2m3
        },
        "states": {
            "s1": s1m3,
            "s2": s2m3
        }
    }
]

sim_config = config_sim(
    {
        "N": 2,
        "T": range(5),
    }
)

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes=env_processes,
    partial_state_update_blocks=psubs
)
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from documentation.examples import sys_model_B
from cadCAD import configs

exec_mode = ExecutionMode()

print("Simulation Execution: Single Configuration")
print()
first_config = configs  # only contains sys_model_B
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)

# Run in-process and tabulate the system-events output.
raw_events, tensor_field = run.execute()
events_frame = pd.DataFrame(raw_events)
print()
print("Tensor Field: sys_model_B")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(events_frame, headers='keys', tablefmt='psql'))
print()
|
||||
|
|
@ -1,4 +1,6 @@
|
|||
pandas
|
||||
wheel
|
||||
pathos
|
||||
pipenv
|
||||
fn
|
||||
tabulate
|
||||
tabulate
|
||||
funcy
|
||||
45
setup.py
45
setup.py
|
|
@ -1,11 +1,38 @@
|
|||
from setuptools import setup
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
setup(name='SimCAD',
|
||||
version='0.1',
|
||||
description='Sim-Cad Enigne',
|
||||
url='https://github.com/BlockScience/DiffyQ-SimCAD',
|
||||
long_description = """
|
||||
cadCAD (complex adaptive systems computer-aided design) is a python based, unified modeling framework for stochastic
|
||||
dynamical systems and differential games for research, validation, and Computer Aided Design of economic systems created
|
||||
by BlockScience. It is capable of modeling systems at all levels of abstraction from Agent Based Modeling (ABM) to
|
||||
System Dynamics (SD), and enabling smooth integration of computational social science simulations with empirical data
|
||||
science workflows.
|
||||
|
||||
An economic system is treated as a state-based model and defined through a set of endogenous and exogenous state
|
||||
variables which are updated through mechanisms and environmental processes, respectively. Behavioral models, which may
|
||||
be deterministic or stochastic, provide the evolution of the system within the action space of the mechanisms.
|
||||
Mathematical formulations of these economic games treat agent utility as derived from the state rather than direct from
|
||||
an action, creating a rich, dynamic modeling framework. Simulations may be run with a range of initial conditions and
|
||||
parameters for states, behaviors, mechanisms, and environmental processes to understand and visualize network behavior
|
||||
under various conditions. Support for A/B testing policies, Monte Carlo analysis, and other common numerical methods is
|
||||
provided.
|
||||
"""
|
||||
|
||||
setup(name='cadCAD',
|
||||
version='0.3.1',
|
||||
description="cadCAD: a differential games based simulation software package for research, validation, and \
|
||||
Computer Aided Design of economic systems",
|
||||
long_description=long_description,
|
||||
url='https://github.com/BlockScience/cadCAD',
|
||||
author='Joshua E. Jodesty',
|
||||
author_email='joshua@block.science',
|
||||
# license='?????',
|
||||
packages=['SimCAD'],
|
||||
zip_safe=False)
|
||||
author_email='joshua@block.science, joshua.jodesty@gmail.com',
|
||||
license='LICENSE.txt',
|
||||
packages=find_packages(),
|
||||
install_requires=[
|
||||
"pandas",
|
||||
"wheel",
|
||||
"pathos",
|
||||
"fn",
|
||||
"tabulate",
|
||||
"funcy"
|
||||
]
|
||||
)
|
||||
|
|
|
|||
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,27 @@
|
|||
ds1,ds2,ds3,run,substep,timestep
|
||||
0,0,1,1,0,0
|
||||
1,40,5,1,1,1
|
||||
2,40,5,1,2,1
|
||||
3,40,5,1,3,1
|
||||
4,40,5,1,1,2
|
||||
5,40,5,1,2,2
|
||||
6,40,5,1,3,2
|
||||
7,40,5,1,1,3
|
||||
8,40,5,1,2,3
|
||||
9,40,5,1,3,3
|
||||
10,40,5,1,1,4
|
||||
11,40,5,1,2,4
|
||||
12,40,5,1,3,4
|
||||
0,0,1,2,0,0
|
||||
1,40,5,2,1,1
|
||||
2,40,5,2,2,1
|
||||
3,40,5,2,3,1
|
||||
4,40,5,2,1,2
|
||||
5,40,5,2,2,2
|
||||
6,40,5,2,3,2
|
||||
7,40,5,2,1,3
|
||||
8,40,5,2,2,3
|
||||
9,40,5,2,3,3
|
||||
10,40,5,2,1,4
|
||||
11,40,5,2,2,4
|
||||
12,40,5,2,3,4
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
import os

import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import write_simulation
from cadCAD import configs

exec_mode = ExecutionMode()

print("Simulation Execution: Single Configuration")
print()
first_config = configs  # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)

# NOTE(review): other runner scripts in this changeset call run.execute();
# confirm .main() is the intended entry point here.
raw_result, _ = run.main()
result = pd.DataFrame(raw_result)
# BUG FIX: the output path was hard-coded to one developer's home directory
# (/Users/jjodesty/...); write relative to the current working directory
# (expected to be the repository root) instead.
output_path = os.path.join('simulations', 'external_data', 'output.csv')
result.to_csv(output_path, index=False)

print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
import pandas as pd
from tabulate import tabulate
# The following imports NEED to be in the exact order
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from simulations.validation import sweep_config
from cadCAD import configs

exec_mode = ExecutionMode()

print("Simulation Execution: Concurrent Execution")
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)

# One (events, tensor field) pair is yielded per swept configuration.
config_names = ['sweep_config_A', 'sweep_config_B']
for i, (raw_events, tensor_field) in enumerate(run.execute()):
    events_frame = pd.DataFrame(raw_events)
    print()
    print("Tensor Field: " + config_names[i])
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print("Output:")
    print(tabulate(events_frame, headers='keys', tablefmt='psql'))
    print()
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
import numpy as np
from datetime import timedelta

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, config_sim, time_step, env_trigger

# Independent random streams for the stochastic exogenous processes.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(4)
}


# --- Policies per mechanism. ---
def p1m1(_g, step, sL, s):
    return {'param1': 1}


def p2m1(_g, step, sL, s):
    return {'param1': 1, 'param2': 4}


def p1m2(_g, step, sL, s):
    return {'param1': 'a', 'param2': 2}


def p2m2(_g, step, sL, s):
    return {'param1': 'b', 'param2': 4}


def p1m3(_g, step, sL, s):
    return {'param1': ['c'], 'param2': np.array([10, 100])}


def p2m3(_g, step, sL, s):
    return {'param1': ['d'], 'param2': np.array([20, 200])}


# --- State updates per mechanism. ---
def s1m1(_g, step, sL, s, _input):
    return 's1', s['s1'] + 1


def s2m1(_g, step, sL, s, _input):
    return 's2', _input['param2']


def s1m2(_g, step, sL, s, _input):
    return 's1', s['s1'] + 1


def s2m2(_g, step, sL, s, _input):
    return 's2', _input['param2']


def s1m3(_g, step, sL, s, _input):
    return 's1', s['s1'] + 1


def s2m3(_g, step, sL, s, _input):
    return 's2', _input['param2']


def policies(_g, step, sL, s, _input):
    # Not wired into any block below; kept for importers of this module.
    return 'policies', _input


# --- Exogenous states: multiplicative bounded-normal random walks. ---
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3


def es3(_g, step, sL, s, _input):
    return 's3', s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)


def es4(_g, step, sL, s, _input):
    return 's4', s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)


def update_timestamp(_g, step, sL, s, _input):
    """Advance the 'timestamp' state by one second."""
    y = 'timestamp'
    return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))


genesis_states = {
    's1': 0.0,
    's2': 0.0,
    's3': 1.0,
    's4': 1.0,
    'timestamp': '2018-10-01 15:16:24'
}


# Environment processes
# ToDo: deprecation warning for the env_proc_trigger convention
trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29']
env_processes = {
    "s3": [lambda _g, x: 5],
    "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10])
}


# Exogenous updates are only attached to the first block.
_exogenous_updates = {"s3": es3, "s4": es4, "timestamp": update_timestamp}

partial_state_update_block = [
    {
        "policies": {"b1": p1m1, "b2": p2m1},
        "variables": {"s1": s1m1, "s2": s2m1, **_exogenous_updates}
    },
    {
        "policies": {"b1": p1m2, "b2": p2m2},
        "variables": {"s1": s1m2, "s2": s2m2}
    },
    {
        "policies": {"b1": p1m3, "b2": p2m3},
        "variables": {"s1": s1m3, "s2": s2m3}
    }
]

sim_config = config_sim(
    {
        "N": 1,
        "T": range(5),
    }
)

append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes=env_processes,
    partial_state_update_blocks=partial_state_update_block,
    policy_ops=[lambda a, b: a + b]
)
|
||||
|
|
@ -0,0 +1,147 @@
|
|||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import bound_norm_random, config_sim, env_trigger, time_step
|
||||
|
||||
# Named, independently seeded RNG streams for reproducible randomness.
# NOTE(review): 'b' and 'c' share seed 3 — confirm that is intentional.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}
|
||||
|
||||
|
||||
# Policies per Mechanism
def p1m1(_g, step, sL, s):
    """Substep m1, policy 1: constant int signal."""
    return dict(param1=1)


def p2m1(_g, step, sL, s):
    """Substep m1, policy 2: constant int signal."""
    return dict(param2=4)


def p1m2(_g, step, sL, s):
    """Substep m2, policy 1: mixed str/int signals."""
    return dict(param1='a', param2=2)


def p2m2(_g, step, sL, s):
    """Substep m2, policy 2: mixed str/int signals."""
    return dict(param1='b', param2=4)


def p1m3(_g, step, sL, s):
    """Substep m3, policy 1: list and ndarray signals (exercises '+' aggregation)."""
    return dict(param1=['c'], param2=np.array([10, 100]))


def p2m3(_g, step, sL, s):
    """Substep m3, policy 2: list and ndarray signals (exercises '+' aggregation)."""
    return dict(param1=['d'], param2=np.array([20, 200]))
|
||||
|
||||
|
||||
# Internal States per Mechanism
# Each function copies one aggregated policy signal into one state variable.
def s1m1(_g, step, sL, s, _input):
    """Set 's1' from the aggregated 'param1' signal (substep m1)."""
    return ('s1', _input['param1'])


def s2m1(_g, step, sL, s, _input):
    """Set 's2' from the aggregated 'param2' signal (substep m1)."""
    return ('s2', _input['param2'])


def s1m2(_g, step, sL, s, _input):
    """Set 's1' from the aggregated 'param1' signal (substep m2)."""
    return ('s1', _input['param1'])


def s2m2(_g, step, sL, s, _input):
    """Set 's2' from the aggregated 'param2' signal (substep m2)."""
    return ('s2', _input['param2'])


def s1m3(_g, step, sL, s, _input):
    """Set 's1' from the aggregated 'param1' signal (substep m3)."""
    return ('s1', _input['param1'])


def s2m3(_g, step, sL, s, _input):
    """Set 's2' from the aggregated 'param2' signal (substep m3)."""
    return ('s2', _input['param2'])
|
||||
|
||||
|
||||
# Exogenous States
# Multiplicative random-walk bounds for the noise factor.
proc_one_coef_A = 0.7
proc_one_coef_B = 1.3


def es3(_g, step, sL, s, _input):
    """Random-walk 's3' multiplicatively using RNG stream 'a'."""
    noise = bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
    return ('s3', s['s3'] * noise)


def es4(_g, step, sL, s, _input):
    """Random-walk 's4' multiplicatively using RNG stream 'b'."""
    noise = bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
    return ('s4', s['s4'] * noise)
|
||||
|
||||
def update_timestamp(_g, step, sL, s, _input):
    """Advance the 'timestamp' state string by one second."""
    state_key = 'timestamp'
    next_ts = time_step(
        dt_str=s[state_key],
        dt_format='%Y-%m-%d %H:%M:%S',
        _timedelta=timedelta(days=0, minutes=0, seconds=1)
    )
    return state_key, next_ts
|
||||
|
||||
|
||||
# Genesis States
# Initial values for every state variable (ints here, unlike the float
# variant of this config elsewhere in the repo).
genesis_states = {
    's1': 0,
    's2': 0,
    's3': 1,
    's4': 1,
    'timestamp': '2018-10-01 15:16:24'
}


# Environment Process
# TODO: Deprecation warning for the env_proc_trigger convention
# Timestamps at which the 's4' environment process below fires.
trigger_timestamps = ['2018-10-01 15:16:25', '2018-10-01 15:16:27', '2018-10-01 15:16:29']
env_processes = {
    # 's3' is overwritten with a constant 5 every timestep.
    "s3": [lambda _g, x: 5],
    # 's4' is set to 10, but only at the trigger timestamps above.
    "s4": env_trigger(3)(trigger_field='timestamp', trigger_vals=trigger_timestamps, funct_list=[lambda _g, x: 10])
}
|
||||
|
||||
# Old (dict-keyed) Partial State Update Block convention: "m1".."m3" run in
# order each timestep; "states" maps state keys to their update functions.
partial_state_update_block = {
    "m1": {
        "policies": {
            "b1": p1m1,
            # "b2": p2m1
        },
        "states": {
            "s1": s1m1,
            # "s2": s2m1
            "s3": es3,
            "s4": es4,
            # BUG FIX: key was "timestep"; update_timestamp returns the
            # 'timestamp' state key (and genesis_states defines 'timestamp',
            # not 'timestep'), so the registered key must match.
            "timestamp": update_timestamp
        }
    },
    "m2": {
        "policies": {
            "b1": p1m2,
            # "b2": p2m2
        },
        "states": {
            "s1": s1m2,
            # "s2": s2m2
        }
    },
    "m3": {
        "policies": {
            "b1": p1m3,
            "b2": p2m3
        },
        "states": {
            "s1": s1m3,
            "s2": s2m3
        }
    }
}
|
||||
|
||||
|
||||
# Simulation config: 2 runs of 5 timesteps each.
sim_config = config_sim(
    {
        "N": 2,
        "T": range(5),
    }
)

# Register this system configuration with cadCAD (default policy aggregation).
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes=env_processes,
    partial_state_update_blocks=partial_state_update_block
)
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
import pandas as pd
|
||||
from cadCAD.utils import SilentDF
|
||||
|
||||
# NOTE(review): hard-coded absolute path — breaks on any other machine;
# should be made relative or configurable.
df = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
|
||||
|
||||
|
||||
def query(s, df):
    """Return the rows of *df* matching the current (run, substep, timestep),
    with those three bookkeeping columns dropped."""
    mask = (
        (df['run'] == s['run'])
        & (df['substep'] == s['substep'])
        & (df['timestep'] == s['timestep'])
    )
    return df[mask].drop(columns=['run', 'substep', 'timestep'])
|
||||
|
||||
def p1(_g, substep, sL, s):
    """Policy: surface every external-data column except 'ds3' as a signal."""
    record = query(s, df).to_dict()
    del record["ds3"]
    return {col: list(vals.values()).pop() for col, vals in record.items()}


def p2(_g, substep, sL, s):
    """Policy: surface only 'ds3' (drops 'ds1' and 'ds2')."""
    record = query(s, df).to_dict()
    del record["ds1"], record["ds2"]
    return {col: list(vals.values()).pop() for col, vals in record.items()}
|
||||
|
||||
# ToDo: SilentDF(df) wont work
#integrate_ext_dataset
def integrate_ext_dataset(_g, step, sL, s, _input):
    # Pull the dataset row for the current (run, substep, timestep) and store
    # it as a plain {column: value} dict in the 'external_data' state.
    result_dict = query(s, df).to_dict()
    return 'external_data', {k: list(v.values()).pop() for k, v in result_dict.items()}
|
||||
|
||||
def increment(y, incr_by):
|
||||
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
|
||||
increment = increment('increment', 1)
|
||||
|
||||
def view_policies(_g, step, sL, s, _input):
    """Mirror the aggregated policy input into the 'policies' state."""
    return ('policies', _input)
|
||||
|
||||
|
||||
# Placeholder shape for the externally sourced columns.
external_data = {'ds1': None, 'ds2': None, 'ds3': None}
# Genesis states.
state_dict = {
    'increment': 0,
    'external_data': external_data,
    'policies': external_data
}


policies = {"p1": p1, "p2": p2}
states = {'increment': increment, 'external_data': integrate_ext_dataset, 'policies': view_policies}
# A single reusable partial state update block.
PSUB = {'policies': policies, 'states': states}
|
||||
|
||||
# needs M1&2 need behaviors
# The same PSUB repeated for three substeps per timestep.
partial_state_update_blocks = {
    'PSUB1': PSUB,
    'PSUB2': PSUB,
    'PSUB3': PSUB
}

# 2 runs of 4 timesteps each.
sim_config = config_sim({
    "N": 2,
    "T": range(4)
})

# Policy signals sharing a key are merged via dict union instead of '+'.
append_configs(
    sim_configs=sim_config,
    initial_state=state_dict,
    partial_state_update_blocks=partial_state_update_blocks,
    policy_ops=[lambda a, b: {**a, **b}]
)
|
||||
|
|
@ -0,0 +1,91 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim, access_block
|
||||
|
||||
# Registries filled in incrementally below.
policies, variables = {}, {}
# State keys excluded when reading back past partial-state-update blocks.
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']

# Policies per Mechanism

# WARNING: DO NOT delete elements from sH
# state_history, target_field, psu_block_offset, exculsion_list
def last_update(_g, substep, sH, s):
    # Read the previous PSU block's state (offset -1) from the history.
    # NOTE: 'exculsion_list' is the library's own parameter spelling.
    return {"last_x": access_block(
        state_history=sH,
        target_field="last_x",
        psu_block_offset=-1,
        exculsion_list=exclusion_list
    )
    }
policies["last_x"] = last_update


def second2last_update(_g, substep, sH, s):
    # Same lookup, two PSU blocks back (positional args, same order as above).
    return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
policies["2nd_to_last_x"] = second2last_update
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
|
||||
# WARNING: DO NOT delete elements from sH
def add(y, x):
    """Build a state-update function that increments state *y* by *x*."""
    def _update(_g, substep, sH, s, _input):
        return y, s[y] + x
    return _update
variables['x'] = add('x', 1)
|
||||
|
||||
# last_partial_state_update_block
def nonexsistant(_g, substep, sH, s, _input):
    # Offset 0: queries the current (in-progress) PSU block; the field does
    # not exist in history — exercises access_block's missing-field path.
    return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)
variables['nonexsistant'] = nonexsistant

# last_partial_state_update_block
def last_x(_g, substep, sH, s, _input):
    # Copy the policy-computed lookback (see last_update above) into state.
    return 'last_x', _input["last_x"]
variables['last_x'] = last_x

# 2nd to last partial state update block
def second_to_last_x(_g, substep, sH, s, _input):
    # Copy the policy-computed two-block lookback into state.
    return '2nd_to_last_x', _input["2nd_to_last_x"]
variables['2nd_to_last_x'] = second_to_last_x

# 3rd to last partial state update block
def third_to_last_x(_g, substep, sH, s, _input):
    # Direct history lookup, three PSU blocks back.
    return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
variables['3rd_to_last_x'] = third_to_last_x

# 4th to last partial state update block
def fourth_to_last_x(_g, substep, sH, s, _input):
    # Direct history lookup, four PSU blocks back.
    return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
variables['4th_to_last_x'] = fourth_to_last_x
|
||||
|
||||
|
||||
# Genesis states: a counter plus one empty list per lookback field.
genesis_states = {
    'x': 0,
    'nonexsistant': [],
    'last_x': [],
    '2nd_to_last_x': [],
    '3rd_to_last_x': [],
    '4th_to_last_x': []
}

# A single reusable partial state update block built from the registries.
PSUB = {
    "policies": policies,
    "variables": variables
}

# The same PSUB repeated for three substeps per timestep.
partial_state_update_block = {
    "PSUB1": PSUB,
    "PSUB2": PSUB,
    "PSUB3": PSUB
}

# 1 run of 3 timesteps.
sim_config = config_sim(
    {
        "N": 1,
        "T": range(3),
    }
)


append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=partial_state_update_block
)
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
|
||||
|
||||
# Policies per Mechanism
def p1m1(_g, step, sL, s):
    """Substep m1, policy 1."""
    return dict(policy1=1)


def p2m1(_g, step, sL, s):
    """Substep m1, policy 2."""
    return dict(policy2=2)


def p1m2(_g, step, sL, s):
    """Substep m2, policy 1."""
    return dict(policy1=2, policy2=2)


def p2m2(_g, step, sL, s):
    """Substep m2, policy 2."""
    return dict(policy1=2, policy2=2)


def p1m3(_g, step, sL, s):
    """Substep m3, policy 1."""
    return dict(policy1=1, policy2=2, policy3=3)


def p2m3(_g, step, sL, s):
    """Substep m3, policy 2."""
    return dict(policy1=1, policy2=2, policy3=3)
|
||||
|
||||
|
||||
# Internal States per Mechanism
def add(y, x):
    """Build a state-update function that adds *x* to state *y*."""
    def _update(_g, step, sH, s, _input):
        return y, s[y] + x
    return _update


def policies(_g, step, sH, s, _input):
    """Mirror the aggregated policy signal into the 'policies' state."""
    return ('policies', _input)
|
||||
|
||||
|
||||
# Genesis States
genesis_states = {
    'policies': {},
    's1': 0
}

# Shared state-update map: every substep increments 's1' and records the
# aggregated policy signal.
variables = {
    's1': add('s1', 1),
    "policies": policies
}

partial_state_update_block = {
    "m1": {
        "policies": {
            "p1": p1m1,
            "p2": p2m1
        },
        "variables": variables
    },
    "m2": {
        "policies": {
            "p1": p1m2,
            "p2": p2m2
        },
        "variables": variables
    },
    "m3": {
        "policies": {
            "p1": p1m3,
            "p2": p2m3
        },
        "variables": variables
    }
}


sim_config = config_sim(
    {
        "N": 1,
        "T": range(3),
    }
)

# Aggregation == Reduce Map / Reduce Map Aggregation
# using env functions (include in reg test using / for env proc)
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    env_processes={},
    partial_state_update_blocks=partial_state_update_block,
    # ToDo: subsequent functions should include policy dict for access to each policy (i.e shouldnt be a map)
    # Two-stage aggregation: pairwise '+' reduce, then a unary doubling map.
    policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b ToDO: reduction function requires high lvl explanation
)
|
||||
|
|
@ -0,0 +1,159 @@
|
|||
import numpy as np
|
||||
from datetime import timedelta
|
||||
import pprint
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, time_step, psub_list
|
||||
|
||||
from typing import Dict, List
|
||||
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
|
||||
# Named RNG streams (reproducible randomness).
# NOTE(review): 'b' and 'c' share seed 3 — confirm that is intentional.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}

# Optional
# Parameter sweep: lists with >1 value ('beta', 'gamma') fan out into one
# config per value combination.
g: Dict[str, List[int]] = {
    'alpha': [1],
    # 'beta': [2],
    # 'gamma': [3],
    'beta': [2, 5],
    'gamma': [3, 4],
    'omega': [7]
}

# Three named substeps per timestep; triggers fire on substep 0 or the last.
psu_steps = ['m1', 'm2', 'm3']
system_substeps = len(psu_steps)
var_timestep_trigger = var_substep_trigger([0, system_substeps])
env_timestep_trigger = env_trigger(system_substeps)
env_process = {}
# Skeleton PSU block, filled in incrementally below.
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
|
||||
|
||||
# ['s1', 's2', 's3', 's4']
|
||||
# Policies per Mechanism
def p1m1(_g, step, sL, s):
    # Constant int signal.
    return {'param1': 1}
psu_block['m1']['policies']['p1'] = p1m1


def p2m1(_g, step, sL, s):
    return {'param2': 4}
psu_block['m1']['policies']['p2'] = p2m1


def p1m2(_g, step, sL, s):
    # 'param2' comes from the swept parameter 'beta'.
    return {'param1': 'a', 'param2': _g['beta']}
psu_block['m2']['policies']['p1'] = p1m2


def p2m2(_g, step, sL, s):
    return {'param1': 'b', 'param2': 0}
psu_block['m2']['policies']['p2'] = p2m2


def p1m3(_g, step, sL, s):
    return {'param1': np.array([10, 100])}
psu_block['m3']['policies']['p1'] = p1m3


def p2m3(_g, step, sL, s):
    return {'param1': np.array([20, 200])}
psu_block['m3']['policies']['p2'] = p2m3


# Internal States per Mechanism
def s1m1(_g, step, sL, s, _input):
    # Reset 's1'.
    return 's1', 0
psu_block['m1']["variables"]['s1'] = s1m1


def s2m1(_g, step, sL, s, _input):
    # 's2' tracks the swept parameter 'beta'.
    return 's2', _g['beta']
psu_block['m1']["variables"]['s2'] = s2m1


def s1m2(_g, step, sL, s, _input):
    return 's1', _input['param2']
psu_block['m2']["variables"]['s1'] = s1m2


def s2m2(_g, step, sL, s, _input):
    return 's2', _input['param2']
psu_block['m2']["variables"]['s2'] = s2m2


def s1m3(_g, step, sL, s, _input):
    return 's1', 0
psu_block['m3']["variables"]['s1'] = s1m3


def s2m3(_g, step, sL, s, _input):
    return 's2', 0
psu_block['m3']["variables"]['s2'] = s2m3
|
||||
|
||||
|
||||
# Exogenous States
def update_timestamp(_g, step, sL, s, _input):
    # Advance the 'timestamp' state by one second.
    y = 'timestamp'
    return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
# Register on every substep; the trigger restricts firing to substep 0/last.
for m in ['m1','m2','m3']:
    # psu_block[m]["variables"]['timestamp'] = update_timestamp
    psu_block[m]["variables"]['timestamp'] = var_timestep_trigger(y='timestamp', f=update_timestamp)
    # psu_block[m]["variables"]['timestamp'] = var_trigger(
    #     y='timestamp', f=update_timestamp, pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
    # )

proc_one_coef = 0.7
def es3(_g, step, sL, s, _input):
    # Additive drift on 's3'.
    return 's3', s['s3'] + proc_one_coef
# use `timestep_trigger` to update every ts
for m in ['m1','m2','m3']:
    psu_block[m]["variables"]['s3'] = var_timestep_trigger(y='s3', f=es3)


def es4(_g, step, sL, s, _input):
    # Additive drift on 's4' by the swept parameter 'gamma'.
    return 's4', s['s4'] + _g['gamma']
for m in ['m1','m2','m3']:
    psu_block[m]["variables"]['s4'] = var_timestep_trigger(y='s4', f=es4)
|
||||
|
||||
|
||||
# ToDo: The number of values entered in sweep should be the # of config objs created,
# not dependent on the # of times the sweep is applied
# sweep exo_state func and point to exo-state in every other funtion
# param sweep on genesis states

# Genesis States
genesis_states = {
    's1': 0.0,
    's2': 0.0,
    's3': 1.0,
    's4': 1.0,
    'timestamp': '2018-10-01 15:16:24'
}


# Environment Process
# ToDo: Validate - make env proc trigger field agnostic
# 's3' runs two env functions each timestep: set to 'beta', then add 1.
env_process["s3"] = [lambda _g, x: _g['beta'], lambda _g, x: x + 1]
# 's4' is set to 'beta', but only at timestep 5.
env_process["s4"] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']])


# config_sim Necessary
sim_config = config_sim(
    {
        "N": 2,
        "T": range(5),
        "M": g, # Optional
    }
)
|
||||
|
||||
# New Convention
# Flatten the dict-keyed PSU block into the list form append_configs expects.
partial_state_update_blocks = psub_list(psu_block, psu_steps)
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    seeds=seeds,
    env_processes=env_process,
    partial_state_update_blocks=partial_state_update_blocks
)


print()
# BUG FIX: user-facing message misspelled "Policie".
print("Policy State Update Block:")
pp.pprint(partial_state_update_blocks)
print()
print()
||||
|
|
@ -0,0 +1,36 @@
|
|||
import unittest
|
||||
|
||||
import pandas as pd
|
||||
# from tabulate import tabulate
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import policy_aggregation
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
# `configs` is the global registry populated by the policy_aggregation import.
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)
# Execute the simulation and frame the raw records for assertions below.
raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
|
||||
|
||||
class TestStringMethods(unittest.TestCase):
    """Smoke tests for the test harness itself.

    BUG FIX: the previous custom ``__init__(self, result, tensor_field)``
    broke unittest — the runner instantiates test cases as
    ``TestStringMethods(methodName)``, so every run raised ``TypeError``; it
    also never called ``super().__init__``. None of the tests used the stored
    attributes, so the simulation output stays available via the module-level
    ``result`` / ``tensor_field`` instead.
    """

    def test_upper(self):
        self.assertEqual('foo'.upper(), 'FOO')

    def test_isupper(self):
        self.assertTrue('FOO'.isupper())
        self.assertFalse('Foo'.isupper())

    def test_split(self):
        s = 'hello world'
        self.assertEqual(s.split(), ['hello', 'world'])
        # check that s.split fails when the separator is not a string
        with self.assertRaises(TypeError):
            s.split(2)

if __name__ == '__main__':
    unittest.main()
|
||||
|
|
@ -0,0 +1,183 @@
|
|||
from copy import deepcopy
|
||||
|
||||
import pandas as pd
|
||||
from fn.func import curried
|
||||
from datetime import timedelta
|
||||
import pprint as pp
|
||||
|
||||
from cadCAD.utils import SilentDF #, val_switch
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import time_step, config_sim, var_trigger, var_substep_trigger, env_trigger, psub_list
|
||||
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
|
||||
|
||||
|
||||
DF = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
|
||||
|
||||
|
||||
class udoExample(object):
    """Example user-defined object (UDO) threaded through cadCAD state.

    Mutator methods return ``self`` so calls chain fluently,
    e.g. ``u.updateX().perceive(s).updateDS()``.
    """

    def __init__(self, x, dataset=None):
        self.x = x
        # Memory-address string identifying this instance across substeps.
        self.mem_id = str(hex(id(self)))
        self.ds = dataset  # for setting ds initially or querying
        self.perception = {}

    def anon(self, f):
        """Apply an arbitrary function *f* to this object and return its result."""
        return f(self)

    def updateX(self):
        """Increment the counter; returns self for chaining."""
        self.x += 1
        return self

    def updateDS(self):
        """Mutate the shared dataset in place (demonstrates dataset side effects)."""
        self.ds.iloc[0, 0] -= 10
        # pp.pprint(self.ds)
        return self

    def perceive(self, s):
        """Cache the dataset row matching the current (run, substep, timestep)."""
        self.perception = self.ds[
            (self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
        ].drop(columns=['run', 'substep']).to_dict()
        return self

    def read(self, ds_uri):
        """Load the dataset from *ds_uri*; returns self for chaining."""
        self.ds = SilentDF(pd.read_csv(ds_uri))
        return self

    def write(self, ds_uri):
        """Persist this object's dataset to *ds_uri*.

        BUG FIX: was ``pd.to_csv(ds_uri)`` — pandas has no module-level
        ``to_csv``; the instance's own dataset must be written.
        """
        self.ds.to_csv(ds_uri)

    # ToDo: Generic update function
|
||||
|
||||
|
||||
state_udo = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
policy_udoA = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
policy_udoB = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
|
||||
|
||||
sim_config = config_sim({
|
||||
"N": 2,
|
||||
"T": range(4)
|
||||
})
|
||||
|
||||
# ToDo: DataFrame Column order
|
||||
state_dict = {
|
||||
'increment': 0,
|
||||
'state_udo': state_udo, 'state_udo_tracker': 0,
|
||||
'state_udo_perception_tracker': {"ds1": None, "ds2": None, "ds3": None, "timestep": None},
|
||||
'udo_policies': {'udo_A': policy_udoA, 'udo_B': policy_udoB},
|
||||
'udo_policy_tracker': (0, 0),
|
||||
'timestamp': '2019-01-01 00:00:00'
|
||||
}
|
||||
|
||||
psu_steps = ['m1', 'm2', 'm3']
|
||||
system_substeps = len(psu_steps)
|
||||
var_timestep_trigger = var_substep_trigger([0, system_substeps])
|
||||
env_timestep_trigger = env_trigger(system_substeps)
|
||||
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
|
||||
|
||||
def udo_policyA(_g, step, sL, s):
|
||||
s['udo_policies']['udo_A'].updateX()
|
||||
return {'udo_A': udoPipe(s['udo_policies']['udo_A'])}
|
||||
# policies['a'] = udo_policyA
|
||||
for m in psu_steps:
|
||||
psu_block[m]['policies']['a'] = udo_policyA
|
||||
|
||||
def udo_policyB(_g, step, sL, s):
|
||||
s['udo_policies']['udo_B'].updateX()
|
||||
return {'udo_B': udoPipe(s['udo_policies']['udo_B'])}
|
||||
# policies['b'] = udo_policyB
|
||||
for m in psu_steps:
|
||||
psu_block[m]['policies']['b'] = udo_policyB
|
||||
|
||||
|
||||
# policies = {"p1": udo_policyA, "p2": udo_policyB}
|
||||
# policies = {"A": udo_policyA, "B": udo_policyB}
|
||||
|
||||
def add(y: str, added_val):
|
||||
return lambda _g, step, sL, s, _input: (y, s[y] + added_val)
|
||||
# state_updates['increment'] = add('increment', 1)
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['increment'] = add('increment', 1)
|
||||
|
||||
|
||||
@curried
|
||||
def perceive(s, self):
|
||||
self.perception = self.ds[
|
||||
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
|
||||
].drop(columns=['run', 'substep']).to_dict()
|
||||
return self
|
||||
|
||||
|
||||
def state_udo_update(_g, step, sL, s, _input):
|
||||
y = 'state_udo'
|
||||
# s['hydra_state'].updateX().anon(perceive(s))
|
||||
s['state_udo'].updateX().perceive(s).updateDS()
|
||||
x = udoPipe(s['state_udo'])
|
||||
return y, x
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['state_udo'] = state_udo_update
|
||||
|
||||
|
||||
def track(destination, source):
|
||||
return lambda _g, step, sL, s, _input: (destination, s[source].x)
|
||||
state_udo_tracker = track('state_udo_tracker', 'state_udo')
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['state_udo_tracker'] = state_udo_tracker
|
||||
|
||||
|
||||
def track_state_udo_perception(destination, source):
|
||||
def id(past_perception):
|
||||
if len(past_perception) == 0:
|
||||
return state_dict['state_udo_perception_tracker']
|
||||
else:
|
||||
return past_perception
|
||||
return lambda _g, step, sL, s, _input: (destination, id(s[source].perception))
|
||||
state_udo_perception_tracker = track_state_udo_perception('state_udo_perception_tracker', 'state_udo')
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['state_udo_perception_tracker'] = state_udo_perception_tracker
|
||||
|
||||
|
||||
def view_udo_policy(_g, step, sL, s, _input):
|
||||
return 'udo_policies', _input
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['udo_policies'] = view_udo_policy
|
||||
|
||||
|
||||
def track_udo_policy(destination, source):
|
||||
def val_switch(v):
|
||||
if isinstance(v, pd.DataFrame) is True or isinstance(v, SilentDF) is True:
|
||||
return SilentDF(v)
|
||||
else:
|
||||
return v.x
|
||||
return lambda _g, step, sL, s, _input: (destination, tuple(val_switch(v) for _, v in s[source].items()))
|
||||
udo_policy_tracker = track_udo_policy('udo_policy_tracker', 'udo_policies')
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['udo_policy_tracker'] = udo_policy_tracker
|
||||
|
||||
|
||||
def update_timestamp(_g, step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
return y, time_step(dt_str=s[y], dt_format='%Y-%m-%d %H:%M:%S', _timedelta=timedelta(days=0, minutes=0, seconds=1))
|
||||
for m in psu_steps:
|
||||
psu_block[m]["variables"]['timestamp'] = var_timestep_trigger(y='timestamp', f=update_timestamp)
|
||||
# psu_block[m]["variables"]['timestamp'] = var_trigger(
|
||||
# y='timestamp', f=update_timestamp,
|
||||
# pre_conditions={'substep': [0, system_substeps]}, cond_op=lambda a, b: a and b
|
||||
# )
|
||||
# psu_block[m]["variables"]['timestamp'] = update_timestamp
|
||||
|
||||
# ToDo: Bug without specifying parameters
|
||||
# New Convention
|
||||
partial_state_update_blocks = psub_list(psu_block, psu_steps)
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=state_dict,
|
||||
partial_state_update_blocks=partial_state_update_blocks
|
||||
)
|
||||
|
||||
print()
|
||||
print("State Updates:")
|
||||
pp.pprint(partial_state_update_blocks)
|
||||
print()
|
||||
|
|
@ -0,0 +1,169 @@
|
|||
import pandas as pd
|
||||
import pprint as pp
|
||||
from fn.func import curried
|
||||
from datetime import timedelta
|
||||
|
||||
from cadCAD.utils import SilentDF #, val_switch
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import time_step, config_sim
|
||||
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
|
||||
|
||||
|
||||
DF = SilentDF(pd.read_csv('/Users/jjodesty/Projects/DiffyQ-SimCAD/simulations/external_data/output.csv'))
|
||||
|
||||
class udoExample(object):
    """Example user-defined object (UDO) threaded through cadCAD state.

    Mutator methods return ``self`` so calls chain fluently,
    e.g. ``u.updateX().perceive(s)``.
    """

    def __init__(self, x, dataset=None):
        self.x = x
        # Memory-address string identifying this instance across substeps.
        self.mem_id = str(hex(id(self)))
        self.ds = dataset  # for setting ds initially or querying
        self.perception = {}

    def anon(self, f):
        """Apply an arbitrary function *f* to this object and return its result."""
        return f(self)

    def updateX(self):
        """Increment the counter; returns self for chaining."""
        self.x += 1
        return self

    def perceive(self, s):
        """Cache the dataset row matching the current (run, substep, timestep)."""
        self.perception = self.ds[
            (self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
        ].drop(columns=['run', 'substep']).to_dict()
        return self

    def read(self, ds_uri):
        """Load the dataset from *ds_uri*; returns self for chaining."""
        self.ds = SilentDF(pd.read_csv(ds_uri))
        return self

    def write(self, ds_uri):
        """Persist this object's dataset to *ds_uri*.

        BUG FIX: was ``pd.to_csv(ds_uri)`` — pandas has no module-level
        ``to_csv``; the instance's own dataset must be written.
        """
        self.ds.to_csv(ds_uri)

    # ToDo: Generic update function
|
||||
|
||||
# can be accessed after an update within the same substep and timestep
|
||||
|
||||
state_udo = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
policy_udoA = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
policy_udoB = UDO(udo=udoExample(0, DF), masked_members=['obj', 'perception'])
|
||||
|
||||
def udo_policyA(_g, step, sL, s):
|
||||
s['udo_policies']['udo_A'].updateX()
|
||||
return {'udo_A': udoPipe(s['udo_policies']['udo_A'])}
|
||||
|
||||
def udo_policyB(_g, step, sL, s):
|
||||
s['udo_policies']['udo_B'].updateX()
|
||||
return {'udo_B': udoPipe(s['udo_policies']['udo_B'])}
|
||||
|
||||
|
||||
policies = {"p1": udo_policyA, "p2": udo_policyB}
|
||||
|
||||
# ToDo: DataFrame Column order
|
||||
state_dict = {
|
||||
'increment': 0,
|
||||
'state_udo': state_udo, 'state_udo_tracker_a': 0, 'state_udo_tracker_b': 0,
|
||||
'state_udo_perception_tracker': {"ds1": None, "ds2": None, "ds3": None, "timestep": None},
|
||||
'udo_policies': {'udo_A': policy_udoA, 'udo_B': policy_udoB},
|
||||
'udo_policy_tracker_a': (0, 0), 'udo_policy_tracker_b': (0, 0),
|
||||
'timestamp': '2019-01-01 00:00:00'
|
||||
}
|
||||
|
||||
@curried
|
||||
def perceive(s, self):
|
||||
self.perception = self.ds[
|
||||
(self.ds['run'] == s['run']) & (self.ds['substep'] == s['substep']) & (self.ds['timestep'] == s['timestep'])
|
||||
].drop(columns=['run', 'substep']).to_dict()
|
||||
return self
|
||||
|
||||
def view_udo_policy(_g, step, sL, s, _input):
|
||||
return 'udo_policies', _input
|
||||
|
||||
def state_udo_update(_g, step, sL, s, _input):
|
||||
y = 'state_udo'
|
||||
# s['hydra_state'].updateX().anon(perceive(s))
|
||||
s['state_udo'].updateX().perceive(s)
|
||||
x = udoPipe(s['state_udo'])
|
||||
return y, x
|
||||
|
||||
def increment(y, incr_by):
    """Build a state-update function that adds *incr_by* to state *y*."""
    def _update(_g, step, sL, s, _input):
        return y, s[y] + incr_by
    return _update


def track(destination, source):
    """Build a state-update function that copies ``s[source].x`` into *destination*."""
    def _update(_g, step, sL, s, _input):
        return destination, s[source].x
    return _update
|
||||
|
||||
def track_udo_policy(destination, source):
|
||||
def val_switch(v):
|
||||
if isinstance(v, pd.DataFrame) is True or isinstance(v, SilentDF) is True:
|
||||
return SilentDF(v)
|
||||
else:
|
||||
return v.x
|
||||
return lambda _g, step, sL, s, _input: (destination, tuple(val_switch(v) for _, v in s[source].items()))
|
||||
|
||||
def track_state_udo_perception(destination, source):
|
||||
def id(past_perception):
|
||||
if len(past_perception) == 0:
|
||||
return state_dict['state_udo_perception_tracker']
|
||||
else:
|
||||
return past_perception
|
||||
return lambda _g, step, sL, s, _input: (destination, id(s[source].perception))
|
||||
|
||||
|
||||
def time_model(y, substeps, time_delta, ts_format='%Y-%m-%d %H:%M:%S'):
    # Build a state-update function that advances timestamp state *y* by
    # *time_delta*, but only on the first (0) or last (*substeps*) substep;
    # on all other substeps the timestamp passes through unchanged.
    def apply_incriment_condition(s):
        if s['substep'] == 0 or s['substep'] == substeps:
            return y, time_step(dt_str=s[y], dt_format=ts_format, _timedelta=time_delta)
        else:
            return y, s[y]
    return lambda _g, step, sL, s, _input: apply_incriment_condition(s)
|
||||
|
||||
|
||||
states = {
|
||||
'increment': increment('increment', 1),
|
||||
'state_udo_tracker_a': track('state_udo_tracker_a', 'state_udo'),
|
||||
'state_udo': state_udo_update,
|
||||
'state_udo_perception_tracker': track_state_udo_perception('state_udo_perception_tracker', 'state_udo'),
|
||||
'state_udo_tracker_b': track('state_udo_tracker_b', 'state_udo'),
|
||||
'udo_policy_tracker_a': track_udo_policy('udo_policy_tracker_a', 'udo_policies'),
|
||||
'udo_policies': view_udo_policy,
|
||||
'udo_policy_tracker_b': track_udo_policy('udo_policy_tracker_b', 'udo_policies')
|
||||
}
|
||||
|
||||
substeps=3
|
||||
update_timestamp = time_model(
|
||||
'timestamp',
|
||||
substeps=3,
|
||||
time_delta=timedelta(days=0, minutes=0, seconds=1),
|
||||
ts_format='%Y-%m-%d %H:%M:%S'
|
||||
)
|
||||
states['timestamp'] = update_timestamp
|
||||
|
||||
PSUB = {
|
||||
'policies': policies,
|
||||
'states': states
|
||||
}
|
||||
|
||||
# needs M1&2 need behaviors
|
||||
partial_state_update_blocks = [PSUB] * substeps
|
||||
# pp.pprint(partial_state_update_blocks)
|
||||
|
||||
sim_config = config_sim({
|
||||
"N": 2,
|
||||
"T": range(4)
|
||||
})
|
||||
|
||||
# ToDo: Bug without specifying parameters
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=state_dict,
|
||||
seeds={},
|
||||
raw_exogenous_states={},
|
||||
env_processes={},
|
||||
partial_state_update_blocks=partial_state_update_blocks,
|
||||
# policy_ops=[lambda a, b: {**a, **b}]
|
||||
)
|
||||
|
||||
print()
|
||||
print("State Updates:")
|
||||
pp.pprint(partial_state_update_blocks)
|
||||
print()
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
import pandas as pd
|
||||
from typing import List
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import config1
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()

print("Simulation Execution: Single Configuration")
print()
# `configs` is the global registry populated by the config1 import above.
first_config = configs # only contains config1
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
run = Executor(exec_context=single_proc_ctx, configs=first_config)

raw_result, tensor_field = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field: config1")
# print(raw_result)
print(tabulate(tensor_field[['m', 'b1', 's1', 's2']], headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
||||
|
|
@ -0,0 +1,23 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import config2
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs # only contains config2
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import external_dataset
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs # only contains config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
result = pd.concat([result, result['external_data'].apply(pd.Series)], axis=1)[
|
||||
['run', 'substep', 'timestep', 'increment', 'external_data', 'policies', 'ds1', 'ds2', 'ds3', ]
|
||||
]
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import historical_state_access
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs # only contains config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
# cols = ['run','substep','timestep','x','nonexsistant','last_x','2nd_to_last_x','3rd_to_last_x','4th_to_last_x']
|
||||
cols = ['last_x']
|
||||
result = result[cols]
|
||||
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import config1, config2
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Concurrent Execution")
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
run = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
|
||||
# print(configs)
|
||||
i = 0
|
||||
config_names = ['config1', 'config2']
|
||||
for raw_result, tensor_field in run.execute():
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print(f"Tensor Field: {config_names[i]}")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
i += 1
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import sweep_config
|
||||
from cadCAD import configs
|
||||
|
||||
# pprint(configs)
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Concurrent Execution")
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
run = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
|
||||
i = 0
|
||||
config_names = ['sweep_config_A', 'sweep_config_B']
|
||||
for raw_result, tensor_field in run.execute():
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print("Tensor Field: " + config_names[i])
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
i += 1
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
from pprint import pprint
|
||||
|
||||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import policy_aggregation
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs # only contains config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import udo
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
|
||||
|
||||
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=configs)
|
||||
# cols = configs[0].initial_state.keys()
|
||||
cols = [
|
||||
'increment',
|
||||
'state_udo_tracker', 'state_udo', 'state_udo_perception_tracker',
|
||||
'udo_policies', 'udo_policy_tracker',
|
||||
'timestamp'
|
||||
]
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
|
||||
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)
|
||||
|
||||
# print(list(result['c']))
|
||||
|
||||
# print(tabulate(result['c'].apply(pd.Series), headers='keys', tablefmt='psql'))
|
||||
|
||||
# print(result.iloc[8,:]['state_udo'].ds)
|
||||
|
||||
# ctypes.cast(id(v['state_udo']['mem_id']), ctypes.py_object).value
|
||||
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
print(result.info(verbose=True))
|
||||
|
||||
# def f(df, col):
|
||||
# for k in df[col].iloc[0].keys():
|
||||
# df[k] = None
|
||||
# for index, row in df.iterrows():
|
||||
# # df.apply(lambda row:, axis=1)
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import udo_inter_substep_update
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
|
||||
|
||||
first_config = configs # only contains config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
# cols = configs[0].initial_state.keys()
|
||||
cols = [
|
||||
'increment',
|
||||
'state_udo_tracker_a', 'state_udo', 'state_udo_perception_tracker', 'state_udo_tracker_b',
|
||||
'udo_policy_tracker_a', 'udo_policies', 'udo_policy_tracker_b',
|
||||
'timestamp'
|
||||
]
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)[['run', 'substep', 'timestep'] + cols]
|
||||
# result = pd.concat([result.drop(['c'], axis=1), result['c'].apply(pd.Series)], axis=1)
|
||||
|
||||
# print(list(result['c']))
|
||||
|
||||
# print(tabulate(result['c'].apply(pd.Series), headers='keys', tablefmt='psql'))
|
||||
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print()
|
||||
print(result.info(verbose=True))
|
||||
|
||||
# def f(df, col):
|
||||
# for k in df[col].iloc[0].keys():
|
||||
# df[k] = None
|
||||
# for index, row in df.iterrows():
|
||||
# # df.apply(lambda row:, axis=1)
|
||||
|
|
@ -0,0 +1,166 @@
|
|||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from cadCAD.configuration import Configuration
|
||||
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
import pprint as pp
|
||||
|
||||
T = 50 #iterations in our simulation
|
||||
n = 3 #number of boxes in our network
|
||||
m = 2 #for barabasi graph type number of edges is (n-2)*m
|
||||
|
||||
G = nx.barabasi_albert_graph(n, m)
|
||||
k = len(G.edges)
|
||||
|
||||
|
||||
# class udoExample(object):
|
||||
# def __init__(self, G):
|
||||
# self.G = G
|
||||
# self.mem_id = str(hex(id(self)))
|
||||
|
||||
|
||||
g = UDO(udo=G)
|
||||
print()
|
||||
# print(g.edges)
|
||||
# print(G.edges)
|
||||
# pp.pprint(f"{type(g)}: {g}")
|
||||
# next
|
||||
balls = np.zeros(n,)
|
||||
|
||||
for node in g.nodes:
|
||||
rv = np.random.randint(1,25)
|
||||
g.nodes[node]['initial_balls'] = rv
|
||||
balls[node] = rv
|
||||
|
||||
# pp.pprint(balls)
|
||||
|
||||
# next
|
||||
scale=100
|
||||
nx.draw_kamada_kawai(G, node_size=balls*scale,labels=nx.get_node_attributes(G,'initial_balls'))
|
||||
|
||||
# next
|
||||
|
||||
initial_conditions = {'balls':balls, 'network':G}
|
||||
print(initial_conditions)
|
||||
|
||||
# next
|
||||
|
||||
def update_balls(params, step, sL, s, _input):
|
||||
delta_balls = _input['delta']
|
||||
new_balls = s['balls']
|
||||
for e in G.edges:
|
||||
move_ball = delta_balls[e]
|
||||
src = e[0]
|
||||
dst = e[1]
|
||||
if (new_balls[src] >= move_ball) and (new_balls[dst] >= -move_ball):
|
||||
new_balls[src] = new_balls[src] - move_ball
|
||||
new_balls[dst] = new_balls[dst] + move_ball
|
||||
|
||||
key = 'balls'
|
||||
value = new_balls
|
||||
|
||||
return (key, value)
|
||||
|
||||
|
||||
def update_network(params, step, sL, s, _input):
|
||||
new_nodes = _input['nodes']
|
||||
new_edges = _input['edges']
|
||||
new_balls = _input['quantity']
|
||||
|
||||
graph = s['network']
|
||||
|
||||
for node in new_nodes:
|
||||
graph.add_node(node)
|
||||
graph.nodes[node]['initial_balls'] = new_balls[node]
|
||||
graph.nodes[node]['strat'] = _input['node_strats'][node]
|
||||
|
||||
for edge in new_edges:
|
||||
graph.add_edge(edge[0], edge[1])
|
||||
graph.edges[edge]['strat'] = _input['edge_strats'][edge]
|
||||
|
||||
key = 'network'
|
||||
value = graph
|
||||
return (key, value)
|
||||
|
||||
|
||||
def update_network_balls(params, step, sL, s, _input):
|
||||
new_nodes = _input['nodes']
|
||||
new_balls = _input['quantity']
|
||||
balls = np.zeros(len(s['balls']) + len(new_nodes))
|
||||
|
||||
for node in s['network'].nodes:
|
||||
balls[node] = s['balls'][node]
|
||||
|
||||
for node in new_nodes:
|
||||
balls[node] = new_balls[node]
|
||||
|
||||
key = 'balls'
|
||||
value = balls
|
||||
|
||||
return (key, value)
|
||||
|
||||
# next
|
||||
|
||||
|
||||
def greedy_robot(src_balls, dst_balls):
|
||||
# robot wishes to accumlate balls at its source
|
||||
# takes half of its neighbors balls
|
||||
if src_balls < dst_balls:
|
||||
delta = -np.floor(dst_balls / 2)
|
||||
else:
|
||||
delta = 0
|
||||
|
||||
return delta
|
||||
|
||||
|
||||
def fair_robot(src_balls, dst_balls):
|
||||
# robot follows the simple balancing rule
|
||||
delta = np.sign(src_balls - dst_balls)
|
||||
|
||||
return delta
|
||||
|
||||
|
||||
def giving_robot(src_balls, dst_balls):
|
||||
# robot wishes to gice away balls one at a time
|
||||
if src_balls > 0:
|
||||
delta = 1
|
||||
else:
|
||||
delta = 0
|
||||
|
||||
return delta
|
||||
|
||||
# next
|
||||
|
||||
robot_strategies = [greedy_robot,fair_robot, giving_robot]
|
||||
|
||||
for node in G.nodes:
|
||||
nstrats = len(robot_strategies)
|
||||
rv = np.random.randint(0,nstrats)
|
||||
G.nodes[node]['strat'] = robot_strategies[rv]
|
||||
|
||||
for e in G.edges:
|
||||
owner_node = e[0]
|
||||
G.edges[e]['strat'] = G.nodes[owner_node]['strat']
|
||||
|
||||
# next
|
||||
|
||||
def robotic_network(params, step, sL, s):
|
||||
graph = s['network']
|
||||
|
||||
delta_balls = {}
|
||||
for e in graph.edges:
|
||||
src = e[0]
|
||||
src_balls = s['balls'][src]
|
||||
dst = e[1]
|
||||
dst_balls = s['balls'][dst]
|
||||
|
||||
# transfer balls according to specific robot strat
|
||||
strat = graph.edges[e]['strat']
|
||||
delta_balls[e] = strat(src_balls, dst_balls)
|
||||
|
||||
return_dict = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': delta_balls}
|
||||
|
||||
return (return_dict)
|
||||
|
|
@ -0,0 +1,221 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils.userDefinedObject import udoPipe, UDO
|
||||
import networkx as nx
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
T = 50 #iterations in our simulation
|
||||
n = 3 #number of boxes in our network
|
||||
m = 2 #for barabasi graph type number of edges is (n-2)*m
|
||||
|
||||
G = nx.barabasi_albert_graph(n, m)
|
||||
k = len(G.edges)
|
||||
|
||||
balls = np.zeros(n,)
|
||||
|
||||
for node in G.nodes:
|
||||
rv = np.random.randint(1,25)
|
||||
G.nodes[node]['initial_balls'] = rv
|
||||
balls[node] = rv
|
||||
|
||||
scale=100
|
||||
nx.draw_kamada_kawai(G, node_size=balls*scale,labels=nx.get_node_attributes(G,'initial_balls'))
|
||||
|
||||
def greedy_robot(src_balls, dst_balls):
|
||||
# robot wishes to accumlate balls at its source
|
||||
# takes half of its neighbors balls
|
||||
if src_balls < dst_balls:
|
||||
return -np.floor(dst_balls / 2)
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
def fair_robot(src_balls, dst_balls):
|
||||
# robot follows the simple balancing rule
|
||||
return np.sign(src_balls - dst_balls)
|
||||
|
||||
|
||||
def giving_robot(src_balls, dst_balls):
|
||||
# robot wishes to gice away balls one at a time
|
||||
if src_balls > 0:
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
robot_strategies = [greedy_robot,fair_robot, giving_robot]
|
||||
|
||||
for node in G.nodes:
|
||||
nstrats = len(robot_strategies)
|
||||
rv = np.random.randint(0,nstrats)
|
||||
G.nodes[node]['strat'] = robot_strategies[rv]
|
||||
|
||||
for e in G.edges:
|
||||
owner_node = e[0]
|
||||
G.edges[e]['strat'] = G.nodes[owner_node]['strat']
|
||||
|
||||
default_policy = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': {}}
|
||||
class robot(object):
|
||||
def __init__(self, graph, balls, internal_policy=default_policy):
|
||||
self.mem_id = str(hex(id(self)))
|
||||
self.internal_policy = internal_policy
|
||||
self.graph = graph
|
||||
self.balls = balls
|
||||
|
||||
|
||||
def robotic_network(self, graph, balls): # move balls
|
||||
self.graph, self.balls = graph, balls
|
||||
delta_balls = {}
|
||||
for e in self.graph.edges:
|
||||
src = e[0]
|
||||
src_balls = self.balls[src]
|
||||
dst = e[1]
|
||||
dst_balls = self.balls[dst]
|
||||
|
||||
# transfer balls according to specific robot strat
|
||||
strat = self.graph.edges[e]['strat']
|
||||
delta_balls[e] = strat(src_balls, dst_balls)
|
||||
|
||||
self.internal_policy = {'nodes': [], 'edges': {}, 'quantity': {}, 'node_strats': {}, 'edge_strats': {}, 'delta': delta_balls}
|
||||
return self
|
||||
|
||||
def agent_arrival(self, graph, balls): # add node
|
||||
self.graph, self.balls = graph, balls
|
||||
node = len(self.graph.nodes)
|
||||
edge_list = self.graph.edges
|
||||
|
||||
# choose a m random edges without replacement
|
||||
# new = np.random.choose(edgelist,m)
|
||||
new = [0, 1] # tester
|
||||
|
||||
nodes = [node]
|
||||
edges = [(node, new_node) for new_node in new]
|
||||
|
||||
initial_balls = {node: np.random.randint(1, 25)}
|
||||
|
||||
rv = np.random.randint(0, nstrats)
|
||||
node_strat = {node: robot_strategies[rv]}
|
||||
|
||||
edge_strats = {e: robot_strategies[rv] for e in edges}
|
||||
|
||||
self.internal_policy = {'nodes': nodes,
|
||||
'edges': edges,
|
||||
'quantity': initial_balls,
|
||||
'node_strats': node_strat,
|
||||
'edge_strats': edge_strats,
|
||||
'delta': np.zeros(node + 1)
|
||||
}
|
||||
|
||||
return self
|
||||
|
||||
robot_udo = UDO(udo=robot(G, balls), masked_members=['obj'])
|
||||
initial_conditions = {'balls': balls, 'network': G, 'robot': robot_udo}
|
||||
|
||||
|
||||
def update_balls(params, step, sL, s, _input):
|
||||
delta_balls = _input['robot'].internal_policy['delta']
|
||||
new_balls = s['balls']
|
||||
for e in G.edges:
|
||||
move_ball = delta_balls[e]
|
||||
src = e[0]
|
||||
dst = e[1]
|
||||
if (new_balls[src] >= move_ball) and (new_balls[dst] >= -move_ball):
|
||||
new_balls[src] = new_balls[src] - move_ball
|
||||
new_balls[dst] = new_balls[dst] + move_ball
|
||||
|
||||
key = 'balls'
|
||||
value = new_balls
|
||||
|
||||
return (key, value)
|
||||
|
||||
|
||||
def update_network(params, step, sL, s, _input):
|
||||
new_nodes = _input['robot'].internal_policy['nodes']
|
||||
new_edges = _input['robot'].internal_policy['edges']
|
||||
new_balls = _input['robot'].internal_policy['quantity']
|
||||
|
||||
graph = s['network']
|
||||
|
||||
for node in new_nodes:
|
||||
graph.add_node(node)
|
||||
graph.nodes[node]['initial_balls'] = new_balls[node]
|
||||
graph.nodes[node]['strat'] = _input['robot'].internal_policy['node_strats'][node]
|
||||
|
||||
for edge in new_edges:
|
||||
graph.add_edge(edge[0], edge[1])
|
||||
graph.edges[edge]['strat'] = _input['robot'].internal_policy['edge_strats'][edge]
|
||||
|
||||
key = 'network'
|
||||
value = graph
|
||||
return (key, value)
|
||||
|
||||
|
||||
def update_network_balls(params, step, sL, s, _input):
|
||||
new_nodes = _input['robot'].internal_policy['nodes']
|
||||
new_balls = _input['robot'].internal_policy['quantity']
|
||||
balls = np.zeros(len(s['balls']) + len(new_nodes))
|
||||
|
||||
for node in s['network'].nodes:
|
||||
balls[node] = s['balls'][node]
|
||||
|
||||
for node in new_nodes:
|
||||
balls[node] = new_balls[node]
|
||||
|
||||
key = 'balls'
|
||||
value = balls
|
||||
|
||||
return (key, value)
|
||||
|
||||
|
||||
def robotic_network(params, step, sL, s):
|
||||
s['robot'].robotic_network(s['network'], s['balls'])
|
||||
return {'robot': udoPipe(s['robot'])}
|
||||
|
||||
|
||||
def agent_arrival(params, step, sL, s):
|
||||
s['robot'].agent_arrival(s['network'], s['balls'])
|
||||
return {'robot': udoPipe(s['robot'])}
|
||||
|
||||
def get_robot(params, step, sL, s, _input):
|
||||
return 'robot', _input['robot']
|
||||
|
||||
partial_state_update_blocks = [
|
||||
{
|
||||
'policies': {
|
||||
# The following policy functions will be evaluated and their returns will be passed to the state update functions
|
||||
'p1': robotic_network
|
||||
},
|
||||
'variables': { # The following state variables will be updated simultaneously
|
||||
'balls': update_balls,
|
||||
'robot': get_robot
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
'policies': {
|
||||
# The following policy functions will be evaluated and their returns will be passed to the state update functions
|
||||
'p1': agent_arrival
|
||||
},
|
||||
'variables': { # The following state variables will be updated simultaneously
|
||||
'network': update_network,
|
||||
'balls': update_network_balls,
|
||||
'robot': get_robot
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
simulation_parameters = {
|
||||
'T': range(T),
|
||||
'N': 1,
|
||||
'M': {}
|
||||
}
|
||||
append_configs(
|
||||
sim_configs=simulation_parameters, #dict containing state update functions
|
||||
initial_state=initial_conditions, #dict containing variable names and initial values
|
||||
partial_state_update_blocks= partial_state_update_blocks #, #dict containing state update functions
|
||||
# policy_ops=[lambda a, b: {**a, **b}]
|
||||
)
|
||||
# config = Configuration(initial_state=initial_conditions, #dict containing variable names and initial values
|
||||
# partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
|
||||
# sim_config=simulation_parameters #dict containing simulation parameters
|
||||
# )
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
import pandas as pd
|
||||
from tabulate import tabulate
|
||||
# The following imports NEED to be in the exact order
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs # only contains config1
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.main()
|
||||
result = pd.DataFrame(raw_result)
|
||||
print()
|
||||
print("Tensor Field: config1")
|
||||
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
|
||||
print("Output:")
|
||||
print(tabulate(result, headers='keys', tablefmt='psql'))
|
||||
print(result[['network']])
|
||||
print()
|
||||
|
||||
print(result[['network', 'substep']])
|
||||
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,139 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import env_proc_trigger, bound_norm_random, ep_time_step, config_sim
|
||||
|
||||
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'param1': 1}
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'param1': 'a', 'param2': 2}
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'param1': 'b', 'param2': 4}
|
||||
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'param1': ['c'], 'param2': np.array([10, 100])}
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'param1': ['d'], 'param2': np.array([20, 200])}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m1(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m2(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m2(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m3(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = _input['param1']
|
||||
return (y, x)
|
||||
def s2m3(_g, step, sL, s, _input):
|
||||
y = 's2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
def s1m4(_g, step, sL, s, _input):
|
||||
y = 's1'
|
||||
x = [1]
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es3p1(_g, step, sL, s, _input):
|
||||
y = 's3'
|
||||
x = s['s3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
def es4p2(_g, step, sL, s, _input):
|
||||
y = 's4'
|
||||
x = s['s4'] * bound_norm_random(seeds['b'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(_g, step, sL, s, _input):
|
||||
y = 'timestamp'
|
||||
x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
def env_a(x):
|
||||
return 5
|
||||
def env_b(x):
|
||||
return 10
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
'timestamp': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
|
||||
raw_exogenous_states = {
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
"timestamp": es5p2
|
||||
}
|
||||
|
||||
|
||||
env_processes = {
|
||||
"s3": env_a,
|
||||
"s4": env_proc_trigger('2018-10-01 15:16:25', env_b)
|
||||
}
|
||||
|
||||
|
||||
partial_state_update_block = [
|
||||
]
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds={},
|
||||
raw_exogenous_states={},
|
||||
env_processes={},
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1,150 @@
|
|||
import networkx as nx
|
||||
from scipy.stats import expon, gamma
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
#helper functions
|
||||
def get_nodes_by_type(g, node_type_selection):
|
||||
return [node for node in g.nodes if g.nodes[node]['type']== node_type_selection ]
|
||||
|
||||
def get_edges_by_type(g, edge_type_selection):
|
||||
return [edge for edge in g.edges if g.edges[edge]['type']== edge_type_selection ]
|
||||
|
||||
def total_funds_given_total_supply(total_supply):
|
||||
|
||||
#can put any bonding curve invariant here for initializatio!
|
||||
total_funds = total_supply
|
||||
|
||||
return total_funds
|
||||
|
||||
#maximum share of funds a proposal can take
|
||||
default_beta = .2 #later we should set this to be param so we can sweep it
|
||||
# tuning param for the trigger function
|
||||
default_rho = .001
|
||||
|
||||
def trigger_threshold(requested, funds, supply, beta = default_beta, rho = default_rho):
|
||||
|
||||
share = requested/funds
|
||||
if share < beta:
|
||||
return rho*supply/(beta-share)**2
|
||||
else:
|
||||
return np.inf
|
||||
|
||||
def initialize_network(n,m, funds_func=total_funds_given_total_supply, trigger_func =trigger_threshold ):
|
||||
network = nx.DiGraph()
|
||||
for i in range(n):
|
||||
network.add_node(i)
|
||||
network.nodes[i]['type']="participant"
|
||||
|
||||
h_rv = expon.rvs(loc=0.0, scale=1000)
|
||||
network.nodes[i]['holdings'] = h_rv
|
||||
|
||||
s_rv = np.random.rand()
|
||||
network.nodes[i]['sentiment'] = s_rv
|
||||
|
||||
participants = get_nodes_by_type(network, 'participant')
|
||||
initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
|
||||
|
||||
initial_funds = funds_func(initial_supply)
|
||||
|
||||
#generate initial proposals
|
||||
for ind in range(m):
|
||||
j = n+ind
|
||||
network.add_node(j)
|
||||
network.nodes[j]['type']="proposal"
|
||||
network.nodes[j]['conviction']=0
|
||||
network.nodes[j]['status']='candidate'
|
||||
network.nodes[j]['age']=0
|
||||
|
||||
r_rv = gamma.rvs(3,loc=0.001, scale=10000)
|
||||
network.node[j]['funds_requested'] = r_rv
|
||||
|
||||
network.nodes[j]['trigger']= trigger_threshold(r_rv, initial_funds, initial_supply)
|
||||
|
||||
for i in range(n):
|
||||
network.add_edge(i, j)
|
||||
|
||||
rv = np.random.rand()
|
||||
a_rv = 1-4*(1-rv)*rv #polarized distribution
|
||||
network.edges[(i, j)]['affinity'] = a_rv
|
||||
network.edges[(i,j)]['tokens'] = 0
|
||||
network.edges[(i, j)]['conviction'] = 0
|
||||
|
||||
proposals = get_nodes_by_type(network, 'proposal')
|
||||
total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
|
||||
|
||||
return network, initial_funds, initial_supply, total_requested
|
||||
|
||||
def trigger_sweep(field, trigger_func,xmax=.2,default_alpha=.5):
|
||||
|
||||
if field == 'token_supply':
|
||||
alpha = default_alpha
|
||||
share_of_funds = np.arange(.001,xmax,.001)
|
||||
total_supply = np.arange(0,10**9, 10**6)
|
||||
demo_data_XY = np.outer(share_of_funds,total_supply)
|
||||
|
||||
demo_data_Z0=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z1=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z2=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z3=np.empty(demo_data_XY.shape)
|
||||
for sof_ind in range(len(share_of_funds)):
|
||||
sof = share_of_funds[sof_ind]
|
||||
for ts_ind in range(len(total_supply)):
|
||||
ts = total_supply[ts_ind]
|
||||
tc = ts /(1-alpha)
|
||||
trigger = trigger_func(sof, 1, ts)
|
||||
demo_data_Z0[sof_ind,ts_ind] = np.log10(trigger)
|
||||
demo_data_Z1[sof_ind,ts_ind] = trigger
|
||||
demo_data_Z2[sof_ind,ts_ind] = trigger/tc #share of maximum possible conviction
|
||||
demo_data_Z3[sof_ind,ts_ind] = np.log10(trigger/tc)
|
||||
return {'log10_trigger':demo_data_Z0,
|
||||
'trigger':demo_data_Z1,
|
||||
'share_of_max_conv': demo_data_Z2,
|
||||
'log10_share_of_max_conv':demo_data_Z3,
|
||||
'total_supply':total_supply,
|
||||
'share_of_funds':share_of_funds}
|
||||
elif field == 'alpha':
|
||||
alpha = np.arange(.5,1,.01)
|
||||
share_of_funds = np.arange(.001,xmax,.001)
|
||||
total_supply = 10**9
|
||||
demo_data_XY = np.outer(share_of_funds,alpha)
|
||||
|
||||
demo_data_Z4=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z5=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z6=np.empty(demo_data_XY.shape)
|
||||
demo_data_Z7=np.empty(demo_data_XY.shape)
|
||||
for sof_ind in range(len(share_of_funds)):
|
||||
sof = share_of_funds[sof_ind]
|
||||
for a_ind in range(len(alpha)):
|
||||
ts = total_supply
|
||||
a = alpha[a_ind]
|
||||
tc = ts /(1-a)
|
||||
trigger = trigger_func(sof, 1, ts)
|
||||
demo_data_Z4[sof_ind,a_ind] = np.log10(trigger)
|
||||
demo_data_Z5[sof_ind,a_ind] = trigger
|
||||
demo_data_Z6[sof_ind,a_ind] = trigger/tc #share of maximum possible conviction
|
||||
demo_data_Z7[sof_ind,a_ind] = np.log10(trigger/tc)
|
||||
|
||||
return {'log10_trigger':demo_data_Z4,
|
||||
'trigger':demo_data_Z5,
|
||||
'share_of_max_conv': demo_data_Z6,
|
||||
'log10_share_of_max_conv':demo_data_Z7,
|
||||
'alpha':alpha,
|
||||
'share_of_funds':share_of_funds}
|
||||
|
||||
else:
|
||||
return "invalid field"
|
||||
|
||||
def trigger_plotter(share_of_funds, Z, color_label, y, ylabel, cmap='jet'):
    """Render a filled-contour map of the trigger function.

    ``share_of_funds`` spans the x-axis, ``y`` the y-axis, and ``Z``
    (transposed for contourf) supplies the values; a labelled colorbar
    is attached to the figure.
    """
    fig, ax = plt.subplots(figsize=(10, 5))

    contours = plt.contourf(share_of_funds, y, Z.T, 100, cmap=cmap)
    cbar = plt.colorbar(contours)
    plt.axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
    #ax.set_xscale('log')
    plt.ylabel(ylabel)
    plt.xlabel('Share of Funds Requested')
    plt.title('Trigger Function Map')

    cbar.ax.set_ylabel(color_label)
|
||||
|
|
@ -0,0 +1,548 @@
|
|||
import numpy as np
|
||||
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
from simulations.validation.conviction_helpers import *
|
||||
#import networkx as nx
|
||||
from scipy.stats import expon, gamma
|
||||
|
||||
|
||||
#functions for partial state update block 1
|
||||
|
||||
#Driving processes: arrival of participants, proposals and funds
|
||||
##-----------------------------------------
|
||||
def gen_new_participant(network, new_participant_holdings):
    """Add a participant node holding ``new_participant_holdings`` tokens.

    The participant gets a uniform-random sentiment and one edge to every
    existing proposal, with a polarized random affinity, tokens staked in
    proportion to that affinity, and zero initial conviction.
    """
    participant_id = len(list(network.nodes))

    network.add_node(participant_id)
    network.nodes[participant_id]['type'] = "participant"

    network.nodes[participant_id]['sentiment'] = np.random.rand()
    network.nodes[participant_id]['holdings'] = new_participant_holdings

    for proposal_id in get_nodes_by_type(network, 'proposal'):
        network.add_edge(participant_id, proposal_id)

        u = np.random.rand()
        affinity = 1 - 4 * (1 - u) * u  # polarized: mass near 0 and 1
        edge = network.edges[(participant_id, proposal_id)]
        edge['affinity'] = affinity
        edge['tokens'] = affinity * network.nodes[participant_id]['holdings']
        edge['conviction'] = 0

    return network
|
||||
|
||||
|
||||
scale_factor = 1000

def gen_new_proposal(network, funds, supply, total_funds, trigger_func):
    """Add a candidate proposal node and connect it to every participant.

    The requested amount is gamma-distributed, rescaled by the share of
    total funds currently available; the proposal's trigger threshold is
    computed from the request, current funds, and supply.  One random
    participant is the proposer (affinity 1); all others get a polarized
    random affinity.  All edges start with zero tokens and conviction.
    """
    j = len(list(network.nodes))
    network.add_node(j)
    network.nodes[j]['type'] = "proposal"

    network.nodes[j]['conviction'] = 0
    network.nodes[j]['status'] = 'candidate'
    network.nodes[j]['age'] = 0

    rescale = scale_factor * funds / total_funds
    r_rv = gamma.rvs(3, loc=0.001, scale=rescale)
    # BUG FIX: ``network.node`` was removed in networkx 2.4; every other
    # access in this file already uses ``network.nodes``.
    network.nodes[j]['funds_requested'] = r_rv

    network.nodes[j]['trigger'] = trigger_func(r_rv, funds, supply)

    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)

    for i in participants:
        network.add_edge(i, j)
        if i == proposing_participant:
            network.edges[(i, j)]['affinity'] = 1
        else:
            rv = np.random.rand()
            a_rv = 1 - 4 * (1 - rv) * rv  # polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv

        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i, j)]['tokens'] = 0
    return network
|
||||
|
||||
|
||||
|
||||
def driving_process(params, step, sL, s):
    """Random-arrival policy: decide whether a new participant and/or a new
    proposal arrives this step, and how many funds flow into the treasury.

    Higher system sentiment speeds up participant arrivals and fund
    inflows; proposal arrivals are more likely when edge affinities are
    low and candidates already request a large share of available funds.
    """
    sentiment = s['sentiment']
    network = s['network']
    funds = s['funds']

    # Participant arrival: expected once every 10/sentiment steps.
    arrival_rate = 10 / sentiment
    new_participant = bool(np.random.rand() < 1 / arrival_rate)
    if new_participant:
        new_participant_holdings = expon.rvs(loc=0.0, scale=1000)
    else:
        new_participant_holdings = 0

    affinities = [network.edges[e]['affinity'] for e in network.edges]
    median_affinity = np.median(affinities)

    proposals = get_nodes_by_type(network, 'proposal')
    fund_requests = [network.nodes[j]['funds_requested']
                     for j in proposals
                     if network.nodes[j]['status'] == 'candidate']
    total_funds_requested = np.sum(fund_requests)

    proposal_rate = 10 / median_affinity * total_funds_requested / funds
    new_proposal = bool(np.random.rand() < 1 / proposal_rate)

    # Fund inflow scale grows quadratically with sentiment.
    inflow_scale = 1 + 4000 * sentiment ** 2

    # Guard kept from the original: expon raised domain errors on scale <= 1.
    if inflow_scale > 1:
        funds_arrival = expon.rvs(loc=0, scale=inflow_scale)
    else:
        funds_arrival = 0

    return {'new_participant': new_participant,
            'new_participant_holdings': new_participant_holdings,
            'new_proposal': new_proposal,
            'funds_arrival': funds_arrival}
|
||||
|
||||
|
||||
#Mechanisms for updating the state based on driving processes
|
||||
##---
|
||||
def update_network(params, step, sL, s, _input):
    """State update: apply participant/proposal arrivals and age proposals.

    Every still-candidate proposal has its trigger threshold recomputed
    from the current funds and supply; non-candidates get NaN.
    """
    # Removed stray debug ``print(params)`` / ``print(type(params))`` calls.
    network = s['network']
    funds = s['funds']
    supply = s['supply']
    trigger_func = params['trigger_func']

    new_participant = _input['new_participant']  # bool
    new_proposal = _input['new_proposal']        # bool

    if new_participant:
        new_participant_holdings = _input['new_participant_holdings']
        network = gen_new_participant(network, new_participant_holdings)

    if new_proposal:
        # BUG FIX: was gen_new_proposal(network, funds, supply), omitting the
        # required total_funds and trigger_func arguments (a TypeError at
        # runtime).  total_funds is not tracked in state here, so current
        # funds are passed — NOTE(review): confirm the intended rescale.
        network = gen_new_proposal(network, funds, supply, funds, trigger_func)

    # Age every proposal and refresh candidate trigger thresholds.
    proposals = get_nodes_by_type(network, 'proposal')

    for j in proposals:
        network.nodes[j]['age'] = network.nodes[j]['age'] + 1
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)
        else:
            network.nodes[j]['trigger'] = np.nan

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def increment_funds(params, step, sL, s, _input):
    """State update: add this step's fund inflow to the treasury."""
    updated_funds = s['funds'] + _input['funds_arrival']
    return ('funds', updated_funds)
|
||||
|
||||
def increment_supply(params, step, sL, s, _input):
    """State update: mint the new participant's holdings into total supply."""
    updated_supply = s['supply'] + _input['new_participant_holdings']
    return ('supply', updated_supply)
|
||||
|
||||
#functions for partial state update block 2
|
||||
|
||||
#Driving processes: completion of previously funded proposals
|
||||
##-----------------------------------------
|
||||
|
||||
def check_progress(params, step, sL, s):
    """Policy: randomly complete active proposals.

    Completion likelihood shrinks with grant size:
    1 / (base_completion_rate + log(grant_size)).
    """
    network = s['network']
    base_completion_rate = params['base_completion_rate']

    completed = []
    for j in get_nodes_by_type(network, 'proposal'):
        if network.nodes[j]['status'] != 'active':
            continue
        grant_size = network.nodes[j]['funds_requested']
        likelihood = 1.0 / (base_completion_rate + np.log(grant_size))
        if np.random.rand() < likelihood:
            completed.append(j)

    return {'completed': completed}
|
||||
|
||||
|
||||
#Mechanisms for updating the state based on check progress
|
||||
##---
|
||||
def complete_proposal(params, step, sL, s, _input):
    """State update: mark completed proposals and reward supporter sentiment.

    Each participant's sentiment is bumped by their affinity to the
    completed proposal (decay=0, so no decay is applied on completion).
    """
    network = s['network']
    participants = get_nodes_by_type(network, 'participant')

    completed = _input['completed']
    for j in completed:
        network.nodes[j]['status'] = 'completed'
        for i in participants:
            force = network.edges[(i, j)]['affinity']
            # BUG FIX: ``network.node`` was removed in networkx 2.4; the rest
            # of this file uses ``network.nodes``.
            sentiment = network.nodes[i]['sentiment']
            network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def update_sentiment_on_completion(params, step, sL, s, _input):
    """State update: bump system sentiment by the share of active grant
    value completed this step, decayed by ``sentiment_decay``."""
    network = s['network']
    proposals = get_nodes_by_type(network, 'proposal')
    completed = _input['completed']

    grants_outstanding = np.sum([network.nodes[j]['funds_requested']
                                 for j in proposals
                                 if network.nodes[j]['status'] == 'active'])
    grants_completed = np.sum([network.nodes[j]['funds_requested']
                               for j in completed])

    mu = params['sentiment_decay']
    force = grants_completed / grants_outstanding
    # A force outside [0, 1] (e.g. NaN when nothing is outstanding) is
    # treated as zero: pure decay.
    if 0 <= force <= 1:
        new_sentiment = get_sentimental(s['sentiment'], force, mu)
    else:
        new_sentiment = get_sentimental(s['sentiment'], 0, mu)

    return ('sentiment', new_sentiment)
|
||||
|
||||
def get_sentimental(sentiment, force, decay=0):
    """Return ``sentiment`` decayed by ``decay`` and pushed up by ``force``.

    The result is capped at 1; no lower bound is enforced.
    """
    updated = sentiment * (1 - decay) + force
    return min(updated, 1)
|
||||
|
||||
#functions for partial state update block 3
|
||||
|
||||
#Decision processes: trigger function policy
|
||||
##-----------------------------------------
|
||||
|
||||
def trigger_function(params, step, sL, s):
    """Policy: accept candidate proposals whose conviction exceeds their
    trigger threshold, once they are older than ``tmin``.

    Returns the accepted proposal ids and a per-proposal threshold map
    (NaN for non-candidates).

    NOTE(review): this calls the module-level ``trigger_threshold`` rather
    than ``params['trigger_func']`` — confirm that is intentional.
    """
    network = s['network']
    funds = s['funds']
    supply = s['supply']
    tmin = params['tmin']

    accepted = []
    triggers = {}
    for j in get_nodes_by_type(network, 'proposal'):
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            threshold = trigger_threshold(requested, funds, supply)
            # Proposals must age past tmin before they can pass.
            if network.nodes[j]['age'] > tmin:
                if network.nodes[j]['conviction'] > threshold:
                    accepted.append(j)
        else:
            threshold = np.nan

        triggers[j] = threshold

    return {'accepted': accepted, 'triggers': triggers}
|
||||
|
||||
def decrement_funds(params, step, sL, s, _input):
    """State update: pay out the requested funds of every accepted proposal."""
    network = s['network']
    remaining = s['funds']
    for j in _input['accepted']:
        remaining = remaining - network.nodes[j]['funds_requested']
    return ('funds', remaining)
|
||||
|
||||
def update_proposals(params, step, sL, s, _input):
    """State update: record trigger thresholds, activate accepted proposals,
    and adjust participant sentiments.

    For each accepted proposal, supporters' staked tokens and edge
    conviction are reset, and each participant's sentiment moves by how
    much their affinity for the winner beats their best affinity among
    proposals that were not accepted (scaled by ``sensitivity``).
    """
    network = s['network']
    accepted = _input['accepted']
    triggers = _input['triggers']
    participants = get_nodes_by_type(network, 'participant')
    # BUG FIX: was get_nodes_by_type(network, 'proposals') (trailing 's');
    # nodes are created with type 'proposal', so the lookup always returned
    # an empty list and triggers/sentiments were never updated.
    proposals = get_nodes_by_type(network, 'proposal')
    sensitivity = params['sensitivity']

    for j in proposals:
        network.nodes[j]['trigger'] = triggers[j]

    # Bookkeeping: conviction and participant sentiment.
    for j in accepted:
        # Change status to active.
        network.nodes[j]['status'] = 'active'
        network.nodes[j]['conviction'] = np.nan
        for i in participants:
            # Operating on edge (i, j):
            # reset tokens assigned to other candidates.
            network.edges[(i, j)]['tokens'] = 0
            network.edges[(i, j)]['conviction'] = np.nan

            # Update participant sentiment (positive or negative) based on
            # their affinities to the accepted proposal vs the field.
            affinities = [network.edges[(i, p)]['affinity']
                          for p in proposals if p not in accepted]
            if len(affinities) > 1:
                max_affinity = np.max(affinities)
                force = network.edges[(i, j)]['affinity'] - sensitivity * max_affinity
            else:
                force = 0

            network.nodes[i]['sentiment'] = get_sentimental(
                network.nodes[i]['sentiment'], force, False)

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def update_sentiment_on_release(params, step, sL, s, _input):
    """State update: bump system sentiment by the share of candidate grant
    value accepted (released) this step; no decay is applied."""
    network = s['network']
    accepted = _input['accepted']
    proposals = get_nodes_by_type(network, 'proposal')

    outstanding = np.sum([network.nodes[j]['funds_requested']
                          for j in proposals
                          if network.nodes[j]['status'] == 'candidate'])
    released = np.sum([network.nodes[j]['funds_requested'] for j in accepted])

    force = released / outstanding
    # A force outside [0, 1] (e.g. NaN when nothing is outstanding) counts
    # as zero.
    effective_force = force if 0 <= force <= 1 else 0
    new_sentiment = get_sentimental(s['sentiment'], effective_force, False)

    return ('sentiment', new_sentiment)
|
||||
|
||||
def participants_decisions(params, step, sL, s):
    """Policy: each participant buys/burns holdings based on sentiment and
    picks which candidate proposals to support.

    Holdings drift by gain * (sentiment - sensitivity); support goes to
    candidates whose affinity beats a cutoff derived from the participant's
    best candidate affinity, floored at 0.5.
    """
    network = s['network']
    candidates = [j for j in get_nodes_by_type(network, 'proposal')
                  if network.nodes[j]['status'] == 'candidate']
    sensitivity = params['sensitivity']

    gain = .01
    delta_holdings = {}
    proposals_supported = {}
    for i in get_nodes_by_type(network, 'participant'):
        force = network.nodes[i]['sentiment'] - sensitivity
        delta_holdings[i] = network.nodes[i]['holdings'] * gain * force

        support = []
        for j in candidates:
            affinity = network.edges[(i, j)]['affinity']
            cutoff = sensitivity * np.max([network.edges[(i, p)]['affinity']
                                           for p in candidates])
            if cutoff < .5:
                cutoff = .5

            if affinity > cutoff:
                support.append(j)

        proposals_supported[i] = support

    return {'delta_holdings': delta_holdings,
            'proposals_supported': proposals_supported}
|
||||
|
||||
def update_tokens(params, step, sL, s, _input):
    """State update: apply holdings drift, re-stake tokens on supported
    proposals, and accumulate conviction on every edge and proposal.

    Edge conviction follows conviction-voting dynamics:
    conviction <- tokens + alpha * prior_conviction.
    NOTE(review): source indentation was lost; the conviction update is
    reconstructed at the per-proposal loop level (applied to every edge) —
    confirm against the original.
    """
    network = s['network']
    delta_holdings = _input['delta_holdings']
    proposals = get_nodes_by_type(network, 'proposal')
    proposals_supported = _input['proposals_supported']
    participants = get_nodes_by_type(network, 'participant')
    alpha = params['alpha']

    for i in participants:
        network.nodes[i]['holdings'] = network.nodes[i]['holdings'] + delta_holdings[i]
        supported = proposals_supported[i]
        total_affinity = np.sum([network.edges[(i, j)]['affinity']
                                 for j in supported])
        for j in proposals:
            if j in supported:
                # Stake holdings across supported proposals pro-rata by affinity.
                share = network.edges[(i, j)]['affinity'] / total_affinity
                network.edges[(i, j)]['tokens'] = share * network.nodes[i]['holdings']
            else:
                network.edges[(i, j)]['tokens'] = 0

            prior_conviction = network.edges[(i, j)]['conviction']
            current_tokens = network.edges[(i, j)]['tokens']
            network.edges[(i, j)]['conviction'] = current_tokens + alpha * prior_conviction

    # Proposal conviction is the sum over its incident participant edges.
    for j in proposals:
        network.nodes[j]['conviction'] = np.sum(
            [network.edges[(i, j)]['conviction'] for i in participants])

    return ('network', network)
|
||||
|
||||
def update_supply(params, step, sL, s, _input):
    """State update: apply the net of all participants' holdings changes to
    the total token supply."""
    delta_supply = np.sum(list(_input['delta_holdings'].values()))
    return ('supply', s['supply'] + delta_supply)
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
# The Partial State Update Blocks
|
||||
# Each block pairs one policy (which computes the _input dict) with the state
# variables it updates; cadCAD executes the blocks in order each timestep.
partial_state_update_blocks = [
    {
        'policies': {
            #new proposals or new participants
            'random': driving_process
        },
        'variables': {
            'network': update_network,
            'funds':increment_funds,
            'supply':increment_supply
        }
    },
    {
        'policies': {
            'completion': check_progress #see if any of the funded proposals completes
        },
        'variables': { # The following state variables will be updated simultaneously
            'sentiment': update_sentiment_on_completion, #note completing decays sentiment, completing bumps it
            'network': complete_proposal #book-keeping
        }
    },
    {
        'policies': {
            'release': trigger_function #check each proposal to see if it passes
        },
        'variables': { # The following state variables will be updated simultaneously
            'funds': decrement_funds, #funds expended
            'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
            'network': update_proposals #reset convictions, and participants sentiments
                                        #update based on affinities
        }
    },
    {
        'policies': {
            'participants_act': participants_decisions, #high sentiment, high affinity =>buy
                                                        #low sentiment, low affinities => burn
                                                        #assign tokens to top affinities
        },
        'variables': {
            'supply': update_supply,
            'network': update_tokens #update everyones holdings
                                     #and their conviction for each proposal
        }
    }
]
|
||||
|
||||
n= 25 #initial participants
m= 3 #initial proposals

initial_sentiment = .5

# Build the initial participant/proposal graph and derive starting funds,
# supply, and total requested funds via the conviction_helpers module.
network, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)

# Genesis state for the cadCAD run.
initial_conditions = {'network':network,
                      'supply': initial_supply,
                      'funds':initial_funds,
                      'sentiment': initial_sentiment}
|
||||
|
||||
#power of 1 token forever
|
||||
# conviction_capactity = [2]
|
||||
# alpha = [1-1/cc for cc in conviction_capactity]
|
||||
# print(alpha)
|
||||
|
||||
# Parameter sweep: cadCAD runs the cartesian product of these lists
# (here: one run per value of alpha).
params={
    'sensitivity': [.75],
    'tmin': [7], #unit days; minimum periods passed before a proposal can pass
    'sentiment_decay': [.001], #termed mu in the state update function
    'alpha': [0.5, 0.9],  # per-step conviction decay factor
    'base_completion_rate': [10],
    'trigger_func': [trigger_threshold]  # threshold function used by update_network
    }
|
||||
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
# Settings of general simulation parameters, unrelated to the system itself
|
||||
# `T` is a range with the number of discrete units of time the simulation will run for;
|
||||
# `N` is the number of times the simulation will be run (Monte Carlo runs)
|
||||
time_periods_per_run = 250  # T: discrete timesteps per run
monte_carlo_runs = 1        # N: Monte Carlo repetitions

# Bundle timing, run count, and the parameter sweep for cadCAD.
simulation_parameters = config_sim({
    'T': range(time_periods_per_run),
    'N': monte_carlo_runs,
    'M': params
})
|
||||
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
||||
# The configurations above are then packaged into a `Configuration` object
|
||||
# Package initial state, update blocks and sim parameters into a cadCAD
# Configuration appended to the global ``configs`` list.
append_configs(
    initial_state=initial_conditions, #dict containing variable names and initial values
    partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
    sim_configs=simulation_parameters #dict containing simulation parameters
)

from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs

# Execute all registered configs in multi-process mode; ``raw_result`` holds
# the per-timestep state records.
exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)
raw_result, tensor = run.execute()
|
||||
|
||||
# exec_mode = ExecutionMode()
|
||||
# exec_context = ExecutionContext(context=exec_mode.multi_proc)
|
||||
# # run = Executor(exec_context=exec_context, configs=configs)
|
||||
# executor = Executor(exec_context, configs) # Pass the configuration object inside an array
|
||||
# raw_result, tensor = executor.execute() # The `main()` method returns a tuple; its first elements contains the raw results
|
||||
|
|
@ -0,0 +1,555 @@
|
|||
from pprint import pprint
|
||||
|
||||
import numpy as np
|
||||
from tabulate import tabulate
|
||||
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
from simulations.validation.conviction_helpers import *
|
||||
#import networkx as nx
|
||||
from scipy.stats import expon, gamma
|
||||
|
||||
|
||||
#functions for partial state update block 1
|
||||
|
||||
#Driving processes: arrival of participants, proposals and funds
|
||||
##-----------------------------------------
|
||||
|
||||
|
||||
def gen_new_participant(network, new_participant_holdings):
    """Add a participant node holding ``new_participant_holdings`` tokens.

    The participant gets a uniform-random sentiment and one edge to every
    existing proposal, with a polarized random affinity, tokens staked in
    proportion to that affinity, and zero initial conviction.
    """
    participant_id = len(list(network.nodes))

    network.add_node(participant_id)
    network.nodes[participant_id]['type'] = "participant"

    network.nodes[participant_id]['sentiment'] = np.random.rand()
    network.nodes[participant_id]['holdings'] = new_participant_holdings

    for proposal_id in get_nodes_by_type(network, 'proposal'):
        network.add_edge(participant_id, proposal_id)

        u = np.random.rand()
        affinity = 1 - 4 * (1 - u) * u  # polarized: mass near 0 and 1
        edge = network.edges[(participant_id, proposal_id)]
        edge['affinity'] = affinity
        edge['tokens'] = affinity * network.nodes[participant_id]['holdings']
        edge['conviction'] = 0

    return network
|
||||
|
||||
|
||||
scale_factor = 1000

def gen_new_proposal(network, funds, supply, trigger_func):
    """Add a candidate proposal node and connect it to every participant.

    The requested amount is gamma-distributed, rescaled by current funds;
    the trigger threshold is computed from the request, funds, and supply.
    One random participant is the proposer (affinity 1); all others get a
    polarized random affinity.  All edges start with zero tokens and
    conviction.
    """
    j = len(list(network.nodes))
    network.add_node(j)
    network.nodes[j]['type'] = "proposal"

    network.nodes[j]['conviction'] = 0
    network.nodes[j]['status'] = 'candidate'
    network.nodes[j]['age'] = 0

    rescale = scale_factor * funds
    r_rv = gamma.rvs(3, loc=0.001, scale=rescale)
    # BUG FIX: ``network.node`` was removed in networkx 2.4; every other
    # access in this file already uses ``network.nodes``.
    network.nodes[j]['funds_requested'] = r_rv

    network.nodes[j]['trigger'] = trigger_func(r_rv, funds, supply)

    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)

    for i in participants:
        network.add_edge(i, j)
        if i == proposing_participant:
            network.edges[(i, j)]['affinity'] = 1
        else:
            rv = np.random.rand()
            a_rv = 1 - 4 * (1 - rv) * rv  # polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv

        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i, j)]['tokens'] = 0
    return network
|
||||
|
||||
|
||||
|
||||
def driving_process(params, step, sL, s):
    """Random-arrival policy: decide whether a new participant and/or a new
    proposal arrives this step, and how many funds flow into the treasury.

    Higher system sentiment speeds up participant arrivals and fund
    inflows; proposal arrivals are more likely when edge affinities are
    low and candidates already request a large share of available funds.
    """
    sentiment = s['sentiment']
    network = s['network']
    funds = s['funds']

    # Participant arrival: expected once every 10/sentiment steps.
    arrival_rate = 10 / sentiment
    new_participant = bool(np.random.rand() < 1 / arrival_rate)
    if new_participant:
        new_participant_holdings = expon.rvs(loc=0.0, scale=1000)
    else:
        new_participant_holdings = 0

    affinities = [network.edges[e]['affinity'] for e in network.edges]
    median_affinity = np.median(affinities)

    proposals = get_nodes_by_type(network, 'proposal')
    fund_requests = [network.nodes[j]['funds_requested']
                     for j in proposals
                     if network.nodes[j]['status'] == 'candidate']
    total_funds_requested = np.sum(fund_requests)

    proposal_rate = 10 / median_affinity * total_funds_requested / funds
    new_proposal = bool(np.random.rand() < 1 / proposal_rate)

    # Fund inflow scale grows quadratically with sentiment.
    inflow_scale = 1 + 4000 * sentiment ** 2

    # Guard kept from the original: expon raised domain errors on scale <= 1.
    if inflow_scale > 1:
        funds_arrival = expon.rvs(loc=0, scale=inflow_scale)
    else:
        funds_arrival = 0

    return {'new_participant': new_participant,
            'new_participant_holdings': new_participant_holdings,
            'new_proposal': new_proposal,
            'funds_arrival': funds_arrival}
|
||||
|
||||
|
||||
#Mechanisms for updating the state based on driving processes
|
||||
##---
|
||||
def update_network(params, step, sL, s, _input):
    """State update: apply participant/proposal arrivals and age proposals.

    Every still-candidate proposal has its trigger threshold recomputed
    from the current funds and supply; non-candidates get NaN.
    """
    network = s['network']
    funds = s['funds']
    supply = s['supply']
    trigger_func = params['trigger_func']

    arrived_participant = _input['new_participant']  # bool
    arrived_proposal = _input['new_proposal']        # bool

    if arrived_participant:
        holdings = _input['new_participant_holdings']
        network = gen_new_participant(network, holdings)

    if arrived_proposal:
        network = gen_new_proposal(network, funds, supply, trigger_func)

    # Age every proposal and refresh candidate trigger thresholds.
    for j in get_nodes_by_type(network, 'proposal'):
        network.nodes[j]['age'] = network.nodes[j]['age'] + 1
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            network.nodes[j]['trigger'] = trigger_func(requested, funds, supply)
        else:
            network.nodes[j]['trigger'] = np.nan

    return ('network', network)
|
||||
|
||||
def increment_funds(params, step, sL, s, _input):
    """State update: add this step's fund inflow to the treasury."""
    updated_funds = s['funds'] + _input['funds_arrival']
    return ('funds', updated_funds)
|
||||
|
||||
def increment_supply(params, step, sL, s, _input):
    """State update: mint the new participant's holdings into total supply."""
    updated_supply = s['supply'] + _input['new_participant_holdings']
    return ('supply', updated_supply)
|
||||
|
||||
#functions for partial state update block 2
|
||||
|
||||
#Driving processes: completion of previously funded proposals
|
||||
##-----------------------------------------
|
||||
|
||||
def check_progress(params, step, sL, s):
    """Policy: randomly complete active proposals.

    Completion likelihood shrinks with grant size:
    1 / (base_completion_rate + log(grant_size)).
    """
    network = s['network']
    base_completion_rate = params['base_completion_rate']

    completed = []
    for j in get_nodes_by_type(network, 'proposal'):
        if network.nodes[j]['status'] != 'active':
            continue
        grant_size = network.nodes[j]['funds_requested']
        likelihood = 1.0 / (base_completion_rate + np.log(grant_size))
        if np.random.rand() < likelihood:
            completed.append(j)

    return {'completed': completed}
|
||||
|
||||
|
||||
#Mechanisms for updating the state based on check progress
|
||||
##---
|
||||
def complete_proposal(params, step, sL, s, _input):
    """State update: mark completed proposals and reward supporter sentiment.

    Each participant's sentiment is bumped by their affinity to the
    completed proposal (decay=0, so no decay is applied on completion).
    """
    network = s['network']
    participants = get_nodes_by_type(network, 'participant')

    completed = _input['completed']
    for j in completed:
        network.nodes[j]['status'] = 'completed'
        for i in participants:
            force = network.edges[(i, j)]['affinity']
            # BUG FIX: ``network.node`` was removed in networkx 2.4; the rest
            # of this file uses ``network.nodes``.
            sentiment = network.nodes[i]['sentiment']
            network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def update_sentiment_on_completion(params, step, sL, s, _input):
    """State update: bump system sentiment by the share of active grant
    value completed this step, decayed by ``sentiment_decay``."""
    network = s['network']
    proposals = get_nodes_by_type(network, 'proposal')
    completed = _input['completed']

    grants_outstanding = np.sum([network.nodes[j]['funds_requested']
                                 for j in proposals
                                 if network.nodes[j]['status'] == 'active'])
    grants_completed = np.sum([network.nodes[j]['funds_requested']
                               for j in completed])

    mu = params['sentiment_decay']
    force = grants_completed / grants_outstanding
    # A force outside [0, 1] (e.g. NaN when nothing is outstanding) is
    # treated as zero: pure decay.
    if 0 <= force <= 1:
        new_sentiment = get_sentimental(s['sentiment'], force, mu)
    else:
        new_sentiment = get_sentimental(s['sentiment'], 0, mu)

    return ('sentiment', new_sentiment)
|
||||
|
||||
def get_sentimental(sentiment, force, decay=0):
    """Return ``sentiment`` decayed by ``decay`` and pushed up by ``force``.

    The result is capped at 1; no lower bound is enforced.
    """
    updated = sentiment * (1 - decay) + force
    return min(updated, 1)
|
||||
|
||||
#functions for partial state update block 3
|
||||
|
||||
#Decision processes: trigger function policy
|
||||
##-----------------------------------------
|
||||
|
||||
def trigger_function(params, step, sL, s):
    """Policy: accept candidate proposals whose conviction exceeds their
    trigger threshold, once they are older than ``tmin``.

    Returns the accepted proposal ids and a per-proposal threshold map
    (NaN for non-candidates).

    NOTE(review): this calls the module-level ``trigger_threshold`` rather
    than ``params['trigger_func']`` — confirm that is intentional.
    """
    network = s['network']
    funds = s['funds']
    supply = s['supply']
    tmin = params['tmin']

    accepted = []
    triggers = {}
    for j in get_nodes_by_type(network, 'proposal'):
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            threshold = trigger_threshold(requested, funds, supply)
            # Proposals must age past tmin before they can pass.
            if network.nodes[j]['age'] > tmin:
                if network.nodes[j]['conviction'] > threshold:
                    accepted.append(j)
        else:
            threshold = np.nan

        triggers[j] = threshold

    return {'accepted': accepted, 'triggers': triggers}
|
||||
|
||||
def decrement_funds(params, step, sL, s, _input):
    """State update: pay out the requested funds of every accepted proposal."""
    network = s['network']
    remaining = s['funds']
    for j in _input['accepted']:
        remaining = remaining - network.nodes[j]['funds_requested']
    return ('funds', remaining)
|
||||
|
||||
def update_proposals(params, step, sL, s, _input):
    """State update: record trigger thresholds, activate accepted proposals,
    and adjust participant sentiments.

    For each accepted proposal, supporters' staked tokens and edge
    conviction are reset, and each participant's sentiment moves by how
    much their affinity for the winner beats their best affinity among
    proposals that were not accepted (scaled by ``sensitivity``).
    """
    network = s['network']
    accepted = _input['accepted']
    triggers = _input['triggers']
    participants = get_nodes_by_type(network, 'participant')
    # BUG FIX: was get_nodes_by_type(network, 'proposals') (trailing 's');
    # nodes are created with type 'proposal', so the lookup always returned
    # an empty list and triggers/sentiments were never updated.
    proposals = get_nodes_by_type(network, 'proposal')
    sensitivity = params['sensitivity']

    for j in proposals:
        network.nodes[j]['trigger'] = triggers[j]

    # Bookkeeping: conviction and participant sentiment.
    for j in accepted:
        # Change status to active.
        network.nodes[j]['status'] = 'active'
        network.nodes[j]['conviction'] = np.nan
        for i in participants:
            # Operating on edge (i, j):
            # reset tokens assigned to other candidates.
            network.edges[(i, j)]['tokens'] = 0
            network.edges[(i, j)]['conviction'] = np.nan

            # Update participant sentiment (positive or negative) based on
            # their affinities to the accepted proposal vs the field.
            affinities = [network.edges[(i, p)]['affinity']
                          for p in proposals if p not in accepted]
            if len(affinities) > 1:
                max_affinity = np.max(affinities)
                force = network.edges[(i, j)]['affinity'] - sensitivity * max_affinity
            else:
                force = 0

            network.nodes[i]['sentiment'] = get_sentimental(
                network.nodes[i]['sentiment'], force, False)

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def update_sentiment_on_release(params, step, sL, s, _input):
    """State update: bump system sentiment by the share of candidate grant
    value accepted (released) this step; no decay is applied."""
    network = s['network']
    accepted = _input['accepted']
    proposals = get_nodes_by_type(network, 'proposal')

    outstanding = np.sum([network.nodes[j]['funds_requested']
                          for j in proposals
                          if network.nodes[j]['status'] == 'candidate'])
    released = np.sum([network.nodes[j]['funds_requested'] for j in accepted])

    force = released / outstanding
    # A force outside [0, 1] (e.g. NaN when nothing is outstanding) counts
    # as zero.
    effective_force = force if 0 <= force <= 1 else 0
    new_sentiment = get_sentimental(s['sentiment'], effective_force, False)

    return ('sentiment', new_sentiment)
|
||||
|
||||
def participants_decisions(params, step, sL, s):
    """Policy: each participant buys/burns holdings based on sentiment and
    picks which candidate proposals to support.

    Holdings drift by gain * (sentiment - sensitivity); support goes to
    candidates whose affinity beats a cutoff derived from the participant's
    best candidate affinity, floored at 0.5.
    """
    network = s['network']
    candidates = [j for j in get_nodes_by_type(network, 'proposal')
                  if network.nodes[j]['status'] == 'candidate']
    sensitivity = params['sensitivity']

    gain = .01
    delta_holdings = {}
    proposals_supported = {}
    for i in get_nodes_by_type(network, 'participant'):
        force = network.nodes[i]['sentiment'] - sensitivity
        delta_holdings[i] = network.nodes[i]['holdings'] * gain * force

        support = []
        for j in candidates:
            affinity = network.edges[(i, j)]['affinity']
            cutoff = sensitivity * np.max([network.edges[(i, p)]['affinity']
                                           for p in candidates])
            if cutoff < .5:
                cutoff = .5

            if affinity > cutoff:
                support.append(j)

        proposals_supported[i] = support

    return {'delta_holdings': delta_holdings,
            'proposals_supported': proposals_supported}
|
||||
|
||||
def update_tokens(params, step, sL, s, _input):
    """State update for 'network': apply holdings changes and re-stake conviction.

    For every participant: credit the holdings delta produced by the policy,
    then spread the participant's (updated) holdings as 'tokens' across the
    proposals they support — proportionally to edge affinity, zero on all
    other proposal edges — and update each edge's conviction as
    tokens + alpha * prior_conviction. Finally each proposal node's conviction
    is recomputed as the sum of its incoming edge convictions.

    NOTE(review): if a participant's supported proposals have affinities that
    sum to zero, the normalization below divides by zero — confirm affinities
    are strictly positive upstream.

    Returns:
        ('network', network) tuple consumed by cadCAD (graph mutated in place).
    """
    network = s['network']
    delta_holdings = _input['delta_holdings']
    proposals = get_nodes_by_type(network, 'proposal')
    proposals_supported = _input['proposals_supported']
    participants = get_nodes_by_type(network, 'participant')
    alpha = params['alpha']  # conviction retention/decay factor

    for i in participants:
        # apply the buy/burn decided by participants_decisions
        network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
        supported = proposals_supported[i]
        total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])
        for j in proposals:
            if j in supported:
                # stake holdings proportionally to relative affinity
                normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
                network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
            else:
                # no tokens on proposals this participant does not support
                network.edges[(i, j)]['tokens'] = 0

            # conviction accumulates with geometric decay: c' = tokens + alpha*c
            prior_conviction = network.edges[(i, j)]['conviction']
            current_tokens = network.edges[(i, j)]['tokens']
            network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction

    # proposal-level conviction is the sum over all participant edges,
    # computed after every participant's edges have been updated
    for j in proposals:
        network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])

    key = 'network'
    value = network

    return (key, value)
|
||||
|
||||
def update_supply(params, step, sL, s, _input):
    """State update for 'supply': fold participants' holdings changes into supply.

    The net token supply moves by the sum of every participant's holdings
    delta (buys increase supply, burns decrease it).

    Returns:
        ('supply', new_supply) tuple consumed by cadCAD.
    """
    net_delta = np.sum(list(_input['delta_holdings'].values()))
    new_supply = s['supply'] + net_delta
    return ('supply', new_supply)
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The Partial State Update Blocks
# cadCAD executes these blocks in order once per timestep; within a block the
# 'variables' entries are all updated simultaneously from the aggregated
# output of that block's 'policies'.
partial_state_update_blocks = [
    {
        'policies': {
            #new proposals or new participants
            'random': driving_process
        },
        'variables': {
            # bookkeeping for arrivals: extend the graph, credit funds & supply
            'network': update_network,
            'funds':increment_funds,
            'supply':increment_supply
        }
    },
    {
        'policies': {
            'completion': check_progress #see if any of the funded proposals completes
        },
        'variables': { # The following state variables will be updated simultaneously
            # failure decays sentiment, completion bumps it
            # (original comment was garbled: "completing decays ... completing bumps")
            'sentiment': update_sentiment_on_completion,
            'network': complete_proposal #book-keeping
        }
    },
    {
        'policies': {
            'release': trigger_function #check each proposal to see if it passes
        },
        'variables': { # The following state variables will be updated simultaneously
            'funds': decrement_funds, #funds expended
            'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
            'network': update_proposals #reset convictions, and participants sentiments
                                        #update based on affinities
        }
    },
    {
        'policies': {
            'participants_act': participants_decisions, #high sentiment, high affinity =>buy
                                                        #low sentiment, low affinities => burn
                                                        #assign tokens to top affinities
        },
        'variables': {
            'supply': update_supply,
            'network': update_tokens #update everyones holdings
                                     #and their conviction for each proposal
        }
    }
]
|
||||
|
||||
n= 25 #initial participants
m= 3 #initial proposals

initial_sentiment = .5  # aggregate sentiment starts neutral — assumed range [0, 1]; TODO confirm

# Build the initial participant/proposal graph and derive starting balances.
# NOTE(review): initialize_network, total_funds_given_total_supply and
# trigger_threshold are defined earlier in this file (outside this excerpt).
network, initial_funds, initial_supply, total_requested = initialize_network(n,m,total_funds_given_total_supply,trigger_threshold)

# cadCAD genesis state: one entry per state variable updated by the blocks above
initial_conditions = {'network':network,
                      'supply': initial_supply,
                      'funds':initial_funds,
                      'sentiment': initial_sentiment}

#power of 1 token forever
# conviction_capactity = [2]
# alpha = [1-1/cc for cc in conviction_capactity]
# print(alpha)

# Model parameters. Each value is a list because cadCAD treats 'M' entries as
# parameter sweeps; single-element lists mean no sweep.
params={
    'sensitivity': [.75],
    'tmin': [7], #unit days; minimum periods passed before a proposal can pass
    'sentiment_decay': [.001], #termed mu in the state update function
    'alpha': [0.5],
    'base_completion_rate': [10],
    'trigger_func': [trigger_threshold]
}
|
||||
|
||||
|
||||
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Settings of general simulation parameters, unrelated to the system itself
# `T` is a range with the number of discrete units of time the simulation will run for;
# `N` is the number of times the simulation will be run (Monte Carlo runs)
time_periods_per_run = 250
monte_carlo_runs = 1

simulation_parameters = config_sim({
    'T': range(time_periods_per_run),
    'N': monte_carlo_runs,
    'M': params
})


from cadCAD.configuration import append_configs

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The configurations above are then packaged into a `Configuration` object
append_configs(
    initial_state=initial_conditions, #dict containing variable names and initial values
    partial_state_update_blocks=partial_state_update_blocks, #dict containing state update functions
    sim_configs=simulation_parameters #dict containing simulation parameters
)

from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
import pandas as pd

# Run every registered configuration in multi-process mode and print each
# run's tensor field and resulting DataFrame.
exec_mode = ExecutionMode()
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
run = Executor(exec_context=multi_proc_ctx, configs=configs)

i = 0  # run counter — NOTE(review): incremented but never read
for raw_result, tensor_field in run.execute():
    result = pd.DataFrame(raw_result)
    print()
    print(f"Tensor Field: {type(tensor_field)}")
    print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
    print(f"Output: {type(result)}")
    print(tabulate(result, headers='keys', tablefmt='psql'))
    print()
    i += 1
|
||||
|
|
@ -0,0 +1,763 @@
|
|||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Exogenous Example\n",
|
||||
"## Authored by BlockScience, MV Barlin\n",
|
||||
"### Updated July-10-2019 \n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Key assumptions and space:\n",
|
||||
"1. Implementation of System Model in cell 2\n",
|
||||
"2. Timestep = day\n",
|
||||
"3. Launch simulation, without intervention from changing governance policies"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Library Imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import Image\n",
|
||||
"import pandas as pd\n",
|
||||
"import numpy as np\n",
|
||||
"import matplotlib as mpl\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"import seaborn as sns\n",
|
||||
"import math\n",
|
||||
"#from tabulate import tabulate\n",
|
||||
"from scipy import stats\n",
|
||||
"sns.set_style('whitegrid')\n",
|
||||
"from decimal import Decimal\n",
|
||||
"from datetime import timedelta\n",
|
||||
"\n",
|
||||
"%matplotlib inline"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## cadCAD Setup\n",
|
||||
"#### ----------------cadCAD LIBRARY IMPORTS------------------------"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n",
|
||||
"#from simulations.validation import sweep_config\n",
|
||||
"from cadCAD import configs\n",
|
||||
"from cadCAD.configuration import append_configs\n",
|
||||
"from cadCAD.configuration.utils import proc_trigger, ep_time_step, config_sim"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#from cadCAD.configuration.utils.parameterSweep import config_sim"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Dict, List"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### ----------------Random State Seed-----------------------------"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"seed = {\n",
|
||||
"# 'z': np.random.RandomState(1)\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Timestamp"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ts_format = '%Y-%m-%d %H:%M:%S'\n",
|
||||
"t_delta = timedelta(days=0, minutes=0, seconds=1)\n",
|
||||
"def set_time(_g, step, sL, s, _input):\n",
|
||||
" y = 'timestamp'\n",
|
||||
" x = ep_time_step(s, dt_str=s['timestamp'], fromat_str=ts_format, _timedelta=t_delta)\n",
|
||||
" return (y, x)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# ASSUMED PARAMETERS"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### PRICE LIST"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# dai_xns_conversion = 1.0 # Assumed for static conversion 'PUBLISHED PRICE LIST' DEPRECATED"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Initial Condition State Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"del_stake_pct = 2\n",
|
||||
"\n",
|
||||
"starting_xns = float(10**10) # initial supply of xns tokens\n",
|
||||
"starting_broker_xns = float(1 * 10**8) # initial holding of xns token by broker app\n",
|
||||
"starting_broker_fiat = float(1 * 10**5) # initial holding of fiat by broker app\n",
|
||||
"starting_broker_stable = float(1 * 10**6) # inital holding of stable token by broker app\n",
|
||||
"starting_deposit_acct = float(100) # inital deposit locked for first month of resources TBD: make function of resource*price\n",
|
||||
"starting_entrance = float(1 * 10**4) # TBD: make function of entrance fee % * cost * # of initial apps\n",
|
||||
"starting_app_usage = float(10) # initial fees from app usage \n",
|
||||
"starting_platform = float(100) # initial platform fees \n",
|
||||
"starting_resource_fees = float(10) # initial resource fees usage paid by apps \n",
|
||||
"starting_app_subsidy = float(0.25* 10**9) # initial application subsidy pool\n",
|
||||
"starting_stake = float(4 * 10**7)\n",
|
||||
"starting_stake_pool = starting_stake + ((3*10**7)*(del_stake_pct)) # initial staked pool + ((3*10**7)*(del_stake_pct))\n",
|
||||
"\n",
|
||||
"#starting_block_reward = float(0) # initial block reward MOVED ABOVE TO POLICY\n",
|
||||
"starting_capacity_subsidy = float(7.5 * 10**7) # initial capacity subsidy pool\n",
|
||||
"starting_delegate_holdings = 0.15 * starting_xns\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Initial Condition Composite State Variables"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# subsidy limit is 30% of the 10B supply\n",
|
||||
"starting_treasury = float(5.5 * 10**9) \n",
|
||||
"starting_app_income = float(0) # initial income to application\n",
|
||||
"starting_resource_income = float(0) # initial income to application\n",
|
||||
"starting_delegate_income = float(0) # initial income to delegate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Initial Condition Exogenous State Variables "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"starting_xns_fiat = float(0.01) # initial xns per fiat signal\n",
|
||||
"starting_fiat_ext = float(1) # initial xns per fiat signal\n",
|
||||
"starting_stable_ext = float(1) # initial stable signal"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Exogenous Price Updates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def delta_price(mean,sd):\n",
|
||||
" '''Returns normal random variable generated by first two central moments of price change of input ticker'''\n",
|
||||
" rv = np.random.normal(mean, sd)\n",
|
||||
" return rv"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"def xns_ext_update(_g, step, sL, s, _input):\n",
|
||||
" key = 'XNS_fiat_external'\n",
|
||||
" \n",
|
||||
" value = s['XNS_fiat_external'] * (1 + delta_price(0.000000, 0.005))\n",
|
||||
" \n",
|
||||
" return key, value"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"From Currency Analysis of DAI-USD pair \n",
|
||||
"May-09-2018 through June-10-2019 \n",
|
||||
"Datasource: BitFinex \n",
|
||||
"Analysis of daily return percentage performed by BlockScience"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"DAI_mean = 0.0000719\n",
|
||||
"DAI_sd = 0.006716"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The daily return is computed as: \n",
|
||||
"$$ r = \\frac{Price_n - Price_{n-1}}{Price_{n-1}} $$ \n",
|
||||
"Thus, the modelled current price can be as: \n",
|
||||
"$$ Price_n = Price_{n-1} * r + Price_{n-1} $$"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"def stable_update(_g, step, sL, s, _input):\n",
|
||||
" key = 'stable_external'\n",
|
||||
" \n",
|
||||
" value = s['stable_external'] * (1 + delta_price(DAI_mean, DAI_sd))\n",
|
||||
" return key, value\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Assumed Parameters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"apps_deployed = 1 # Make part of test- application deployment model\n",
|
||||
"\n",
|
||||
"starting_deposit_acct = float(100) # inital deposit locked for first month of resources TBD: make function of resource*price\n",
|
||||
"\n",
|
||||
"app_resource_fee_constant = 10**1 # in STABLE, assumed per day per total nodes \n",
|
||||
"platform_fee_constant = 10 # in XNS\n",
|
||||
"# ^^^^^^^^^^^^ MAKE A PERCENTAGE OR FLAT FEE as PART of TESTING"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1000"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"\n",
|
||||
"alpha = 100 # Fee Rate\n",
|
||||
"beta = 0.10 # FIXED Too high because multiplied by constant and resource fees\n",
|
||||
"app_platform = alpha * platform_fee_constant\n",
|
||||
"app_platform"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"10.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"beta_out =beta*100\n",
|
||||
"beta_out"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"0.15"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"starting_capacity_subsidy / (5 * 10**7) / 10"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"weight = 0.95 # 0.95 internal weight 5% friction from external markets\n",
|
||||
"\n",
|
||||
"def xns_int_update(_g, step, sL, s, _input):\n",
|
||||
" key = 'XNS_fiat_internal'\n",
|
||||
"\n",
|
||||
" internal = s['XNS_fiat_internal'] * weight\n",
|
||||
" external = s['XNS_fiat_external'] * (1 - weight)\n",
|
||||
" value = internal + external\n",
|
||||
" \n",
|
||||
" return key, value"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### CONFIGURATION DICTIONARY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"time_step_count = 3652 # days = 10 years\n",
|
||||
"run_count = 1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Genesis States"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#----------STATE VARIABLE Genesis DICTIONARY---------------------------\n",
|
||||
"genesis_states = {\n",
|
||||
" 'XNS_fiat_external' : starting_xns_fiat,\n",
|
||||
" 'XNS_fiat_internal' : starting_xns_fiat,\n",
|
||||
" # 'fiat_external' : starting_fiat_ext,\n",
|
||||
" 'stable_external' : starting_stable_ext,\n",
|
||||
" 'timestamp': '2018-10-01 15:16:24', #es5\n",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#--------------EXOGENOUS STATE MECHANISM DICTIONARY--------------------\n",
|
||||
"exogenous_states = {\n",
|
||||
" 'XNS_fiat_external' : xns_ext_update,\n",
|
||||
"# 'fiat_external' : starting_fiat_ext,\n",
|
||||
" 'stable_external' : stable_update,\n",
|
||||
" \"timestamp\": set_time,\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"#--------------ENVIRONMENTAL PROCESS DICTIONARY------------------------\n",
|
||||
"env_processes = {\n",
|
||||
"# \"Poisson\": env_proc_id\n",
|
||||
"}\n",
|
||||
"#----------------------SIMULATION RUN SETUP----------------------------\n",
|
||||
"sim_config = config_sim(\n",
|
||||
" {\n",
|
||||
" \"N\": run_count,\n",
|
||||
" \"T\": range(time_step_count)\n",
|
||||
"# \"M\": g # for parameter sweep\n",
|
||||
"}\n",
|
||||
")\n",
|
||||
"#----------------------MECHANISM AND BEHAVIOR DICTIONARY---------------\n",
|
||||
"partial_state_update_block = {\n",
|
||||
" \"price\": { \n",
|
||||
" \"policies\": { \n",
|
||||
" },\n",
|
||||
" \"variables\": {\n",
|
||||
" 'XNS_fiat_internal' : xns_int_update\n",
|
||||
"# 'app_income' : app_earn,\n",
|
||||
" }\n",
|
||||
" },\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"append_configs(\n",
|
||||
" sim_configs=sim_config,\n",
|
||||
" initial_state=genesis_states,\n",
|
||||
" seeds=seed,\n",
|
||||
" raw_exogenous_states= exogenous_states,\n",
|
||||
" env_processes=env_processes,\n",
|
||||
" partial_state_update_blocks=partial_state_update_block\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Running cadCAD"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Simulation Execution: Single Configuration\n",
|
||||
"\n",
|
||||
"single_proc: [<cadCAD.configuration.Configuration object at 0x0000024B3B37AF60>]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\mbarl\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\cadCAD\\utils\\__init__.py:89: FutureWarning: The use of a dictionary to describe Partial State Update Blocks will be deprecated. Use a list instead.\n",
|
||||
" FutureWarning)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"exec_mode = ExecutionMode()\n",
|
||||
"\n",
|
||||
"print(\"Simulation Execution: Single Configuration\")\n",
|
||||
"print()\n",
|
||||
"first_config = configs # only contains config1\n",
|
||||
"single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)\n",
|
||||
"run1 = Executor(exec_context=single_proc_ctx, configs=first_config)\n",
|
||||
"run1_raw_result, tensor_field = run1.main()\n",
|
||||
"result = pd.DataFrame(run1_raw_result)\n",
|
||||
"# print()\n",
|
||||
"# print(\"Tensor Field: config1\")\n",
|
||||
"# print(tabulate(tensor_field, headers='keys', tablefmt='psql'))\n",
|
||||
"# print(\"Output:\")\n",
|
||||
"# print(tabulate(result, headers='keys', tablefmt='psql'))\n",
|
||||
"# print()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"df = result"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"<div>\n",
|
||||
"<style scoped>\n",
|
||||
" .dataframe tbody tr th:only-of-type {\n",
|
||||
" vertical-align: middle;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe tbody tr th {\n",
|
||||
" vertical-align: top;\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
" .dataframe thead th {\n",
|
||||
" text-align: right;\n",
|
||||
" }\n",
|
||||
"</style>\n",
|
||||
"<table border=\"1\" class=\"dataframe\">\n",
|
||||
" <thead>\n",
|
||||
" <tr style=\"text-align: right;\">\n",
|
||||
" <th></th>\n",
|
||||
" <th>XNS_fiat_external</th>\n",
|
||||
" <th>XNS_fiat_internal</th>\n",
|
||||
" <th>run</th>\n",
|
||||
" <th>stable_external</th>\n",
|
||||
" <th>substep</th>\n",
|
||||
" <th>timestamp</th>\n",
|
||||
" <th>timestep</th>\n",
|
||||
" </tr>\n",
|
||||
" </thead>\n",
|
||||
" <tbody>\n",
|
||||
" <tr>\n",
|
||||
" <th>0</th>\n",
|
||||
" <td>0.010000</td>\n",
|
||||
" <td>0.010000</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>1.000000</td>\n",
|
||||
" <td>0</td>\n",
|
||||
" <td>2018-10-01 15:16:24</td>\n",
|
||||
" <td>0</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>1</th>\n",
|
||||
" <td>0.009944</td>\n",
|
||||
" <td>0.010000</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>1.000172</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:25</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>2</th>\n",
|
||||
" <td>0.009889</td>\n",
|
||||
" <td>0.009997</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>1.003516</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:26</td>\n",
|
||||
" <td>2</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>3</th>\n",
|
||||
" <td>0.009848</td>\n",
|
||||
" <td>0.009992</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>0.990655</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:27</td>\n",
|
||||
" <td>3</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>4</th>\n",
|
||||
" <td>0.009814</td>\n",
|
||||
" <td>0.009985</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>1.001346</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:28</td>\n",
|
||||
" <td>4</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>5</th>\n",
|
||||
" <td>0.009798</td>\n",
|
||||
" <td>0.009976</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>1.002495</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:29</td>\n",
|
||||
" <td>5</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>6</th>\n",
|
||||
" <td>0.009706</td>\n",
|
||||
" <td>0.009967</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>0.994911</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:30</td>\n",
|
||||
" <td>6</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>7</th>\n",
|
||||
" <td>0.009625</td>\n",
|
||||
" <td>0.009954</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>0.998919</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:31</td>\n",
|
||||
" <td>7</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>8</th>\n",
|
||||
" <td>0.009632</td>\n",
|
||||
" <td>0.009938</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>0.995047</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:32</td>\n",
|
||||
" <td>8</td>\n",
|
||||
" </tr>\n",
|
||||
" <tr>\n",
|
||||
" <th>9</th>\n",
|
||||
" <td>0.009648</td>\n",
|
||||
" <td>0.009922</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>0.980786</td>\n",
|
||||
" <td>1</td>\n",
|
||||
" <td>2018-10-01 15:16:33</td>\n",
|
||||
" <td>9</td>\n",
|
||||
" </tr>\n",
|
||||
" </tbody>\n",
|
||||
"</table>\n",
|
||||
"</div>"
|
||||
],
|
||||
"text/plain": [
|
||||
" XNS_fiat_external XNS_fiat_internal run stable_external substep \\\n",
|
||||
"0 0.010000 0.010000 1 1.000000 0 \n",
|
||||
"1 0.009944 0.010000 1 1.000172 1 \n",
|
||||
"2 0.009889 0.009997 1 1.003516 1 \n",
|
||||
"3 0.009848 0.009992 1 0.990655 1 \n",
|
||||
"4 0.009814 0.009985 1 1.001346 1 \n",
|
||||
"5 0.009798 0.009976 1 1.002495 1 \n",
|
||||
"6 0.009706 0.009967 1 0.994911 1 \n",
|
||||
"7 0.009625 0.009954 1 0.998919 1 \n",
|
||||
"8 0.009632 0.009938 1 0.995047 1 \n",
|
||||
"9 0.009648 0.009922 1 0.980786 1 \n",
|
||||
"\n",
|
||||
" timestamp timestep \n",
|
||||
"0 2018-10-01 15:16:24 0 \n",
|
||||
"1 2018-10-01 15:16:25 1 \n",
|
||||
"2 2018-10-01 15:16:26 2 \n",
|
||||
"3 2018-10-01 15:16:27 3 \n",
|
||||
"4 2018-10-01 15:16:28 4 \n",
|
||||
"5 2018-10-01 15:16:29 5 \n",
|
||||
"6 2018-10-01 15:16:30 6 \n",
|
||||
"7 2018-10-01 15:16:31 7 \n",
|
||||
"8 2018-10-01 15:16:32 8 \n",
|
||||
"9 2018-10-01 15:16:33 9 "
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"df.head(10)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.5"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
||||
|
|
@ -0,0 +1,183 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
import pprint
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import env_proc_trigger, ep_time_step, config_sim
|
||||
|
||||
from typing import Dict, List
|
||||
|
||||
# from cadCAD.utils.sys_config import exo, exo_check
|
||||
|
||||
pp = pprint.PrettyPrinter(indent=4)  # pretty-printer for ad-hoc inspection during development

# Named random-number streams handed to cadCAD via append_configs(seeds=...).
# NOTE(review): 'b' and 'c' share seed 3 — confirm that is intentional.
seeds = {
    'z': np.random.RandomState(1),
    'a': np.random.RandomState(2),
    'b': np.random.RandomState(3),
    'c': np.random.RandomState(3)
}

# Optional parameter-sweep grid: each key maps to the list of values to sweep;
# passed as 'M' to config_sim below, producing one configuration per combination.
g: Dict[str, List[int]] = {
    'alpha': [1],
    'beta': [2, 5],
    'gamma': [3, 4],
    'omega': [7]
}
|
||||
|
||||
# Policies per Mechanism
# Each policy returns a dict of signals; cadCAD aggregates the dicts from all
# policies in a block and passes the merged result to that block's state updates.
def p1m1(_g, step, sL, s):
    """Mechanism 1, policy 1: constant signal."""
    return {'param1': 1}

def p2m1(_g, step, sL, s):
    """Mechanism 1, policy 2: constant signal."""
    return {'param2': 4}

def p1m2(_g, step, sL, s):
    """Mechanism 2, policy 1: exposes the swept 'beta' parameter as param2."""
    return {'param1': 'a', 'param2': _g['beta']}

def p2m2(_g, step, sL, s):
    """Mechanism 2, policy 2: constant signals."""
    return {'param1': 'b', 'param2': 0}

def p1m3(_g, step, sL, s):
    """Mechanism 3, policy 1: numpy-array-valued signal."""
    return {'param1': np.array([10, 100])}

def p2m3(_g, step, sL, s):
    """Mechanism 3, policy 2: numpy-array-valued signal."""
    return {'param1': np.array([20, 200])}

# Internal States per Mechanism
# Each state update returns a (state_name, new_value) pair for one variable.
def s1m1(_g, step, sL, s, _input):
    """Mechanism 1: reset s1 to 0."""
    return 's1', 0

def s2m1(_g, step, sL, s, _input):
    """Mechanism 1: set s2 to the swept 'beta' parameter."""
    return 's2', _g['beta']

def s1m2(_g, step, sL, s, _input):
    """Mechanism 2: set s1 from the aggregated policy signal param2."""
    return 's1', _input['param2']

def s2m2(_g, step, sL, s, _input):
    """Mechanism 2: set s2 from the aggregated policy signal param2."""
    return 's2', _input['param2']

def s1m3(_g, step, sL, s, _input):
    """Mechanism 3: reset s1 to 0."""
    return 's1', 0

def s2m3(_g, step, sL, s, _input):
    """Mechanism 3: reset s2 to 0."""
    return 's2', 0
|
||||
|
||||
|
||||
# Exogenous States
proc_one_coef_A = 0.7  # NOTE(review): unused in the visible code — confirm before removing
proc_one_coef_B = 1.3  # NOTE(review): unused in the visible code — confirm before removing


def es3p1(_g, step, sL, s, _input):
    """Exogenous update: s3 tracks the swept 'gamma' parameter."""
    return 's3', _g['gamma']
# @curried
def es4p2(_g, step, sL, s, _input):
    """Exogenous update: s4 tracks the swept 'gamma' parameter."""
    return 's4', _g['gamma']

ts_format = '%Y-%m-%d %H:%M:%S'
t_delta = timedelta(days=0, minutes=0, seconds=1)
def es5p2(_g, step, sL, s, _input):
    """Advance the 'timestep' timestamp by one second per step.

    Note: 'fromat_str' (sic) is the actual keyword name in this cadCAD
    version's ep_time_step signature — do not "correct" it.
    """
    y = 'timestep'
    x = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
    return (y, x)
|
||||
|
||||
|
||||
# Environment States
# @curried
# def env_a(param, x):
#     return x + param
def env_a(x):
    """Identity environment process: passes the state value through unchanged."""
    return x
def env_b(x):
    """Constant environment process: returns 10 regardless of input."""
    return 10
|
||||
|
||||
|
||||
# Genesis States
# Initial values of the internal state variables (Decimal-valued).
genesis_states = {
    's1': Decimal(0.0),
    's2': Decimal(0.0),
    's3': Decimal(1.0),
    's4': Decimal(1.0),
    # 'timestep': '2018-10-01 15:16:24'
}


# remove `exo_update_per_ts` to update every ts
# Maps exogenous state names to their update functions.
raw_exogenous_states = {
    "s3": es3p1,
    "s4": es4p2,
    # "timestep": es5p2
}


# ToDo: make env proc trigger field agnostic
# ToDo: input json into function renaming __name__
# env_b is wrapped so it only fires at timestep 1
triggered_env_b = env_proc_trigger(1, env_b)
env_processes = {
    "s3": env_a, #sweep(beta, env_a),
    "s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
}
# parameterized_env_processes = parameterize_states(env_processes)
#
# pp.pprint(parameterized_env_processes)
# exit()

# ToDo: The number of values entered in sweep should be the # of config objs created,
# not dependent on the # of times the sweep is applied
# sweep exo_state func and point to exo-state in every other funtion
# param sweep on genesis states
|
||||
|
||||
# Three mechanisms ("m1".."m3"), each pairing two policies with updates for
# s1 and s2. Dict-keyed PSUBs are the older cadCAD convention (a list is the
# newer one — see the deprecation warning this style triggers).
partial_state_update_block = {
    "m1": {
        "policies": {
            "b1": p1m1,
            "b2": p2m1
        },
        "variables": {
            "s1": s1m1,
            "s2": s2m1
        }
    },
    "m2": {
        "policies": {
            "b1": p1m2,
            "b2": p2m2,
        },
        "variables": {
            "s1": s1m2,
            "s2": s2m2
        }
    },
    "m3": {
        "policies": {
            "b1": p1m3,
            "b2": p2m3
        },
        "variables": {
            "s1": s1m3,
            "s2": s2m3
        }
    }
}
|
||||
|
||||
# config_sim Necessary
# N = Monte Carlo runs; T = timesteps per run; M = parameter-sweep grid.
sim_config = config_sim(
    {
        "N": 2,
        "T": range(5),
        "M": g # Optional
    }
)

# New Convention
# Register the configuration with cadCAD's global `configs` list.
# NOTE(review): raw_exogenous_states and env_processes are deliberately
# passed as empty dicts here — the populated dicts above are unused.
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    seeds=seeds,
    raw_exogenous_states={}, #raw_exogenous_states,
    env_processes={}, #env_processes,
    partial_state_update_blocks=partial_state_update_block
)
|
||||
|
|
@ -0,0 +1,181 @@
|
|||
from decimal import Decimal
|
||||
import numpy as np
|
||||
from datetime import timedelta
|
||||
import pprint
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import env_proc_trigger, ep_time_step, config_sim
|
||||
|
||||
from typing import Dict, List
|
||||
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
# Optional
|
||||
g: Dict[str, List[int]] = {
|
||||
'alpha': [1],
|
||||
'beta': [2, 5],
|
||||
'gamma': [3, 4],
|
||||
'omega': [7]
|
||||
}
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'param1': 1}
|
||||
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'param2': 4}
|
||||
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'param1': 'a', 'param2': _g['beta']}
|
||||
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'param1': 'b', 'param2': 0}
|
||||
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'param1': np.array([10, 100])}
|
||||
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'param1': np.array([20, 200])}
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1m1(_g, step, sL, s, _input):
|
||||
return 's1', 0
|
||||
|
||||
def s2m1(_g, step, sL, s, _input):
|
||||
return 's2', _g['beta']
|
||||
|
||||
def s1m2(_g, step, sL, s, _input):
|
||||
return 's1', _input['param2']
|
||||
|
||||
def s2m2(_g, step, sL, s, _input):
|
||||
return 's2', _input['param2']
|
||||
|
||||
def s1m3(_g, step, sL, s, _input):
|
||||
return 's1', 0
|
||||
|
||||
def s2m3(_g, step, sL, s, _input):
|
||||
return 's2', 0
|
||||
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
|
||||
def es3p1(_g, step, sL, s, _input):
|
||||
return 's3', _g['gamma']
|
||||
# @curried
|
||||
def es4p2(_g, step, sL, s, _input):
|
||||
return 's4', _g['gamma']
|
||||
|
||||
ts_format = '%Y-%m-%d %H:%M:%S'
|
||||
t_delta = timedelta(days=0, minutes=0, seconds=1)
|
||||
def es5p2(_g, step, sL, s, _input):
    """Exogenous state update: advance the 'timestep' clock by one `t_delta` tick."""
    # NOTE(review): `fromat_str` mirrors the (misspelled) keyword argument in
    # this cadCAD version's `ep_time_step` signature — do not "correct" the
    # spelling here without confirming the library signature changed.
    label = 'timestep'
    next_ts = ep_time_step(s, dt_str=s['timestep'], fromat_str=ts_format, _timedelta=t_delta)
    return label, next_ts
|
||||
|
||||
|
||||
# Environment States
|
||||
# @curried
|
||||
# def env_a(param, x):
|
||||
# return x + param
|
||||
def env_a(x):
|
||||
return x
|
||||
def env_b(x):
|
||||
return 10
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
's1': Decimal(0.0),
|
||||
's2': Decimal(0.0),
|
||||
's3': Decimal(1.0),
|
||||
's4': Decimal(1.0),
|
||||
# 'timestep': '2018-10-01 15:16:24'
|
||||
}
|
||||
|
||||
|
||||
# remove `exo_update_per_ts` to update every ts
|
||||
raw_exogenous_states = {
|
||||
"s3": es3p1,
|
||||
"s4": es4p2,
|
||||
# "timestep": es5p2
|
||||
}
|
||||
|
||||
|
||||
# ToDo: make env proc trigger field agnostic
|
||||
# ToDo: input json into function renaming __name__
|
||||
triggered_env_b = env_proc_trigger(1, env_b)
|
||||
env_processes = {
|
||||
"s3": env_a, #sweep(beta, env_a),
|
||||
"s4": triggered_env_b #rename('parameterized', triggered_env_b) #sweep(beta, triggered_env_b)
|
||||
}
|
||||
# parameterized_env_processes = parameterize_states(env_processes)
|
||||
#
|
||||
# pp.pprint(parameterized_env_processes)
|
||||
# exit()
|
||||
|
||||
# ToDo: The number of values entered in sweep should be the # of config objs created,
|
||||
# not dependent on the # of times the sweep is applied
|
||||
# sweep exo_state func and point to exo-state in every other funtion
|
||||
# param sweep on genesis states
|
||||
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"policies": {
|
||||
"b1": p1m1,
|
||||
"b2": p2m1
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m1,
|
||||
"s2": s2m1
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"policies": {
|
||||
"b1": p1m2,
|
||||
"b2": p2m2,
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m2,
|
||||
"s2": s2m2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"policies": {
|
||||
"b1": p1m3,
|
||||
"b2": p2m3
|
||||
},
|
||||
"variables": {
|
||||
"s1": s1m3,
|
||||
"s2": s2m3
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# config_sim Necessary
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
"M": g # Optional
|
||||
}
|
||||
)
|
||||
|
||||
# New Convention
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds=seeds,
|
||||
raw_exogenous_states=raw_exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
|
|
@ -0,0 +1,118 @@
|
|||
from decimal import Decimal
|
||||
|
||||
import numpy as np
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import bound_norm_random, config_sim
|
||||
|
||||
seeds = {
|
||||
'z': np.random.RandomState(1),
|
||||
'a': np.random.RandomState(2),
|
||||
'b': np.random.RandomState(3),
|
||||
'c': np.random.RandomState(3)
|
||||
}
|
||||
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1(_g, step, sL, s):
|
||||
return {'param1': 10}
|
||||
def p2(_g, step, sL, s):
|
||||
return {'param1': 10, 'param2': 40}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def s1(_g, step, sL, s, _input):
|
||||
y = 'ds1'
|
||||
x = s['ds1'] + 1
|
||||
return (y, x)
|
||||
def s2(_g, step, sL, s, _input):
|
||||
y = 'ds2'
|
||||
x = _input['param2']
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Exogenous States
|
||||
proc_one_coef_A = 0.7
|
||||
proc_one_coef_B = 1.3
|
||||
|
||||
def es(_g, step, sL, s, _input):
|
||||
y = 'ds3'
|
||||
x = s['ds3'] * bound_norm_random(seeds['a'], proc_one_coef_A, proc_one_coef_B)
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Environment States
|
||||
def env_a(x):
|
||||
return 5
|
||||
def env_b(x):
|
||||
return 10
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
'ds1': Decimal(0.0),
|
||||
'ds2': Decimal(0.0),
|
||||
'ds3': Decimal(1.0)
|
||||
}
|
||||
|
||||
|
||||
raw_exogenous_states = {
|
||||
"ds3": es
|
||||
}
|
||||
|
||||
|
||||
env_processes = {
|
||||
"ds3": env_a
|
||||
}
|
||||
|
||||
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"policies": {
|
||||
"p1": p1,
|
||||
"p2": p2
|
||||
},
|
||||
"variables": {
|
||||
"ds1": s1,
|
||||
"ds2": s2
|
||||
}
|
||||
},
|
||||
"m2": {
|
||||
"policies": {
|
||||
"p1": p1,
|
||||
"p2": p2
|
||||
},
|
||||
"variables": {
|
||||
"ds1": s1,
|
||||
"ds2": s2
|
||||
}
|
||||
},
|
||||
"m3": {
|
||||
"policies": {
|
||||
"p1": p1,
|
||||
"p2": p2
|
||||
},
|
||||
"variables": {
|
||||
"ds1": s1,
|
||||
"ds2": s2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(4),
|
||||
}
|
||||
)
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
seeds=seeds,
|
||||
raw_exogenous_states=raw_exogenous_states,
|
||||
env_processes=env_processes,
|
||||
partial_state_update_blocks=partial_state_update_block,
|
||||
policy_ops=[lambda a, b: a + b]
|
||||
)
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
import unittest
|
||||
from parameterized import parameterized
|
||||
from functools import reduce
|
||||
|
||||
|
||||
def generate_assertions_df(df, expected_results, target_cols, evaluations):
    """Append one boolean assertion column per evaluation function to *df*.

    For each function in *evaluations*, a column named
    ``f"{eval_f.__name__}_test"`` is added. Its value for a row is the result
    of comparing the row's *target_cols* values against the expected record
    keyed by ``(run, timestep, substep)`` in *expected_results*.

    Bug fix: in the original, the ``expected_results[...]`` lookup was
    performed in the ``df.apply`` lambda, *outside* the ``try/except
    KeyError`` guard — so a row with no expected record raised instead of
    passing, which the except-clause clearly intended. The lookup now lives
    inside the guard, and rows without an expected record evaluate to True.

    Returns the augmented DataFrame and the list of generated column names.
    """
    test_names = []
    for eval_f in evaluations:
        # Bind eval_f as a default argument so the closure is not subject to
        # Python's late-binding behavior.
        def wrapped_eval(row, eval_f=eval_f):
            try:
                expected = expected_results[(row['run'], row['timestep'], row['substep'])]
                return eval_f(row.filter(items=target_cols).to_dict(), expected)
            except KeyError:
                # No expected record (or a missing key during comparison):
                # treat the row as passing.
                return True

        test_name = f"{eval_f.__name__}_test"
        test_names.append(test_name)
        df[test_name] = df.apply(wrapped_eval, axis=1)

    return df, test_names
|
||||
|
||||
|
||||
def make_generic_test(params):
    """Build a parameterized ``unittest.TestCase`` subclass from *params*.

    Each entry of *params* is ``[name, result_df, expected_results,
    target_cols, evaluations]``. The generated class runs every assertion
    column produced by ``generate_assertions_df`` and asserts that all rows
    passed, annotating failing rows with the expected values that disagreed
    to make failure output easier to read.
    """
    class TestSequence(unittest.TestCase):

        def generic_test(self, tested_df, expected_results, test_name):
            # Rows whose assertion column came back False.
            failing = tested_df[(tested_df[test_name] == False)]
            # print(tabulate(tested_df, headers='keys', tablefmt='psql'))

            if not failing.empty:
                # Attach "invalid_<field>" columns carrying the expected
                # values that differ from the observed row values.
                for idx, row in failing.iterrows():
                    expected = expected_results[(row['run'], row['timestep'], row['substep'])]
                    mismatches = {
                        f"invalid_{field}": expected[field]
                        for field in expected
                        if field in row and expected[field] != row[field]
                    }
                    for col in mismatches.keys():
                        failing[col] = None
                        failing.at[idx, col] = mismatches[col]
                # etc.

            # ToDo: Condition that will change false to true
            self.assertTrue(reduce(lambda a, b: a and b, tested_df[test_name]))

        @parameterized.expand(params)
        def test_validation(self, name, result_df, expected_results, target_cols, evaluations):
            # alt for (*) Exec Debug mode
            tested_df, test_names = generate_assertions_df(result_df, expected_results, target_cols, evaluations)
            for test_name in test_names:
                self.generic_test(tested_df, expected_results, test_name)

    return TestSequence
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
import pandas as pd
|
||||
from cadCAD.utils import SilentDF
|
||||
|
||||
df = SilentDF(pd.read_csv('/DiffyQ-SimCAD/simulations/external_data/output.csv'))
|
||||
|
||||
|
||||
def query(s, df):
    """Return the rows of *df* matching the state's (run, substep, timestep),
    with those three bookkeeping columns dropped from the result."""
    mask = (
        (df['run'] == s['run'])
        & (df['substep'] == s['substep'])
        & (df['timestep'] == s['timestep'])
    )
    return df[mask].drop(columns=['run', 'substep', 'timestep'])
|
||||
|
||||
def p1(_g, substep, sL, s):
|
||||
result_dict = query(s, df).to_dict()
|
||||
del result_dict["ds3"]
|
||||
return {k: list(v.values()).pop() for k, v in result_dict.items()}
|
||||
|
||||
def p2(_g, substep, sL, s):
|
||||
result_dict = query(s, df).to_dict()
|
||||
del result_dict["ds1"], result_dict["ds2"]
|
||||
return {k: list(v.values()).pop() for k, v in result_dict.items()}
|
||||
|
||||
# integrate_ext_dataset
|
||||
def integrate_ext_dataset(_g, step, sL, s, _input):
|
||||
result_dict = query(s, df).to_dict()
|
||||
return 'external_data', {k: list(v.values()).pop() for k, v in result_dict.items()}
|
||||
|
||||
def increment(y, incr_by):
|
||||
return lambda _g, step, sL, s, _input: (y, s[y] + incr_by)
|
||||
increment = increment('increment', 1)
|
||||
|
||||
def view_policies(_g, step, sL, s, _input):
|
||||
return 'policies', _input
|
||||
|
||||
|
||||
external_data = {'ds1': None, 'ds2': None, 'ds3': None}
|
||||
state_dict = {
|
||||
'increment': 0,
|
||||
'external_data': external_data,
|
||||
'policies': external_data
|
||||
}
|
||||
|
||||
|
||||
policies = {"p1": p1, "p2": p2}
|
||||
states = {'increment': increment, 'external_data': integrate_ext_dataset, 'policies': view_policies}
|
||||
PSUB = {'policies': policies, 'states': states}
|
||||
|
||||
# needs M1&2 need behaviors
|
||||
partial_state_update_blocks = {
|
||||
'PSUB1': PSUB,
|
||||
'PSUB2': PSUB,
|
||||
'PSUB3': PSUB
|
||||
}
|
||||
|
||||
sim_config = config_sim({
|
||||
"N": 2,
|
||||
"T": range(4)
|
||||
})
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=state_dict,
|
||||
partial_state_update_blocks=partial_state_update_blocks,
|
||||
policy_ops=[lambda a, b: {**a, **b}]
|
||||
)
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim, access_block
|
||||
|
||||
|
||||
policies, variables = {}, {}
|
||||
exclusion_list = ['nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x']
|
||||
|
||||
# Policies per Mechanism
|
||||
|
||||
# WARNING: DO NOT delete elements from sH
|
||||
# state_history, target_field, psu_block_offset, exculsion_list
|
||||
def last_update(_g, substep, sH, s):
|
||||
return {"last_x": access_block(
|
||||
state_history=sH,
|
||||
target_field="last_x",
|
||||
psu_block_offset=-1,
|
||||
exculsion_list=exclusion_list
|
||||
)
|
||||
}
|
||||
policies["last_x"] = last_update
|
||||
|
||||
def second2last_update(_g, substep, sH, s):
|
||||
return {"2nd_to_last_x": access_block(sH, "2nd_to_last_x", -2, exclusion_list)}
|
||||
policies["2nd_to_last_x"] = second2last_update
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
|
||||
# WARNING: DO NOT delete elements from sH
|
||||
def add(y, x):
|
||||
return lambda _g, substep, sH, s, _input: (y, s[y] + x)
|
||||
variables['x'] = add('x', 1)
|
||||
|
||||
# last_partial_state_update_block
|
||||
def nonexsistant(_g, substep, sH, s, _input):
|
||||
return 'nonexsistant', access_block(sH, "nonexsistant", 0, exclusion_list)
|
||||
variables['nonexsistant'] = nonexsistant
|
||||
|
||||
# last_partial_state_update_block
|
||||
def last_x(_g, substep, sH, s, _input):
|
||||
return 'last_x', _input["last_x"]
|
||||
variables['last_x'] = last_x
|
||||
|
||||
# 2nd to last partial state update block
|
||||
def second_to_last_x(_g, substep, sH, s, _input):
|
||||
return '2nd_to_last_x', _input["2nd_to_last_x"]
|
||||
variables['2nd_to_last_x'] = second_to_last_x
|
||||
|
||||
# 3rd to last partial state update block
|
||||
def third_to_last_x(_g, substep, sH, s, _input):
|
||||
return '3rd_to_last_x', access_block(sH, "3rd_to_last_x", -3, exclusion_list)
|
||||
variables['3rd_to_last_x'] = third_to_last_x
|
||||
|
||||
# 4th to last partial state update block
|
||||
def fourth_to_last_x(_g, substep, sH, s, _input):
|
||||
return '4th_to_last_x', access_block(sH, "4th_to_last_x", -4, exclusion_list)
|
||||
variables['4th_to_last_x'] = fourth_to_last_x
|
||||
|
||||
|
||||
genesis_states = {
|
||||
'x': 0,
|
||||
'nonexsistant': [],
|
||||
'last_x': [],
|
||||
'2nd_to_last_x': [],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []
|
||||
}
|
||||
|
||||
PSUB = {
|
||||
"policies": policies,
|
||||
"variables": variables
|
||||
}
|
||||
|
||||
partial_state_update_block = {
|
||||
"PSUB1": PSUB,
|
||||
"PSUB2": PSUB,
|
||||
"PSUB3": PSUB
|
||||
}
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 1,
|
||||
"T": range(3),
|
||||
}
|
||||
)
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
partial_state_update_blocks=partial_state_update_block
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
import pprint
|
||||
from typing import Dict, List
|
||||
|
||||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import env_trigger, var_substep_trigger, config_sim, psub_list
|
||||
|
||||
pp = pprint.PrettyPrinter(indent=4)
|
||||
|
||||
def some_function(x):
|
||||
return x
|
||||
|
||||
# Optional
|
||||
# dict must contain lists of 2 distinct lengths
|
||||
g: Dict[str, List[int]] = {
|
||||
'alpha': [1],
|
||||
'beta': [2, some_function],
|
||||
'gamma': [3, 4],
|
||||
'omega': [7]
|
||||
}
|
||||
|
||||
psu_steps = ['m1', 'm2', 'm3']
|
||||
system_substeps = len(psu_steps)
|
||||
var_timestep_trigger = var_substep_trigger([0, system_substeps])
|
||||
env_timestep_trigger = env_trigger(system_substeps)
|
||||
env_process = {}
|
||||
|
||||
|
||||
# ['s1', 's2', 's3', 's4']
|
||||
# Policies per Mechanism
|
||||
# Policies per Mechanism
def gamma(_g, step, sL, s):
    """Policy: emit the swept 'gamma' parameter as a policy signal."""
    return {'gamma': _g['gamma']}


def omega(_g, step, sL, s):
    """Policy: emit the swept 'omega' parameter as a policy signal."""
    return {'omega': _g['omega']}


# Internal States per Mechanism
def alpha(_g, step, sL, s, _input):
    """State update: set 'alpha' to the swept parameter value."""
    return 'alpha', _g['alpha']


def beta(_g, step, sL, s, _input):
    """State update: set 'beta' to the swept parameter value."""
    return 'beta', _g['beta']


def policies(_g, step, sL, s, _input):
    """State update: record the aggregated policy input verbatim."""
    return 'policies', _input


def sweeped(_g, step, sL, s, _input):
    """State update: snapshot the swept 'beta' and 'gamma' parameters."""
    return 'sweeped', {'beta': _g['beta'], 'gamma': _g['gamma']}
|
||||
|
||||
psu_block = {k: {"policies": {}, "variables": {}} for k in psu_steps}
|
||||
for m in psu_steps:
|
||||
psu_block[m]['policies']['gamma'] = gamma
|
||||
psu_block[m]['policies']['omega'] = omega
|
||||
psu_block[m]["variables"]['alpha'] = alpha
|
||||
psu_block[m]["variables"]['beta'] = beta
|
||||
psu_block[m]['variables']['policies'] = policies
|
||||
psu_block[m]["variables"]['sweeped'] = var_timestep_trigger(y='sweeped', f=sweeped)
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
'alpha': 0,
|
||||
'beta': 0,
|
||||
'policies': {},
|
||||
'sweeped': {}
|
||||
}
|
||||
|
||||
# Environment Process
|
||||
env_process['sweeped'] = env_timestep_trigger(trigger_field='timestep', trigger_vals=[5], funct_list=[lambda _g, x: _g['beta']])
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 2,
|
||||
"T": range(5),
|
||||
"M": g, # Optional
|
||||
}
|
||||
)
|
||||
|
||||
# New Convention
|
||||
partial_state_update_blocks = psub_list(psu_block, psu_steps)
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
env_processes=env_process,
|
||||
partial_state_update_blocks=partial_state_update_blocks
|
||||
)
|
||||
|
||||
|
||||
# Print the generated partial state update blocks for inspection.
# Fix: user-facing label misspelled "Policie" -> "Policy".
print()
print("Policy State Update Block:")
pp.pprint(partial_state_update_blocks)
print()
print()
|
||||
|
|
@ -0,0 +1,83 @@
|
|||
from cadCAD.configuration import append_configs
|
||||
from cadCAD.configuration.utils import config_sim
|
||||
|
||||
|
||||
# Policies per Mechanism
|
||||
def p1m1(_g, step, sL, s):
|
||||
return {'policy1': 1}
|
||||
def p2m1(_g, step, sL, s):
|
||||
return {'policy2': 2}
|
||||
|
||||
def p1m2(_g, step, sL, s):
|
||||
return {'policy1': 2, 'policy2': 2}
|
||||
def p2m2(_g, step, sL, s):
|
||||
return {'policy1': 2, 'policy2': 2}
|
||||
|
||||
def p1m3(_g, step, sL, s):
|
||||
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
|
||||
def p2m3(_g, step, sL, s):
|
||||
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
|
||||
|
||||
|
||||
# Internal States per Mechanism
|
||||
def add(y, x):
|
||||
return lambda _g, step, sH, s, _input: (y, s[y] + x)
|
||||
|
||||
def policies(_g, step, sH, s, _input):
|
||||
y = 'policies'
|
||||
x = _input
|
||||
return (y, x)
|
||||
|
||||
|
||||
# Genesis States
|
||||
genesis_states = {
|
||||
'policies': {},
|
||||
's1': 0
|
||||
}
|
||||
|
||||
variables = {
|
||||
's1': add('s1', 1),
|
||||
"policies": policies
|
||||
}
|
||||
|
||||
partial_state_update_block = {
|
||||
"m1": {
|
||||
"policies": {
|
||||
"p1": p1m1,
|
||||
"p2": p2m1
|
||||
},
|
||||
"variables": variables
|
||||
},
|
||||
"m2": {
|
||||
"policies": {
|
||||
"p1": p1m2,
|
||||
"p2": p2m2
|
||||
},
|
||||
"variables": variables
|
||||
},
|
||||
"m3": {
|
||||
"policies": {
|
||||
"p1": p1m3,
|
||||
"p2": p2m3
|
||||
},
|
||||
"variables": variables
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
sim_config = config_sim(
|
||||
{
|
||||
"N": 1,
|
||||
"T": range(3),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
append_configs(
|
||||
sim_configs=sim_config,
|
||||
initial_state=genesis_states,
|
||||
partial_state_update_blocks=partial_state_update_block,
|
||||
policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b
|
||||
)
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,110 @@
|
|||
import unittest
|
||||
|
||||
import pandas as pd
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from simulations.regression_tests import external_dataset
|
||||
from cadCAD import configs
|
||||
from testing.generic_test import make_generic_test
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
|
||||
print("Simulation Execution: Single Configuration")
|
||||
print()
|
||||
first_config = configs
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=first_config)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
|
||||
|
||||
def get_expected_results(run):
|
||||
return {
|
||||
(run, 0, 0): {
|
||||
'external_data': {'ds1': None, 'ds2': None, 'ds3': None},
|
||||
'increment': 0,
|
||||
'policies': {'ds1': None, 'ds2': None, 'ds3': None}
|
||||
},
|
||||
(run, 1, 1): {
|
||||
'external_data': {'ds1': 0, 'ds2': 0, 'ds3': 1},
|
||||
'increment': 1,
|
||||
'policies': {'ds1': 0, 'ds2': 0, 'ds3': 1}
|
||||
},
|
||||
(run, 1, 2): {
|
||||
'external_data': {'ds1': 1, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 2,
|
||||
'policies': {'ds1': 1, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 1, 3): {
|
||||
'external_data': {'ds1': 2, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 3,
|
||||
'policies': {'ds1': 2, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 2, 1): {
|
||||
'external_data': {'ds1': 3, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 4,
|
||||
'policies': {'ds1': 3, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 2, 2): {
|
||||
'external_data': {'ds1': 4, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 5,
|
||||
'policies': {'ds1': 4, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 2, 3): {
|
||||
'external_data': {'ds1': 5, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 6,
|
||||
'policies': {'ds1': 5, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 3, 1): {
|
||||
'external_data': {'ds1': 6, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 7,
|
||||
'policies': {'ds1': 6, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 3, 2): {
|
||||
'external_data': {'ds1': 7, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 8,
|
||||
'policies': {'ds1': 7, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 3, 3): {
|
||||
'external_data': {'ds1': 8, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 9,
|
||||
'policies': {'ds1': 8, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 4, 1): {
|
||||
'external_data': {'ds1': 9, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 10,
|
||||
'policies': {'ds1': 9, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 4, 2): {
|
||||
'external_data': {'ds1': 10, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 11,
|
||||
'policies': {'ds1': 10, 'ds2': 40, 'ds3': 5}
|
||||
},
|
||||
(run, 4, 3): {
|
||||
'external_data': {'ds1': 11, 'ds2': 40, 'ds3': 5},
|
||||
'increment': 12,
|
||||
'policies': {'ds1': 11, 'ds2': 40, 'ds3': 5}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
expected_results = {}
|
||||
expected_results_1 = get_expected_results(1)
|
||||
expected_results_2 = get_expected_results(2)
|
||||
expected_results.update(expected_results_1)
|
||||
expected_results.update(expected_results_2)
|
||||
|
||||
|
||||
def row(a, b):
|
||||
return a == b
|
||||
|
||||
|
||||
params = [["external_dataset", result, expected_results, ['increment', 'external_data', 'policies'], [row]]]
|
||||
|
||||
|
||||
class GenericTest(make_generic_test(params)):
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,124 @@
|
|||
import unittest
|
||||
import pandas as pd
|
||||
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from testing.generic_test import make_generic_test
|
||||
from testing.system_models import historical_state_access
|
||||
from cadCAD import configs
|
||||
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=configs)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
expected_results = {
|
||||
(1, 0, 0): {'x': 0, 'nonexsistant': [], 'last_x': [], '2nd_to_last_x': [], '3rd_to_last_x': [], '4th_to_last_x': []},
|
||||
(1, 1, 1): {'x': 1,
|
||||
'nonexsistant': [],
|
||||
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'2nd_to_last_x': [],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 1, 2): {'x': 2,
|
||||
'nonexsistant': [],
|
||||
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'2nd_to_last_x': [],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 1, 3): {'x': 3,
|
||||
'nonexsistant': [],
|
||||
'last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'2nd_to_last_x': [],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 2, 1): {'x': 4,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 4, 'run': 1, 'substep': 1, 'timestep': 1}, # x: 1
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'2nd_to_last_x': [{'x': -1, 'run': 1, 'substep': 0, 'timestep': 0}], # x: 0
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 2, 2): {'x': 5,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1},
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'2nd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 2, 3): {'x': 6,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1},
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'2nd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'3rd_to_last_x': [],
|
||||
'4th_to_last_x': []},
|
||||
(1, 3, 1): {'x': 7,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2},
|
||||
{'x': 5, 'run': 1, 'substep': 2, 'timestep': 2},
|
||||
{'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}
|
||||
],
|
||||
'2nd_to_last_x': [
|
||||
{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1},
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'4th_to_last_x': []},
|
||||
(1, 3, 2): {'x': 8,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2},
|
||||
{'x': 5, 'run': 1, 'substep': 2, 'timestep': 2},
|
||||
{'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}
|
||||
],
|
||||
'2nd_to_last_x': [
|
||||
{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1},
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'4th_to_last_x': []},
|
||||
(1, 3, 3): {'x': 9,
|
||||
'nonexsistant': [],
|
||||
'last_x': [
|
||||
{'x': 4, 'run': 1, 'substep': 1, 'timestep': 2},
|
||||
{'x': 5, 'run': 1, 'substep': 2, 'timestep': 2},
|
||||
{'x': 6, 'run': 1, 'substep': 3, 'timestep': 2}
|
||||
],
|
||||
'2nd_to_last_x': [
|
||||
{'x': 1, 'run': 1, 'substep': 1, 'timestep': 1},
|
||||
{'x': 2, 'run': 1, 'substep': 2, 'timestep': 1},
|
||||
{'x': 3, 'run': 1, 'substep': 3, 'timestep': 1}
|
||||
],
|
||||
'3rd_to_last_x': [{'x': 0, 'run': 1, 'substep': 0, 'timestep': 0}],
|
||||
'4th_to_last_x': []}
|
||||
}
|
||||
|
||||
|
||||
def row(a, b):
|
||||
return a == b
|
||||
params = [
|
||||
["historical_state_access", result, expected_results,
|
||||
['x', 'nonexsistant', 'last_x', '2nd_to_last_x', '3rd_to_last_x', '4th_to_last_x'], [row]]
|
||||
]
|
||||
|
||||
|
||||
class GenericTest(make_generic_test(params)):
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
import unittest
|
||||
import pandas as pd
|
||||
|
||||
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from testing.system_models import param_sweep
|
||||
from cadCAD import configs
|
||||
|
||||
from testing.generic_test import make_generic_test
|
||||
from testing.system_models.param_sweep import some_function
|
||||
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)
|
||||
run = Executor(exec_context=multi_proc_ctx, configs=configs)
|
||||
|
||||
|
||||
def get_expected_results(run, beta, gamma):
|
||||
return {
|
||||
(run, 0, 0): {'policies': {}, 'sweeped': {}, 'alpha': 0, 'beta': 0},
|
||||
(run, 1, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 1, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 1, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 2, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 2, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 2, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 3, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 3, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 3, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 4, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 4, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 4, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': {'beta': beta, 'gamma': gamma}, 'alpha': 1, 'beta': beta},
|
||||
(run, 5, 1): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
|
||||
(run, 5, 2): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta},
|
||||
(run, 5, 3): {'policies': {'gamma': gamma, 'omega': 7}, 'sweeped': beta, 'alpha': 1, 'beta': beta}
|
||||
}
|
||||
|
||||
|
||||
expected_results_1 = {}
|
||||
expected_results_1a = get_expected_results(1, 2, 3)
|
||||
expected_results_1b = get_expected_results(2, 2, 3)
|
||||
expected_results_1.update(expected_results_1a)
|
||||
expected_results_1.update(expected_results_1b)
|
||||
|
||||
expected_results_2 = {}
|
||||
expected_results_2a = get_expected_results(1, some_function, 4)
|
||||
expected_results_2b = get_expected_results(2, some_function, 4)
|
||||
expected_results_2.update(expected_results_2a)
|
||||
expected_results_2.update(expected_results_2b)
|
||||
|
||||
|
||||
i = 0
|
||||
expected_results = [expected_results_1, expected_results_2]
|
||||
config_names = ['sweep_config_A', 'sweep_config_B']
|
||||
|
||||
def row(a, b):
|
||||
return a == b
|
||||
def create_test_params(feature, fields):
|
||||
i = 0
|
||||
for raw_result, _ in run.execute():
|
||||
yield [feature, pd.DataFrame(raw_result), expected_results[i], fields, [row]]
|
||||
i += 1
|
||||
|
||||
|
||||
params = list(create_test_params("param_sweep", ['alpha', 'beta', 'policies', 'sweeped']))
|
||||
|
||||
|
||||
class GenericTest(make_generic_test(params)):
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
import unittest
|
||||
import pandas as pd
|
||||
|
||||
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
|
||||
from testing.generic_test import make_generic_test
|
||||
from testing.system_models import policy_aggregation
|
||||
from cadCAD import configs
|
||||
|
||||
exec_mode = ExecutionMode()
|
||||
single_proc_ctx = ExecutionContext(context=exec_mode.single_proc)
|
||||
run = Executor(exec_context=single_proc_ctx, configs=configs)
|
||||
|
||||
raw_result, tensor_field = run.execute()
|
||||
result = pd.DataFrame(raw_result)
|
||||
|
||||
expected_results = {
|
||||
(1, 0, 0): {'policies': {}, 's1': 0},
|
||||
(1, 1, 1): {'policies': {'policy1': 1, 'policy2': 4}, 's1': 1}, # 'policy1': 2
|
||||
(1, 1, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 2},
|
||||
(1, 1, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 3},
|
||||
(1, 2, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 4},
|
||||
(1, 2, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 5},
|
||||
(1, 2, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 6},
|
||||
(1, 3, 1): {'policies': {'policy1': 2, 'policy2': 4}, 's1': 7},
|
||||
(1, 3, 2): {'policies': {'policy1': 8, 'policy2': 8}, 's1': 8},
|
||||
(1, 3, 3): {'policies': {'policy1': 4, 'policy2': 8, 'policy3': 12}, 's1': 9}
|
||||
}
|
||||
|
||||
|
||||
def row(a, b):
    """Comparator handed to the generic test harness: compare two row values."""
    comparison = a == b
    return comparison
|
||||
|
||||
|
||||
# Single parameter set: compare the tabulated run against the expected records.
params = [["policy_aggregation", result, expected_results, ['policies', 's1'], [row]]]
|
||||
|
||||
|
||||
class GenericTest(make_generic_test(params)):
    """unittest case synthesized from `params` by the generic test factory."""
    pass
|
||||
|
||||
|
||||
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
||||
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
#
|
||||
# def record_generator(row, cols):
|
||||
# return {col: row[col] for col in cols}
|
||||
|
||||
def gen_metric_row(row, cols):
    """Turn one record into a ((run, timestep, substep), {col: value}) pair.

    `row` is a mapping-like record (e.g. a pandas Series) that must contain
    'run', 'timestep', 'substep' and every name in `cols`.
    """
    key = (row['run'], row['timestep'], row['substep'])
    values = {name: row[name] for name in cols}
    return (key, values)
|
||||
|
||||
# def gen_metric_row(row):
|
||||
# return ((row['run'], row['timestep'], row['substep']), {'s1': row['s1'], 'policies': row['policies']})
|
||||
|
||||
# def gen_metric_row(row):
|
||||
# return {
|
||||
# 'run': row['run'],
|
||||
# 'timestep': row['timestep'],
|
||||
# 'substep': row['substep'],
|
||||
# 's1': row['s1'],
|
||||
# 'policies': row['policies']
|
||||
# }
|
||||
|
||||
def gen_metric_dict(df, cols):
    """Index a results DataFrame by (run, timestep, substep).

    Returns a dict mapping each record's (run, timestep, substep) key to a
    dict of the requested `cols` values, via gen_metric_row.
    """
    # Feed the generator straight to dict() instead of materializing an
    # intermediate list first (original built a throwaway list).
    return dict(gen_metric_row(row, cols) for _, row in df.iterrows())
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
**Robot and Marbles Tutorial Series**
|
||||
|
||||
In this series, we introduce basic concepts of cadCAD and system modelling in general using a simple toy model.
|
||||
[Part 1](robot-marbles-part-1/robot-marbles-part-1.ipynb) - States and State Update Functions
|
||||
[Part 2](robot-marbles-part-2/robot-marbles-part-2.ipynb) - Actions and State Dependent Policies
|
||||
[Part 3](robot-marbles-part-3/robot-marbles-part-3.ipynb) - From Synchronous to Asynchronous Time
|
||||
[Part 4](robot-marbles-part-4/robot-marbles-part-4.ipynb) - Uncertainty and Stochastic Processes
|
||||
[Part 5](robot-marbles-part-5/robot-marbles-part-5.ipynb) - Using class objects as state variables
|
||||
[Part 6](robot-marbles-part-6/robot-marbles-part-6.ipynb) - A/B testing
|
||||
|
||||
Check out the [videos](videos) folder for a detailed walkthrough of each tutorial.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue