Merge pull request #5 from BlockScience/workshop-assignment

workshop prep
This commit is contained in:
Andrew Clark 2020-08-05 13:16:02 -04:00 committed by GitHub
commit 36bac1ea18
164 changed files with 4733 additions and 219 deletions

(Several large file diffs suppressed because one or more lines are too long; new binary image files are not shown.)

View File

@@ -0,0 +1,40 @@
import math
from decimal import Decimal
from datetime import timedelta
import numpy as np
from typing import Dict, List
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, ep_time_step, config_sim, access_block
from .genesis_states import genesis_states
from .partial_state_update_block import partial_state_update_blocks
sim_config = config_sim({
'N': 1,
'T': range(60), #day
})
seeds = {
'p': np.random.RandomState(1),
}
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
seeds=seeds,
partial_state_update_blocks=partial_state_update_blocks
)
def get_configs():
'''
Function to extract the configuration information for display in a notebook.
'''
return sim_config,genesis_states,seeds,partial_state_update_blocks
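# Illustrative notebook usage (assumption: this package is importable as `model`):
#   from model.economyconfig import get_configs
#   sim_config, genesis_states, seeds, partial_state_update_blocks = get_configs()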

View File

@@ -0,0 +1,10 @@
from .model.initialization import *
genesis_states = {
'network':network,
'funds':initial_funds,
'sentiment': initial_sentiment,
'supply':supply
}

View File

@@ -0,0 +1,665 @@
import networkx as nx
from scipy.stats import expon, gamma
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import seaborn as sns
#beta = .2 #later we should set this to be param so we can sweep it
# tuning param for the trigger function
#rho = .001
#alpha = 1 - 0.9999599
def trigger_threshold(requested, funds, supply, beta, rho, alpha):
'''
Function that determines threshold for proposals being accepted.
'''
share = requested/funds
if share < beta:
threshold = rho*supply/(beta-share)**2 * 1/(1-alpha)
return threshold
else:
return np.inf
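# Note: the threshold above works out to rho * supply / ((beta - share)**2 * (1 - alpha)).
# It grows without bound as the requested share approaches beta, so proposals requesting
# beta or more of the available funds can never pass (threshold = np.inf).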
def initial_social_network(network, scale = 1, sigmas=3):
'''
    Function to initialize networkx social (influence) edges among participants.
'''
participants = get_nodes_by_type(network, 'participant')
for i in participants:
for j in participants:
if not(j==i):
influence_rv = expon.rvs(loc=0.0, scale=scale)
if influence_rv > scale+sigmas*scale**2:
network.add_edge(i,j)
network.edges[(i,j)]['influence'] = influence_rv
network.edges[(i,j)]['type'] = 'influence'
return network
def initial_conflict_network(network, rate = .25):
'''
Definition:
        Function to initialize networkx conflict edges among proposals.
'''
proposals = get_nodes_by_type(network, 'proposal')
for i in proposals:
for j in proposals:
if not(j==i):
conflict_rv = np.random.rand()
if conflict_rv < rate :
network.add_edge(i,j)
network.edges[(i,j)]['conflict'] = 1-conflict_rv
network.edges[(i,j)]['type'] = 'conflict'
return network
def gen_new_participant(network, new_participant_holdings):
'''
Definition:
Driving processes for the arrival of participants.
Parameters:
network: networkx object
new_participant_holdings: Tokens of new participants
Assumptions:
        Initialized networkx object
Returns:
        Updated networkx object
'''
i = len([node for node in network.nodes])
network.add_node(i)
network.nodes[i]['type']="participant"
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
network.nodes[i]['holdings']=new_participant_holdings
for j in get_nodes_by_type(network, 'proposal'):
network.add_edge(i, j)
        a_rv = np.random.uniform(-1, 1, 1)[0]
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i,j)]['tokens'] = a_rv*network.nodes[i]['holdings']
network.edges[(i, j)]['conviction'] = 0
network.edges[(i,j)]['type'] = 'support'
return network
def gen_new_proposal(network, funds, supply, beta, rho, alpha, funds_requested):
'''
Definition:
Driving processes for the arrival of proposals.
Parameters:
network: networkx object
        funds: current communal funds pool
        supply: current token supply
    Assumptions:
        Initialized networkx object
    Returns:
        Updated networkx object
'''
j = len([node for node in network.nodes])
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction']=0
network.nodes[j]['status']='candidate'
network.nodes[j]['age']=0
# rescale = funds*scale_factor
# r_rv = gamma.rvs(1.5,loc=0.001, scale=rescale)
network.nodes[j]['funds_requested'] =funds_requested
network.nodes[j]['trigger']= trigger_threshold(funds_requested, funds, supply, beta, rho, alpha)
participants = get_nodes_by_type(network, 'participant')
proposing_participant = np.random.choice(participants)
for i in participants:
network.add_edge(i, j)
if i==proposing_participant:
network.edges[(i, j)]['affinity']=1
else:
a_rv = np.random.uniform(-1,1,1)[0]
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['conviction'] = 0
network.edges[(i,j)]['tokens'] = 0
network.edges[(i,j)]['type'] = 'support'
return network
def get_nodes_by_type(g, node_type_selection):
'''
Definition:
        Function to extract nodes of a given (named) type
    Parameters:
        g: networkx object
        node_type_selection: node type
    Assumptions:
    Returns:
        List of the node ids of the selected type
Example:
proposals = get_nodes_by_type(network, 'proposal')
'''
return [node for node in g.nodes if g.nodes[node]['type']== node_type_selection ]
def get_sentimental(sentiment, force, decay=.1):
    '''
    Exponentially smooth `sentiment` toward `force` with decay rate `decay`, clipped to [0, 1].
    '''
mu = decay
sentiment = sentiment*(1-mu) + force*mu
if sentiment > 1:
sentiment = 1
elif sentiment < 0:
sentiment = 0
return sentiment
def get_edges_by_type(g, edge_type_selection):
'''
    Function to extract edges of a given type
'''
return [edge for edge in g.edges if g.edges[edge]['type']== edge_type_selection ]
def conviction_order(network, proposals):
'''
Function to sort conviction order
'''
ordered = sorted(proposals, key=lambda j:network.nodes[j]['conviction'] , reverse=True)
return ordered
def social_links(network, participant, scale = 1):
    '''
    Add influence edges between a single participant and the other participants.
    '''
participants = get_nodes_by_type(network, 'participant')
i = participant
for j in participants:
if not(j==i):
influence_rv = expon.rvs(loc=0.0, scale=scale)
if influence_rv > scale+scale**2:
network.add_edge(i,j)
network.edges[(i,j)]['influence'] = influence_rv
network.edges[(i,j)]['type'] = 'influence'
return network
def conflict_links(network,proposal ,rate = .25):
    '''
    Add conflict edges between a single proposal and the other proposals.
    '''
proposals = get_nodes_by_type(network, 'proposal')
i = proposal
for j in proposals:
if not(j==i):
conflict_rv = np.random.rand()
if conflict_rv < rate :
network.add_edge(i,j)
network.edges[(i,j)]['conflict'] = 1-conflict_rv
network.edges[(i,j)]['type'] = 'conflict'
return network
def social_affinity_booster(network, proposal, participant):
    '''
    Influence-weighted boost to participant i's affinity for proposal j, based on the tokens
    staked on j by participants linked to i through influence edges, normalized by i's holdings.
    '''
participants = get_nodes_by_type(network, 'participant')
influencers = get_edges_by_type(network, 'influence')
j=proposal
i=participant
i_tokens = network.nodes[i]['holdings']
influence = np.array([network.edges[(i,node)]['influence'] for node in participants if (i, node) in influencers ])
#print(influence)
tokens = np.array([network.edges[(node,j)]['tokens'] for node in participants if (i, node) in influencers ])
#print(tokens)
influence_sum = np.sum(influence)
if influence_sum>0:
boosts = np.sum(tokens*influence)/(influence_sum*i_tokens)
else:
boosts = 0
return np.sum(boosts)
def snap_plot(nets, size_scale = 1/10, dims = (30,30), savefigs=False):
    '''
    Plot a sequence of network snapshots with participants and proposals laid out in two columns;
    node color encodes proposal status and edge color encodes staked tokens. Optionally saves
    frames to images/snap/.
    '''
last_net = nets[-1]
last_props=get_nodes_by_type(last_net, 'proposal')
M = len(last_props)
last_parts=get_nodes_by_type(last_net, 'participant')
N = len(last_parts)
pos = {}
for ind in range(N):
i = last_parts[ind]
pos[i] = np.array([0, 2*ind-N])
for ind in range(M):
j = last_props[ind]
pos[j] = np.array([1, 2*N/M *ind-N])
if savefigs:
counter = 0
length = 10
for net in nets:
edges = get_edges_by_type(net, 'support')
max_tok = np.max([net.edges[e]['tokens'] for e in edges])
E = len(edges)
net_props = get_nodes_by_type(net, 'proposal')
net_parts = get_nodes_by_type(net, 'participant')
net_node_label ={}
num_nodes = len([node for node in net.nodes])
node_color = np.empty((num_nodes,4))
node_size = np.empty(num_nodes)
edge_color = np.empty((E,4))
cm = plt.get_cmap('Reds')
cNorm = colors.Normalize(vmin=0, vmax=max_tok)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
net_cand = [j for j in net_props if net.nodes[j]['status']=='candidate']
for j in net_props:
node_size[j] = net.nodes[j]['funds_requested']*size_scale
if net.nodes[j]['status']=="candidate":
node_color[j] = colors.to_rgba('blue')
trigger = net.nodes[j]['trigger']
conviction = net.nodes[j]['conviction']
percent_of_trigger = " "+str(int(100*conviction/trigger))+'%'
net_node_label[j] = str(percent_of_trigger)
elif net.nodes[j]['status']=="active":
node_color[j] = colors.to_rgba('orange')
net_node_label[j] = ''
elif net.nodes[j]['status']=="completed":
node_color[j] = colors.to_rgba('green')
net_node_label[j] = ''
elif net.nodes[j]['status']=="failed":
node_color[j] = colors.to_rgba('gray')
net_node_label[j] = ''
elif net.nodes[j]['status']=="killed":
node_color[j] = colors.to_rgba('black')
net_node_label[j] = ''
for i in net_parts:
node_size[i] = net.nodes[i]['holdings']*size_scale/10
node_color[i] = colors.to_rgba('red')
net_node_label[i] = ''
included_edges = []
for ind in range(E):
e = edges[ind]
tokens = net.edges[e]['tokens']
edge_color[ind] = scalarMap.to_rgba(tokens)
if e[1] in net_cand:
included_edges.append(e)
iE = len(included_edges)
included_edge_color = np.empty((iE,4))
for ind in range(iE):
e = included_edges[ind]
tokens = net.edges[e]['tokens']
included_edge_color[ind] = scalarMap.to_rgba(tokens)
# nx.draw(net,
# pos=pos,
# node_size = node_size,
# node_color = node_color,
# edge_color = included_edge_color,
# edgelist=included_edges,
# labels = net_node_label)
# plt.title('Tokens Staked by Partipants to Proposals')
else:
plt.figure()
nx.draw(net,
pos=pos,
node_size = node_size,
node_color = node_color,
edge_color = included_edge_color,
edgelist=included_edges,
labels = net_node_label)
            plt.title('Tokens Staked by Participants to Proposals')
plt.tight_layout()
plt.axis('on')
plt.xticks([])
plt.yticks([])
if savefigs:
#plt.savefig('images/' + unique_id+'_fig'+str(counter)+'.png')
plt.savefig('images/snap/'+str(counter)+'.png',bbox_inches='tight')
counter = counter+1
plt.show()
def pad(vec, length,fill=True):
    '''
    Pad a 1-D vector to `length` with zeros (fill=True) or NaNs (fill=False).
    '''
if fill:
padded = np.zeros(length,)
else:
padded = np.empty(length,)
padded[:] = np.nan
for i in range(len(vec)):
padded[i]= vec[i]
return padded
def make2D(key, data, fill=False):
    '''
    Stack a DataFrame column of variable-length arrays into a 2-D numpy array, padding rows to equal length.
    '''
maxL = data[key].apply(len).max()
newkey = 'padded_'+key
data[newkey] = data[key].apply(lambda x: pad(x,maxL,fill))
reshaped = np.array([a for a in data[newkey].values])
return reshaped
def quantile_plot(xkey, ykey, dataframe, dq=.1, logy=False, return_df = False):
    '''
    Plot quantiles of the array-valued column `ykey` against `xkey`, in quantile steps of dq.
    '''
qX = np.arange(0,1+dq,dq)
data = dataframe[[xkey,ykey]].copy()
qkeys = []
for q in qX:
qkey= 'quantile'+str(int(100*q))
#print(qkey)
data[qkey] = data[ykey].apply(lambda arr: np.quantile(arr,q) )
#print(data[qkey].head())
qkeys.append(qkey)
data[[xkey]+qkeys].plot(x=xkey, logy=logy)
plt.title(ykey + " Quantile Plot" )
plt.ylabel(ykey)
labels = [str(int(100*q))+"$^{th}$ Percentile" for q in qX ]
plt.legend(labels, ncol = 1,loc='center left', bbox_to_anchor=(1, .5))
if return_df:
return data
def affinities_plot(df, dims = (8.5, 11) ):
    '''
    Heatmap of participant-proposal affinities taken from the final network state in the dataframe.
    '''
last_net= df.network.values[-1]
last_props=get_nodes_by_type(last_net, 'proposal')
M = len(last_props)
last_parts=get_nodes_by_type(last_net, 'participant')
N = len(last_parts)
affinities = np.empty((N,M))
for i_ind in range(N):
for j_ind in range(M):
i = last_parts[i_ind]
j = last_props[j_ind]
affinities[i_ind][j_ind] = last_net.edges[(i,j)]['affinity']
fig, ax = plt.subplots(figsize=dims)
sns.heatmap(affinities.T,
xticklabels=last_parts,
yticklabels=last_props,
square=True,
cbar=True,
cmap = plt.cm.RdYlGn,
ax=ax)
plt.title('affinities between participants and proposals')
plt.ylabel('proposal_id')
plt.xlabel('participant_id')
def trigger_sweep(field, trigger_func,beta,rho,alpha,supply=10**9):
    '''
    Sweep the trigger function over share of funds requested and either effective supply or alpha;
    returns a dict of 2-D arrays (and the swept axes) for plotting.
    '''
xmax= beta
if field == 'effective_supply':
share_of_funds = np.arange(.001,xmax,.001)
total_supply = np.arange(0,supply*10, supply/100)
demo_data_XY = np.outer(share_of_funds,total_supply)
demo_data_Z0=np.empty(demo_data_XY.shape)
demo_data_Z1=np.empty(demo_data_XY.shape)
demo_data_Z2=np.empty(demo_data_XY.shape)
demo_data_Z3=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for ts_ind in range(len(total_supply)):
ts = total_supply[ts_ind]
tc = ts /(1-alpha)
trigger = trigger_func(sof, 1, ts,beta,rho,alpha)
demo_data_Z0[sof_ind,ts_ind] = np.log10(trigger)
demo_data_Z1[sof_ind,ts_ind] = trigger
demo_data_Z2[sof_ind,ts_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z3[sof_ind,ts_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z0,
'trigger':demo_data_Z1,
'share_of_max_conv': demo_data_Z2,
'log10_share_of_max_conv':demo_data_Z3,
'total_supply':total_supply,
'share_of_funds':share_of_funds,
'alpha':alpha}
elif field == 'alpha':
#note if alpha >.01 then this will give weird results max alpha will be >1
alpha = np.arange(0,.5,.001)
share_of_funds = np.arange(.001,xmax,.001)
demo_data_XY = np.outer(share_of_funds,alpha)
demo_data_Z4=np.empty(demo_data_XY.shape)
demo_data_Z5=np.empty(demo_data_XY.shape)
demo_data_Z6=np.empty(demo_data_XY.shape)
demo_data_Z7=np.empty(demo_data_XY.shape)
for sof_ind in range(len(share_of_funds)):
sof = share_of_funds[sof_ind]
for a_ind in range(len(alpha)):
ts = supply
a = alpha[a_ind]
tc = ts /(1-a)
trigger = trigger_func(sof, 1, ts, beta, rho, a)
demo_data_Z4[sof_ind,a_ind] = np.log10(trigger)
demo_data_Z5[sof_ind,a_ind] = trigger
demo_data_Z6[sof_ind,a_ind] = trigger/tc #share of maximum possible conviction
demo_data_Z7[sof_ind,a_ind] = np.log10(trigger/tc)
return {'log10_trigger':demo_data_Z4,
'trigger':demo_data_Z5,
'share_of_max_conv': demo_data_Z6,
'log10_share_of_max_conv':demo_data_Z7,
'alpha':alpha,
'share_of_funds':share_of_funds,
'supply':supply}
else:
return "invalid field"
def trigger_plotter(share_of_funds,Z, color_label,y, ylabel,cmap='jet'):
    '''
    Contour plot of a trigger-function sweep produced by trigger_sweep.
    '''
dims = (10, 5)
fig, ax = plt.subplots(figsize=dims)
cf = plt.contourf(share_of_funds, y, Z.T, 100, cmap=cmap)
cbar=plt.colorbar(cf)
plt.axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
#ax.set_xscale('log')
plt.ylabel(ylabel)
plt.xlabel('Share of Funds Requested')
plt.title('Trigger Function Map')
cbar.ax.set_ylabel(color_label)
def trigger_grid(supply_sweep, alpha_sweep):
fig, axs = plt.subplots(nrows=2, ncols=1,figsize=(20,20))
axs = axs.flatten()
# cut out the plots that didn't require the heatmap
# and switch to (2,1) subplots
# share_of_funds = alpha_sweep['share_of_funds']
# Z = alpha_sweep['log10_share_of_max_conv']
# y = alpha_sweep['alpha']
# ylabel = 'alpha'
# axs[0].contourf(share_of_funds, y, Z.T,100, cmap='jet', )
# #axs[0].colorbar(cf)
# axs[0].axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
# axs[0].set_ylabel(ylabel)
# axs[0].set_xlabel('Share of Funds Requested')
# axs[0].set_title('Trigger Function Map - Alpha sweep')
share_of_funds = alpha_sweep['share_of_funds']
Z = alpha_sweep['log10_trigger']
y = alpha_sweep['alpha']
ylabel = 'alpha'
supply = alpha_sweep['supply']
cp0=axs[0].contourf(share_of_funds, y, Z.T,100, cmap='jet', )
axs[0].axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
axs[0].set_ylabel(ylabel)
axs[0].set_xlabel('Share of Funds Requested')
axs[0].set_title('Trigger Function Map - Alpha sweep; Supply ='+str(supply))
cb0=plt.colorbar(cp0, ax=axs[0])
cb0.set_label('log10 of conviction to trigger')
# share_of_funds = supply_sweep['share_of_funds']
# Z = supply_sweep['log10_share_of_max_conv']
# y = supply_sweep['total_supply']
# ylabel = 'Effective Supply'
# axs[2].contourf(share_of_funds, y, Z.T,100, cmap='jet', )
# axs[2].axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
# axs[2].set_ylabel(ylabel)
# axs[2].set_xlabel('Share of Funds Requested')
# axs[2].set_title('Trigger Function Map - Supply sweep - Z: share_of_max_conv')
share_of_funds = supply_sweep['share_of_funds']
Z = supply_sweep['log10_trigger']
y = supply_sweep['total_supply']
ylabel = 'Effective Supply'
alpha = supply_sweep['alpha']
max_conv = y/(1-alpha)
cp1=axs[1].contourf(share_of_funds, y, Z.T,100, cmap='jet', )
axs[1].axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
axs[1].set_ylabel(ylabel)
axs[1].set_xlabel('Share of Funds Requested')
axs[1].set_title('Trigger Function Map - Supply sweep; alpha='+str(alpha))
axs[1].set_label('log10 of conviction to trigger')
cb1=plt.colorbar(cp1, ax=axs[1])
cb1.set_label('log10 of conviction to trigger')
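# Illustrative usage (assumption: the workshop parameter values from initialization.py):
#   supply_sweep = trigger_sweep('effective_supply', trigger_threshold, beta=.2, rho=.0025, alpha=1/2**3)
#   alpha_sweep = trigger_sweep('alpha', trigger_threshold, beta=.2, rho=.0025, alpha=1/2**3)
#   trigger_grid(supply_sweep, alpha_sweep)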
def initialize_network(n,m, initial_funds, supply, beta, rho, alpha):
'''
Definition:
        Function to initialize the networkx object with participant and proposal nodes
    Parameters:
        n: number of initial participants
        m: number of initial proposals
        initial_funds, supply, beta, rho, alpha: system parameters used to set proposal triggers
    Assumptions:
    Returns:
        Initialized networkx DiGraph
    Example:
        network = initialize_network(n, m, initial_funds, supply, beta, rho, alpha)
    '''
    # initialize networkx directed graph
network = nx.DiGraph()
# create participant nodes with type and token holding
for i in range(n):
network.add_node(i)
network.nodes[i]['type']= "participant"
h_rv = expon.rvs(loc=0.0, scale= supply/n)
network.nodes[i]['holdings'] = h_rv # SOL check
s_rv = np.random.rand()
network.nodes[i]['sentiment'] = s_rv
participants = get_nodes_by_type(network, 'participant')
initial_supply = np.sum([ network.nodes[i]['holdings'] for i in participants])
# Generate initial proposals
for ind in range(m):
j = n+ind
network.add_node(j)
network.nodes[j]['type']="proposal"
network.nodes[j]['conviction'] = 0
network.nodes[j]['status'] = 'candidate'
network.nodes[j]['age'] = 0
r_rv = gamma.rvs(3,loc=0.001, scale=500)
network.nodes[j]['funds_requested'] = r_rv
network.nodes[j]['trigger']= trigger_threshold(r_rv, initial_funds, initial_supply,beta,rho,alpha)
for i in range(n):
network.add_edge(i, j)
rv = np.random.rand()
a_rv = np.random.uniform(-1,1,1)[0]
network.edges[(i, j)]['affinity'] = a_rv
network.edges[(i, j)]['tokens'] = 0
network.edges[(i, j)]['conviction'] = 0
network.edges[(i, j)]['type'] = 'support'
proposals = get_nodes_by_type(network, 'proposal')
total_requested = np.sum([ network.nodes[i]['funds_requested'] for i in proposals])
network = initial_conflict_network(network, rate = .25)
network = initial_social_network(network, scale = 1)
return network

View File

@@ -0,0 +1,32 @@
# import libraries
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from .conviction_helper_functions import *
# Parameters
# maximum share of funds a proposal can take
beta = .2 #later we should set this to be param so we can sweep it
# tuning param for the trigger function
rho = .0025
#alpha = 1 - 0.9999599 #native timescale for app as in contract code
alpha = 1/2**3 #timescale set in days with a 3-day half-life (per comments in the contract code)
supply = 21706 # Honey supply balance as of 7-17-2020
initial_sentiment = .6
n= 30 #initial participants
m= 7 #initial proposals
sensitivity = .75
tmin = 0 #unit days; minimum periods passed before a proposal can pass
min_supp = 1 #number of tokens that must be staked for a proposal to be a candidate
# sentiment_decay = .01 #termed mu in the state update function
base_completion_rate = 45
base_failure_rate = 180
initial_funds = 48000 # in xDai
network = initialize_network(n,m,initial_funds,supply, beta, rho, alpha)
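# Illustrative sanity check of the generated initial state (counts follow from n and m above):
#   len(get_nodes_by_type(network, 'participant'))  # 30
#   len(get_nodes_by_type(network, 'proposal'))     # 7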

View File

@@ -0,0 +1,185 @@
import numpy as np
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
# hyperparameters
mu = 0.01
# Phase 2
# Behaviors
def check_progress(params, step, sL, s):
'''
Driving processes: completion of previously funded proposals
'''
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
completed = []
failed = []
for j in proposals:
if network.nodes[j]['status'] == 'active':
grant_size = network.nodes[j]['funds_requested']
likelihood = 1.0/(base_completion_rate+np.log(grant_size))
failure_rate = 1.0/(base_failure_rate+np.log(grant_size))
if np.random.rand() < likelihood:
completed.append(j)
elif np.random.rand() < failure_rate:
failed.append(j)
return({'completed':completed, 'failed':failed})
# Mechanisms
def complete_proposal(params, step, sL, s, _input):
'''
Book-keeping of failed and completed proposals. Update network object
'''
network = s['network']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
competitors = get_edges_by_type(network, 'conflict')
completed = _input['completed']
for j in completed:
network.nodes[j]['status']='completed'
for c in proposals:
if (j,c) in competitors:
conflict = network.edges[(j,c)]['conflict']
for i in participants:
network.edges[(i,c)]['affinity'] = network.edges[(i,c)]['affinity'] *(1-conflict)
for i in participants:
force = network.edges[(i,j)]['affinity']
sentiment = network.nodes[i]['sentiment']
network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)
failed = _input['failed']
for j in failed:
network.nodes[j]['status']='failed'
for i in participants:
force = -network.edges[(i,j)]['affinity']
sentiment = network.nodes[i]['sentiment']
network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)
key = 'network'
value = network
return (key, value)
def update_sentiment_on_completion(params, step, sL, s, _input):
network = s['network']
completed = _input['completed']
failed = _input['failed']
sentiment = s['sentiment']
completed_count = len(completed)
failed_count = len(failed)
if completed_count+failed_count>0:
sentiment = get_sentimental(sentiment,completed_count-failed_count, .25)
else:
sentiment = get_sentimental(sentiment, 0, 0)
key = 'sentiment'
value = sentiment
return (key, value)
# Phase 3
# Behaviors
def participants_decisions(params, step, sL, s):
'''
High sentiment, high affinity =>buy
Low sentiment, low affinities => burn
Assign tokens to top affinities
'''
network = s['network']
participants = get_nodes_by_type(network, 'participant')
proposals = get_nodes_by_type(network, 'proposal')
candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']
#sensitivity = params['sensitivity']
gain = .01
delta_holdings={}
proposals_supported ={}
for i in participants:
engagement_rate = .3*network.nodes[i]['sentiment']
if np.random.rand()<engagement_rate:
force = network.nodes[i]['sentiment']-sensitivity
delta_holdings[i] = network.nodes[i]['holdings']*gain*force
support = []
for j in candidates:
booster = social_affinity_booster(network, j, i)
affinity = network.edges[(i, j)]['affinity']+booster
cutoff = sensitivity*np.max([network.edges[(i,p)]['affinity'] for p in candidates])
# range is [-1,1], where 0 is indifference, this determines min affinity supported
# if no proposal meets this threshold participants may support a null proposal
if cutoff <.3:
cutoff = .3
if affinity > cutoff:
support.append(j)
proposals_supported[i] = support
else:
delta_holdings[i] = 0
proposals_supported[i] = [j for j in candidates if network.edges[(i,j)]['tokens']>0 ]
return({'delta_holdings':delta_holdings, 'proposals_supported':proposals_supported})
# Mechanisms
def update_tokens(params, step, sL, s, _input):
'''
Description:
        Update everyone's holdings and their conviction for each proposal
'''
network = s['network']
delta_holdings = _input['delta_holdings']
proposals = get_nodes_by_type(network, 'proposal')
candidates = [j for j in proposals if network.nodes[j]['status']=='candidate']
proposals_supported = _input['proposals_supported']
participants = get_nodes_by_type(network, 'participant')
for i in participants:
network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
supported = proposals_supported[i]
total_affinity = np.sum([ network.edges[(i, j)]['affinity'] for j in supported])
for j in candidates:
if j in supported:
normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
else:
network.edges[(i, j)]['tokens'] = 0
prior_conviction = network.edges[(i, j)]['conviction']
current_tokens = network.edges[(i, j)]['tokens']
network.edges[(i, j)]['conviction'] =current_tokens+alpha*prior_conviction
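            # conviction accumulates geometrically: C_t = current tokens + alpha * C_{t-1}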
for j in candidates:
network.nodes[j]['conviction'] = np.sum([ network.edges[(i, j)]['conviction'] for i in participants])
total_tokens = np.sum([network.edges[(i, j)]['tokens'] for i in participants ])
if total_tokens < min_supp:
network.nodes[j]['status'] = 'killed'
key = 'network'
value = network
return (key, value)

View File

@@ -0,0 +1,132 @@
import numpy as np
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
# Behaviors
def trigger_function(params, step, sL, s):
'''
This policy checks to see if each proposal passes or not.
'''
network = s['network']
funds = s['funds']
supply = s['supply']
proposals = get_nodes_by_type(network, 'proposal')
accepted = []
triggers = {}
funds_to_be_released = 0
for j in proposals:
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
age = network.nodes[j]['age']
threshold = trigger_threshold(requested, funds, supply, beta, rho, alpha)
if age > tmin:
conviction = network.nodes[j]['conviction']
if conviction >threshold:
accepted.append(j)
funds_to_be_released = funds_to_be_released + requested
else:
threshold = np.nan
triggers[j] = threshold
#catch over release and keep the highest conviction results
if funds_to_be_released > funds:
ordered = conviction_order(network, accepted)
accepted = []
release = 0
ind = 0
while release + network.nodes[ordered[ind]]['funds_requested'] < funds:
accepted.append(ordered[ind])
            release = release + network.nodes[ordered[ind]]['funds_requested']
ind=ind+1
return({'accepted':accepted, 'triggers':triggers})
# Mechanisms
def decrement_funds(params, step, sL, s, _input):
'''
If a proposal passes, funds are decremented by the amount of the proposal
'''
funds = s['funds']
network = s['network']
accepted = _input['accepted']
#decrement funds
for j in accepted:
funds = funds - network.nodes[j]['funds_requested']
key = 'funds'
value = funds
return (key, value)
def update_sentiment_on_release(params, step, sL, s, _input):
network = s['network']
proposals = get_nodes_by_type(network, 'proposal')
accepted = _input['accepted']
proposals_outstanding = np.sum([network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status']=='candidate'])
proposals_accepted = np.sum([network.nodes[j]['funds_requested'] for j in accepted])
sentiment = s['sentiment']
force = len(accepted)
if force>0:
sentiment = get_sentimental(sentiment, force, .25)
else:
sentiment = get_sentimental(sentiment, 0, 0)
key = 'sentiment'
value = sentiment
return (key, value)
def update_proposals(params, step, sL, s, _input):
'''
If proposal passes, its status is changed in the network object.
'''
network = s['network']
accepted = _input['accepted']
triggers = _input['triggers']
participants = get_nodes_by_type(network, 'participant')
    proposals = get_nodes_by_type(network, 'proposal')
#sensitivity = params['sensitivity']
    for j in proposals:
        # only candidate proposals have a trigger computed this timestep
        if j in triggers:
            network.nodes[j]['trigger'] = triggers[j]
#bookkeeping conviction and participant sentiment
for j in accepted:
network.nodes[j]['status']='active'
network.nodes[j]['conviction']=np.nan
#change status to active
for i in participants:
#operating on edge = (i,j)
#reset tokens assigned to other candidates
network.edges[(i,j)]['tokens']=0
network.edges[(i,j)]['conviction'] = np.nan
#update participants sentiments (positive or negative)
affinities = [network.edges[(i,p)]['affinity'] for p in proposals if not(p in accepted)]
if len(affinities)>1:
max_affinity = np.max(affinities)
force = network.edges[(i,j)]['affinity']-sensitivity*max_affinity
else:
force = 0
#based on what their affinities to the accepted proposals
network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, False)
key = 'network'
value = network
return (key, value)

View File

@@ -0,0 +1,102 @@
import numpy as np
import pandas as pd
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
from scipy.stats import expon, gamma
# Behaviors
def driving_process(params, step, sL, s):
'''
Driving process for adding new participants (their funds) and new proposals.
'''
###WORKSHOP###
#construct heuristics for arrival of
#new funds (added to communal funds)
#new participants (and personal funds)
#new Proposals
### helpful data collection from the state
funds = s['funds']
network = s['network']
sentiment = s['sentiment']
proposals = get_nodes_by_type(network, 'proposal')
participants = get_nodes_by_type(network, 'participant')
candidate_proposals = [j for j in proposals if network.nodes[j]['status']=='candidate']
#### Part 1:
# Arrival of New Funds
#### Part 2:
# Arrival of a new participant?
# how much holdings do they have?
#### Part 3:
# Arrival of new proposals?
# How many?
# how much funds are they requesting?
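    # Illustrative sketch (assumption), covering Parts 1-3 above with one simple set of
    # heuristics so that the return statement below is well defined. The rates and scales
    # used here (0.01, 0.3*sentiment, 0.2, expon scale=100, gamma(3, scale=500)) are
    # placeholders; workshop participants are expected to replace them with their own.
    # Part 1: new funds arrive as a small random fraction of the current pool
    funds_arrival = funds * 0.01 * np.random.rand()
    # Part 2: a new participant arrives with probability increasing in sentiment,
    # bringing exponentially distributed holdings
    new_participant = np.random.rand() < 0.3 * sentiment
    if new_participant:
        new_participant_holdings = expon.rvs(loc=0.0, scale=100)
    else:
        new_participant_holdings = 0
    # Part 3: occasionally a small batch of proposals arrives, each requesting
    # gamma-distributed funds (same distribution as the initial proposals)
    new_proposal = np.random.rand() < 0.2
    if new_proposal:
        new_proposal_ct = np.random.randint(1, 4)
        new_proposal_requested = [gamma.rvs(3, loc=0.001, scale=500) for _ in range(new_proposal_ct)]
    else:
        new_proposal_ct = 0
        new_proposal_requested = []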
return({'new_participant':new_participant, #True/False
'new_participant_holdings':new_participant_holdings, #funds held by new participant if True
'new_proposal':new_proposal, #True/False
'new_proposal_ct': new_proposal_ct, #int
'new_proposal_requested':new_proposal_requested, #list funds requested by new proposal if True, len =ct
'funds_arrival':funds_arrival}) #quantity of new funds arriving to the communal pool
# Mechanisms
def update_network(params, step, sL, s, _input):
'''
Add new participants and proposals to network object
'''
network = s['network']
funds = s['funds']
    supply = s['supply']
new_participant = _input['new_participant']
new_proposal = _input['new_proposal']
if new_participant:
new_participant_holdings = _input['new_participant_holdings']
network = gen_new_participant(network, new_participant_holdings)
if new_proposal:
for ct in range(_input['new_proposal_ct']):
funds_req = _input['new_proposal_requested'][ct]
network= gen_new_proposal(network,funds,supply, beta, rho, alpha, funds_req)
#update age of the existing proposals
proposals = get_nodes_by_type(network, 'proposal')
for j in proposals:
network.nodes[j]['age'] = network.nodes[j]['age']+1
if network.nodes[j]['status'] == 'candidate':
requested = network.nodes[j]['funds_requested']
network.nodes[j]['trigger'] = trigger_threshold(requested, funds, supply, beta, rho, alpha)
else:
network.nodes[j]['trigger'] = np.nan
key = 'network'
value = network
return (key, value)
def increment_funds(params, step, sL, s, _input):
'''
    Increase the communal funds by the quantity of newly arrived funds.
'''
funds = s['funds']
funds_arrival = _input['funds_arrival']
#increment funds
funds = funds + funds_arrival
key = 'funds'
value = funds
return (key, value)

View File

@@ -0,0 +1,47 @@
from .model.system import *
from .model.participants import *
from .model.proposals import *
# The Partial State Update Blocks
partial_state_update_blocks = [
{
# system.py:
'policies': {
'random': driving_process
},
'variables': {
'network': update_network,
'funds':increment_funds,
}
},
{
# participants.py
'policies': {
'completion': check_progress
},
'variables': {
            'sentiment': update_sentiment_on_completion, #note: failing decays sentiment, completing bumps it
'network': complete_proposal
}
},
{
# proposals.py
'policies': {
'release': trigger_function
},
'variables': {
'funds': decrement_funds,
'sentiment': update_sentiment_on_release, #releasing funds can bump sentiment
'network': update_proposals
}
},
{
# participants.py
'policies': {
'participants_act': participants_decisions
},
'variables': {
'network': update_tokens
}
}
]
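# Note (standard cadCAD behavior): the blocks above run in order within each timestep;
# each block's policy outputs are aggregated into the _input dict that is passed to
# every state update function listed under its 'variables'.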

View File

@@ -0,0 +1,69 @@
import pandas as pd
from .model.conviction_helper_functions import *
from model import economyconfig
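# importing economyconfig registers the simulation configuration via append_configs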
from cadCAD.engine import ExecutionMode, ExecutionContext
exec_mode = ExecutionMode()
from cadCAD.engine import Executor
from cadCAD import configs
def run(input_config=configs):
'''
Definition:
Run simulation
Parameters:
input_config: Optional way to pass in system configuration
'''
exec_mode = ExecutionMode()
local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)
simulation = Executor(exec_context=local_mode_ctx, configs=input_config)
raw_system_events, tensor_field, sessions = simulation.execute()
# Result System Events DataFrame
df = pd.DataFrame(raw_system_events)
return df
def postprocessing(df, sim_ind=-1):
'''
Function for postprocessing the simulation results to extract key information from the network object.
'''
# subset to last substep of each simulation
df= df[df.substep==df.substep.max()]
sim_count = df.simulation.max()
if sim_ind <0:
sim_ind = sim_count+1+sim_ind
df=df[df.simulation==sim_ind]
# Extract information from dataframe
df['conviction'] = df.network.apply(lambda g: np.array([g.nodes[j]['conviction'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate']))
df['candidate_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate']))
df['candidate_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate']))
df['killed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='killed']))
df['killed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='killed']))
df['candidate_funds_requested'] = df.network.apply(lambda g: np.array([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate']))
df['active_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='active']))
df['active_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='active']))
df['failed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='failed']))
df['failed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='failed']))
df['completed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='completed']))
df['completed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='completed']))
df['funds_requested'] = df.network.apply(lambda g: np.array([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal')]))
df['share_of_funds_requested'] = df.candidate_funds_requested/df.funds
df['share_of_funds_requested_all'] = df.funds_requested/df.funds
df['triggers'] = df.network.apply(lambda g: np.array([g.nodes[j]['trigger'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate' ]))
df['conviction_share_of_trigger'] = df.conviction/df.triggers
df['age'] = df.network.apply(lambda g: np.array([g.nodes[j]['age'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status']=='candidate' ]))
df['age_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['age'] for j in get_nodes_by_type(g, 'proposal') ]))
df['conviction_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['conviction'] for j in get_nodes_by_type(g, 'proposal') ]))
df['triggers_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['trigger'] for j in get_nodes_by_type(g, 'proposal') ]))
df['conviction_share_of_trigger_all'] = df.conviction_all/df.triggers_all
return df
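# Illustrative usage from a notebook (assumption: the model package has already registered
# its configuration by importing economyconfig; the columns used below are created by
# postprocessing()):
#   results = run()
#   df = postprocessing(results)
#   df[['timestep', 'funds', 'sentiment', 'candidate_count', 'active_funds']].tail()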

(One additional large file diff suppressed; the remaining changes are binary image files and other files not shown.)