initial
This commit is contained in:
parent
6112aff978
commit
f4019ba256
File diff suppressed because one or more lines are too long
Binary files not shown. (Includes one new image, 86 KiB.)
@@ -0,0 +1,45 @@
import math
from decimal import Decimal
from datetime import timedelta
import numpy as np
from typing import Dict, List

from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import bound_norm_random, ep_time_step, config_sim, access_block

from .genesis_states import genesis_states
from .partial_state_update_block import partial_state_update_blocks

params: Dict[str, List[int]] = {
    'month': [0, 12, 36, 50, 100]
}


sim_config = config_sim({
    'N': 1,
    'T': range(100),  # day
    # 'M': params,
})

seeds = {
    'p': np.random.RandomState(1),
}


append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    seeds=seeds,
    partial_state_update_blocks=partial_state_update_blocks
)


def get_configs():
    '''
    Function to extract the configuration information for display in a notebook.
    '''
    return sim_config, genesis_states, seeds, partial_state_update_blocks
@@ -0,0 +1,8 @@
from .model.initialization import *


genesis_states = {
    'network': network,
    'funds': initial_funds
}
Binary files not shown.
@@ -0,0 +1,656 @@
import networkx as nx
from scipy.stats import expon, gamma
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import seaborn as sns

default_beta = 0.2
default_rho = 0.02

def trigger_threshold(requested, funds, supply, beta=default_beta, rho=default_rho):
    '''
    Definition:
        Trigger threshold for a candidate proposal: the amount of conviction
        required before the proposal can pass.

    Parameters:
        requested: funds requested by the proposal
        funds: funds currently available to the community
        supply: total token supply
        beta: maximum share of funds a single proposal may take
        rho: tuning parameter for the trigger function

    Returns:
        rho*supply/(beta - requested/funds)**2 when requested/funds < beta,
        otherwise np.inf (the proposal can never pass).
    '''
    share = requested/funds
    if share < beta:
        return rho*supply/(beta-share)**2
    else:
        return np.inf

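A quick numeric reading of the threshold above (a standalone sketch; the funds and supply figures are illustrative, only beta and rho match the defaults defined in this file):

import numpy as np

beta, rho = 0.2, 0.02
supply, funds = 10**6, 10**5   # illustrative values, not taken from the model run

def trigger(requested):
    share = requested / funds
    return rho * supply / (beta - share)**2 if share < beta else np.inf

print(trigger(0.10 * funds))   # 10% of funds  -> 0.02*1e6/0.1**2  = 2,000,000 conviction required
print(trigger(0.19 * funds))   # near the beta cap -> 0.02*1e6/0.01**2 = 200,000,000
print(trigger(0.25 * funds))   # above beta -> inf, can never pass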
def initial_social_network(network, scale=1, sigmas=3):
    '''
    Definition:
        Function to initialize the social (influence) edges of the networkx object.
        Draws an exponential influence value for each ordered pair of participants
        and keeps only the unusually large draws as 'influence' edges.

    Parameters:
        network: networkx object
        scale: scale of the exponential distribution
        sigmas: how far above the scale a draw must be to become an edge

    Returns:
        Updated networkx object
    '''
    participants = get_nodes_by_type(network, 'participant')

    for i in participants:
        for j in participants:
            if not(j == i):
                influence_rv = expon.rvs(loc=0.0, scale=scale)
                if influence_rv > scale+sigmas*scale**2:
                    network.add_edge(i, j)
                    network.edges[(i, j)]['influence'] = influence_rv
                    network.edges[(i, j)]['type'] = 'influence'
    return network

def initial_conflict_network(network, rate=.25):
    '''
    Definition:
        Function to initialize the conflict edges of the networkx object.
        Each ordered pair of proposals is put in conflict with probability `rate`.

    Parameters:
        network: networkx object
        rate: probability that two proposals conflict

    Returns:
        Updated networkx object
    '''
    proposals = get_nodes_by_type(network, 'proposal')

    for i in proposals:
        for j in proposals:
            if not(j == i):
                conflict_rv = np.random.rand()
                if conflict_rv < rate:
                    network.add_edge(i, j)
                    network.edges[(i, j)]['conflict'] = 1-conflict_rv
                    network.edges[(i, j)]['type'] = 'conflict'
    return network

def gen_new_participant(network, new_participant_holdings):
    '''
    Definition:
        Driving processes for the arrival of participants.

    Parameters:
        network: networkx object
        new_participant_holdings: tokens held by the new participant

    Assumptions:
        Initialized networkx object

    Returns:
        Updated networkx object
    '''

    i = len([node for node in network.nodes])

    network.add_node(i)
    network.nodes[i]['type'] = "participant"

    s_rv = np.random.rand()
    #network.nodes[i]['sentiment'] = s_rv
    network.nodes[i]['holdings'] = new_participant_holdings

    for j in get_nodes_by_type(network, 'proposal'):
        network.add_edge(i, j)

        rv = np.random.rand()
        a_rv = 1-4*(1-rv)*rv  # polarized distribution
        network.edges[(i, j)]['affinity'] = a_rv
        network.edges[(i, j)]['tokens'] = a_rv*network.nodes[i]['holdings']
        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i, j)]['type'] = 'support'

    return network


def gen_new_proposal(network, funds, supply, scale_factor=1.0/100):
    '''
    Definition:
        Driving processes for the arrival of proposals.

    Parameters:
        network: networkx object
        funds: funds currently available to the community
        supply: total token supply
        scale_factor: scales the requested-funds distribution relative to available funds

    Assumptions:
        Initialized networkx object

    Returns:
        Updated networkx object
    '''
    j = len([node for node in network.nodes])
    network.add_node(j)
    network.nodes[j]['type'] = "proposal"

    network.nodes[j]['conviction'] = 0
    network.nodes[j]['status'] = 'candidate'
    network.nodes[j]['age'] = 0

    rescale = funds*scale_factor
    r_rv = gamma.rvs(3, loc=0.001, scale=rescale)
    network.nodes[j]['funds_requested'] = r_rv

    network.nodes[j]['trigger'] = trigger_threshold(r_rv, funds, supply)

    participants = get_nodes_by_type(network, 'participant')
    proposing_participant = np.random.choice(participants)

    for i in participants:
        network.add_edge(i, j)
        if i == proposing_participant:
            network.edges[(i, j)]['affinity'] = 1
        else:
            rv = np.random.rand()
            a_rv = 1-4*(1-rv)*rv  # polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv

        network.edges[(i, j)]['conviction'] = 0
        network.edges[(i, j)]['tokens'] = 0
        network.edges[(i, j)]['type'] = 'support'

    return network

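The affinity draw a_rv = 1 - 4*(1 - rv)*rv used above maps a uniform rv onto a U-shaped ("polarized") value in [0, 1]: draws near 0 or 1 give affinities near 1, draws near 0.5 give affinities near 0. A standalone check, for illustration only:

import numpy as np

rv = np.linspace(0, 1, 5)        # 0, 0.25, 0.5, 0.75, 1
a_rv = 1 - 4*(1 - rv)*rv
print(a_rv)                      # [1.  0.25 0.  0.25 1.] -> mass pushed toward strong like/dislike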
def get_nodes_by_type(g, node_type_selection):
    '''
    Definition:
        Function to extract nodes based on the named type

    Parameters:
        g: networkx object
        node_type_selection: node type

    Returns:
        List of node ids with the desired type

    Example:
        proposals = get_nodes_by_type(network, 'proposal')
    '''
    return [node for node in g.nodes if g.nodes[node]['type'] == node_type_selection]


def get_edges_by_type(g, edge_type_selection):
    '''
    Definition:
        Function to extract edges based on the named type

    Parameters:
        g: networkx object
        edge_type_selection: edge type

    Returns:
        List of edges with the desired type

    Example:
        supporters = get_edges_by_type(network, 'support')
    '''
    return [edge for edge in g.edges if g.edges[edge]['type'] == edge_type_selection]


def conviction_order(network, proposals):
    '''
    Definition:
        Function to sort proposals by current conviction, highest first

    Parameters:
        network: networkx object
        proposals: list of proposal node ids

    Returns:
        List of proposal ids ordered by descending conviction
    '''
    ordered = sorted(proposals, key=lambda j: network.nodes[j]['conviction'], reverse=True)

    return ordered


def social_links(network, participant, scale=1):
    '''
    Definition:
        Function to add influence edges between the existing participants and
        a newly arrived participant

    Parameters:
        network: networkx object
        participant: id of the new participant
        scale: scale of the exponential influence distribution

    Returns:
        Updated networkx object
    '''

    participants = get_nodes_by_type(network, 'participant')

    i = participant
    for j in participants:
        if not(j == i):
            influence_rv = expon.rvs(loc=0.0, scale=scale)
            if influence_rv > scale+scale**2:
                network.add_edge(i, j)
                network.edges[(i, j)]['influence'] = influence_rv
                network.edges[(i, j)]['type'] = 'influence'
    return network


def conflict_links(network, proposal, rate=.25):
    '''
    Definition:
        Function to add conflict edges between the existing proposals and
        a newly arrived proposal

    Parameters:
        network: networkx object
        proposal: id of the new proposal
        rate: probability that two proposals conflict

    Returns:
        Updated networkx object
    '''

    proposals = get_nodes_by_type(network, 'proposal')

    i = proposal
    for j in proposals:
        if not(j == i):
            conflict_rv = np.random.rand()
            if conflict_rv < rate:
                network.add_edge(i, j)
                network.edges[(i, j)]['conflict'] = 1-conflict_rv
                network.edges[(i, j)]['type'] = 'conflict'
    return network

def social_affinity_booster(network, proposal, participant):
    '''
    Definition:
        Influence-weighted average of the tokens that a participant's
        influencers have staked on a proposal, normalized by the participant's
        own holdings; used to boost that participant's effective affinity
        for the proposal.

    Parameters:
        network: networkx object
        proposal: proposal node id
        participant: participant node id

    Returns:
        Scalar affinity boost (0 if the participant has no influencers)
    '''

    participants = get_nodes_by_type(network, 'participant')
    influencers = get_edges_by_type(network, 'influence')

    j = proposal
    i = participant

    i_tokens = network.nodes[i]['holdings']

    influence = np.array([network.edges[(i, node)]['influence'] for node in participants if (i, node) in influencers])
    #print(influence)
    tokens = np.array([network.edges[(node, j)]['tokens'] for node in participants if (i, node) in influencers])
    #print(tokens)

    influence_sum = np.sum(influence)

    if influence_sum > 0:
        boosts = np.sum(tokens*influence)/(influence_sum*i_tokens)
    else:
        boosts = 0

    return np.sum(boosts)

def trigger_sweep(field, trigger_func, xmax=.2, default_alpha=.5):
    '''
    Definition:
        Sweep the trigger function over a grid of (share of funds requested,
        total supply) when field == 'token_supply', or (share of funds
        requested, alpha) when field == 'alpha'.

    Parameters:
        field: 'token_supply' or 'alpha'
        trigger_func: trigger function with signature (requested, funds, supply)
        xmax: maximum share of funds requested to sweep over
        default_alpha: alpha used when sweeping total supply

    Returns:
        Dict of 2D arrays (trigger, log10 trigger, share of max conviction, ...)
        plus the swept axes; "invalid field" for any other field value.
    '''

    if field == 'token_supply':
        alpha = default_alpha
        share_of_funds = np.arange(.001, xmax, .001)
        total_supply = np.arange(0, 10**9, 10**6)
        demo_data_XY = np.outer(share_of_funds, total_supply)

        demo_data_Z0 = np.empty(demo_data_XY.shape)
        demo_data_Z1 = np.empty(demo_data_XY.shape)
        demo_data_Z2 = np.empty(demo_data_XY.shape)
        demo_data_Z3 = np.empty(demo_data_XY.shape)
        for sof_ind in range(len(share_of_funds)):
            sof = share_of_funds[sof_ind]
            for ts_ind in range(len(total_supply)):
                ts = total_supply[ts_ind]
                tc = ts/(1-alpha)
                trigger = trigger_func(sof, 1, ts)
                demo_data_Z0[sof_ind, ts_ind] = np.log10(trigger)
                demo_data_Z1[sof_ind, ts_ind] = trigger
                demo_data_Z2[sof_ind, ts_ind] = trigger/tc  # share of maximum possible conviction
                demo_data_Z3[sof_ind, ts_ind] = np.log10(trigger/tc)
        return {'log10_trigger': demo_data_Z0,
                'trigger': demo_data_Z1,
                'share_of_max_conv': demo_data_Z2,
                'log10_share_of_max_conv': demo_data_Z3,
                'total_supply': total_supply,
                'share_of_funds': share_of_funds}
    elif field == 'alpha':
        alpha = np.arange(.5, 1, .01)
        share_of_funds = np.arange(.001, xmax, .001)
        total_supply = 10**9
        demo_data_XY = np.outer(share_of_funds, alpha)

        demo_data_Z4 = np.empty(demo_data_XY.shape)
        demo_data_Z5 = np.empty(demo_data_XY.shape)
        demo_data_Z6 = np.empty(demo_data_XY.shape)
        demo_data_Z7 = np.empty(demo_data_XY.shape)
        for sof_ind in range(len(share_of_funds)):
            sof = share_of_funds[sof_ind]
            for a_ind in range(len(alpha)):
                ts = total_supply
                a = alpha[a_ind]
                tc = ts/(1-a)
                trigger = trigger_func(sof, 1, ts)
                demo_data_Z4[sof_ind, a_ind] = np.log10(trigger)
                demo_data_Z5[sof_ind, a_ind] = trigger
                demo_data_Z6[sof_ind, a_ind] = trigger/tc  # share of maximum possible conviction
                demo_data_Z7[sof_ind, a_ind] = np.log10(trigger/tc)

        return {'log10_trigger': demo_data_Z4,
                'trigger': demo_data_Z5,
                'share_of_max_conv': demo_data_Z6,
                'log10_share_of_max_conv': demo_data_Z7,
                'alpha': alpha,
                'share_of_funds': share_of_funds}

    else:
        return "invalid field"

def trigger_plotter(share_of_funds, Z, color_label, y, ylabel, cmap='jet'):
    '''
    Definition:
        Contour plot of one output of trigger_sweep as a function of the
        share of funds requested (x-axis) and the swept field (y-axis).

    Parameters:
        share_of_funds: x-axis values
        Z: 2D array of values to plot
        color_label: label for the colorbar
        y: y-axis values
        ylabel: label for the y-axis
        cmap: matplotlib colormap name
    '''
    dims = (10, 5)
    fig, ax = plt.subplots(figsize=dims)

    cf = plt.contourf(share_of_funds, y, Z.T, 100, cmap=cmap)
    cbar = plt.colorbar(cf)
    plt.axis([share_of_funds[0], share_of_funds[-1], y[0], y[-1]])
    #ax.set_xscale('log')
    plt.ylabel(ylabel)
    plt.xlabel('Share of Funds Requested')
    plt.title('Trigger Function Map')

    cbar.ax.set_ylabel(color_label)

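A possible usage sketch for the two helpers above; the keys come from the dict returned by trigger_sweep, and the axis labels are only illustrative:

# Sweep the trigger threshold over alpha and plot the log10 threshold surface.
# trigger_threshold, trigger_sweep and trigger_plotter are the functions defined above.
sweep = trigger_sweep('alpha', trigger_threshold, xmax=.2)

trigger_plotter(sweep['share_of_funds'],
                sweep['log10_trigger'],
                'log10 of conviction required to pass',
                sweep['alpha'],
                'alpha')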
def snap_plot(nets, size_scale=1/500, ani=False, dims=(20, 20), savefigs=False):
    '''
    Definition:
        Plot a snapshot of each network in `nets` as a bipartite layout of
        participants (left) and proposals (right), with node color encoding
        proposal status and edge color encoding tokens staked.

    Parameters:
        nets: sequence of networkx objects, one per timestep
        size_scale: scales funds requested / holdings into node sizes
        ani: if True, draw all frames on one figure for animation
        dims: figure dimensions
        savefigs: if True, save each frame as a png with a random id prefix
    '''

    last_net = nets[-1]

    last_props = get_nodes_by_type(last_net, 'proposal')
    M = len(last_props)
    last_parts = get_nodes_by_type(last_net, 'participant')
    N = len(last_parts)
    pos = {}

    for ind in range(N):
        i = last_parts[ind]
        pos[i] = np.array([0, 2*ind-N])

    for ind in range(M):
        j = last_props[ind]
        pos[j] = np.array([1, 2*N/M*ind-N])

    if ani:
        figs = []
        fig, ax = plt.subplots(figsize=dims)

    if savefigs:
        counter = 0
        length = 10
        import string
        unique_id = ''.join([np.random.choice(list(string.ascii_letters + string.digits)) for _ in range(length)])
    for net in nets:
        edges = get_edges_by_type(net, 'support')
        max_tok = np.max([net.edges[e]['tokens'] for e in edges])

        E = len(edges)

        net_props = get_nodes_by_type(net, 'proposal')
        net_parts = get_nodes_by_type(net, 'participant')
        net_node_label = {}

        num_nodes = len([node for node in net.nodes])

        node_color = np.empty((num_nodes, 4))
        node_size = np.empty(num_nodes)

        edge_color = np.empty((E, 4))
        cm = plt.get_cmap('Reds')

        cNorm = colors.Normalize(vmin=0, vmax=max_tok)
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)

        net_cand = [j for j in net_props if net.nodes[j]['status'] == 'candidate']

        for j in net_props:
            node_size[j] = net.nodes[j]['funds_requested']*size_scale
            if net.nodes[j]['status'] == "candidate":
                node_color[j] = colors.to_rgba('blue')
                trigger = net.nodes[j]['trigger']
                conviction = net.nodes[j]['conviction']
                percent_of_trigger = " "+str(int(100*conviction/trigger))+'%'
                net_node_label[j] = str(percent_of_trigger)
            elif net.nodes[j]['status'] == "active":
                node_color[j] = colors.to_rgba('orange')
                net_node_label[j] = ''
            elif net.nodes[j]['status'] == "completed":
                node_color[j] = colors.to_rgba('green')
                net_node_label[j] = ''
            elif net.nodes[j]['status'] == "failed":
                node_color[j] = colors.to_rgba('gray')
                net_node_label[j] = ''
            elif net.nodes[j]['status'] == "killed":
                node_color[j] = colors.to_rgba('black')
                net_node_label[j] = ''

        for i in net_parts:
            node_size[i] = net.nodes[i]['holdings']*size_scale/10
            node_color[i] = colors.to_rgba('red')
            net_node_label[i] = ''

        included_edges = []
        for ind in range(E):
            e = edges[ind]
            tokens = net.edges[e]['tokens']
            edge_color[ind] = scalarMap.to_rgba(tokens)
            if e[1] in net_cand:
                included_edges.append(e)

        iE = len(included_edges)
        included_edge_color = np.empty((iE, 4))
        for ind in range(iE):
            e = included_edges[ind]
            tokens = net.edges[e]['tokens']
            included_edge_color[ind] = scalarMap.to_rgba(tokens)

        # nx.draw(net,
        #         pos=pos,
        #         node_size = node_size,
        #         node_color = node_color,
        #         edge_color = included_edge_color,
        #         edgelist=included_edges,
        #         labels = net_node_label)
        # plt.title('Tokens Staked by Participants to Proposals')

        if ani:
            nx.draw(net,
                    pos=pos,
                    node_size=node_size,
                    node_color=node_color,
                    edge_color=included_edge_color,
                    edgelist=included_edges,
                    labels=net_node_label, ax=ax)
            figs.append(fig)

        else:
            nx.draw(net,
                    pos=pos,
                    node_size=node_size,
                    node_color=node_color,
                    edge_color=included_edge_color,
                    edgelist=included_edges,
                    labels=net_node_label)
            plt.title('Tokens Staked by Participants to Proposals')
            if savefigs:
                plt.savefig(unique_id+'_fig'+str(counter)+'.png')
                counter = counter+1
            plt.show()

    if ani:
        # animation output not implemented yet
        #anim = animation.ArtistAnimation(fig, , interval=50, blit=True, repeat_delay=1000)
        #plt.show()
        pass

def pad(vec, length, fill=True):
    '''
    Definition:
        Pad a vector out to a fixed length, filling the tail with zeros
        (fill=True) or NaNs (fill=False).

    Parameters:
        vec: sequence of values
        length: target length (must be >= len(vec))
        fill: pad with zeros if True, otherwise NaN

    Returns:
        numpy array of shape (length,)
    '''

    if fill:
        padded = np.zeros(length,)
    else:
        padded = np.empty(length,)
        padded[:] = np.nan

    for i in range(len(vec)):
        padded[i] = vec[i]

    return padded

def make2D(key, data, fill=False):
    '''
    Definition:
        Stack a dataframe column of variable-length arrays into a single 2D
        array, padding each row with pad().

    Parameters:
        key: column name in `data`
        data: pandas DataFrame
        fill: passed through to pad()

    Returns:
        2D numpy array with one row per dataframe row
    '''
    maxL = data[key].apply(len).max()
    newkey = 'padded_'+key
    data[newkey] = data[key].apply(lambda x: pad(x, maxL, fill))
    reshaped = np.array([a for a in data[newkey].values])

    return reshaped


def quantile_plot(xkey, ykey, dataframe, dq=.1, logy=False, return_df=False):
    '''
    Plot the quantiles of the array-valued column `ykey` against `xkey`.
    '''
    qX = np.arange(0, 1+dq, dq)

    data = dataframe[[xkey, ykey]].copy()

    qkeys = []
    for q in qX:
        qkey = 'quantile'+str(int(100*q))
        #print(qkey)
        data[qkey] = data[ykey].apply(lambda arr: np.quantile(arr, q))
        #print(data[qkey].head())
        qkeys.append(qkey)

    data[[xkey]+qkeys].plot(x=xkey, logy=logy)

    plt.title(ykey + " Quantile Plot")
    plt.ylabel(ykey)
    labels = [str(int(100*q))+"$^{th}$ Percentile" for q in qX]

    plt.legend(labels, ncol=1, loc='center left', bbox_to_anchor=(1, .5))
    if return_df:
        return data

def affinities_plot(df):
    '''
    Heatmap of participant-proposal affinities in the last network state.
    '''
    last_net = df.network.values[-1]
    last_props = get_nodes_by_type(last_net, 'proposal')
    M = len(last_props)
    last_parts = get_nodes_by_type(last_net, 'participant')
    N = len(last_parts)

    affinities = np.empty((N, M))
    for i_ind in range(N):
        for j_ind in range(M):
            i = last_parts[i_ind]
            j = last_props[j_ind]
            affinities[i_ind][j_ind] = last_net.edges[(i, j)]['affinity']

    dims = (20, 5)
    fig, ax = plt.subplots(figsize=dims)

    sns.heatmap(affinities.T,
                xticklabels=last_parts,
                yticklabels=last_props,
                square=True,
                cbar=True,
                ax=ax)

    plt.title('affinities between participants and proposals')
    plt.ylabel('proposal_id')
    plt.xlabel('participant_id')

@@ -0,0 +1,99 @@

# import libraries
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from .conviction_helper_functions import *

# Parameters
# maximum share of funds a proposal can take
beta = .2  # later we should set this to be a param so we can sweep it
# tuning param for the trigger function
rho = .001
supply = 1231286.81


n = 60  # initial participants
m = 3   # initial proposals

initial_sentiment = .6

theta = .35
kappa = 6  # bonding curve curvature
alpha = 0.5
sale_price = .1
sensitivity = .75
tmin = 7  # unit days; minimum periods passed before a proposal can pass
min_supp = 50  # number of tokens that must be staked for a proposal to remain a candidate
sentiment_decay = .01  # termed mu in the state update function
base_completion_rate = 100
base_failure_rate = 200
# trigger_func = trigger_threshold
tax_rate = .02

initial_funds = 40781.42

def initialize_network(n, m, initial_funds, expected_supply=10**6):
    '''
    Definition:
        Function to initialize the networkx object with n participants and
        m candidate proposals.

    Parameters:
        n: number of initial participants
        m: number of initial proposals
        initial_funds: funds available to the community at t=0
        expected_supply: expected total token supply, used to scale holdings

    Returns:
        network, initial_funds, initial_supply, total_requested
    '''
    # initialize networkx graph
    network = nx.DiGraph()
    # create participant nodes with type and token holdings
    for i in range(n):
        network.add_node(i)
        network.nodes[i]['type'] = "participant"

        h_rv = expon.rvs(loc=0.0, scale=expected_supply/n)
        network.nodes[i]['holdings'] = h_rv  # SOL check

        # s_rv = np.random.rand()
        # network.nodes[i]['sentiment'] = s_rv

    participants = get_nodes_by_type(network, 'participant')
    initial_supply = np.sum([network.nodes[i]['holdings'] for i in participants])


    # Generate initial proposals
    for ind in range(m):
        j = n+ind
        network.add_node(j)
        network.nodes[j]['type'] = "proposal"
        network.nodes[j]['conviction'] = 0
        network.nodes[j]['status'] = 'candidate'
        network.nodes[j]['age'] = 0

        r_rv = gamma.rvs(3, loc=0.001, scale=10000)
        network.nodes[j]['funds_requested'] = r_rv

        network.nodes[j]['trigger'] = trigger_threshold(r_rv, initial_funds, initial_supply, beta=beta, rho=rho)

        for i in range(n):
            network.add_edge(i, j)

            rv = np.random.rand()
            a_rv = 1-4*(1-rv)*rv  # polarized distribution
            network.edges[(i, j)]['affinity'] = a_rv
            network.edges[(i, j)]['tokens'] = 0
            network.edges[(i, j)]['conviction'] = 0
            network.edges[(i, j)]['type'] = 'support'

    proposals = get_nodes_by_type(network, 'proposal')
    total_requested = np.sum([network.nodes[i]['funds_requested'] for i in proposals])

    # network = initial_conflict_network(network, rate = .25)
    # network = initial_social_network(network, scale = 1)

    return network, initial_funds, initial_supply, total_requested

# initializers
network, initial_funds, initial_supply, total_requested = initialize_network(n, m, initial_funds)

@@ -0,0 +1,164 @@

import numpy as np
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
# from scipy.stats import expon, gamma


# Phase 2
# Behaviors
def check_progress(params, step, sL, s):
    '''
    Driving processes: completion of previously funded proposals
    '''

    network = s['network']
    proposals = get_nodes_by_type(network, 'proposal')

    completed = []
    failed = []
    for j in proposals:
        if network.nodes[j]['status'] == 'active':
            grant_size = network.nodes[j]['funds_requested']
            likelihood = 1.0/(base_completion_rate+np.log(grant_size))

            failure_rate = 1.0/(base_failure_rate+np.log(grant_size))
            if np.random.rand() < likelihood:
                completed.append(j)
            elif np.random.rand() < failure_rate:
                failed.append(j)

    return({'completed': completed, 'failed': failed})


# Mechanisms
def complete_proposal(params, step, sL, s, _input):
    '''
    Book-keeping: mark completed and failed proposals in the network
    '''

    network = s['network']
    participants = get_nodes_by_type(network, 'participant')
    proposals = get_nodes_by_type(network, 'proposal')
    #competitors = get_edges_by_type(network, 'conflict')

    completed = _input['completed']
    for j in completed:
        network.nodes[j]['status'] = 'completed'

        # for c in proposals:
        #     if (j,c) in competitors:
        #         conflict = network.edges[(j,c)]['conflict']
        #         for i in participants:
        #             network.edges[(i,c)]['affinity'] = network.edges[(i,c)]['affinity'] *(1-conflict)

        for i in participants:
            force = network.edges[(i, j)]['affinity']
            # sentiment = network.nodes[i]['sentiment']
            # network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)

    failed = _input['failed']
    for j in failed:
        network.nodes[j]['status'] = 'failed'
        for i in participants:
            force = -network.edges[(i, j)]['affinity']
            # sentiment = network.nodes[i]['sentiment']
            # network.nodes[i]['sentiment'] = get_sentimental(sentiment, force, decay=0)

    key = 'network'
    value = network

    return (key, value)


# Phase 3
# Behaviors
def participants_decisions(params, step, sL, s):
    '''
    High sentiment, high affinity => buy
    Low sentiment, low affinities => burn
    Assign tokens to top affinities
    '''
    network = s['network']
    participants = get_nodes_by_type(network, 'participant')
    proposals = get_nodes_by_type(network, 'proposal')
    candidates = [j for j in proposals if network.nodes[j]['status'] == 'candidate']
    #sensitivity = params['sensitivity']

    gain = .01
    delta_holdings = {}
    proposals_supported = {}
    for i in participants:

        #engagement_rate = .3*network.nodes[i]['sentiment']
        engagement_rate = .3*initial_sentiment
        if np.random.rand() < engagement_rate:

            #force = network.nodes[i]['sentiment']-sensitivity
            force = initial_sentiment - sensitivity
            delta_holdings[i] = network.nodes[i]['holdings']*gain*force

            support = []
            for j in candidates:
                booster = social_affinity_booster(network, j, i)
                affinity = network.edges[(i, j)]['affinity']+booster
                cutoff = sensitivity*np.max([network.edges[(i, p)]['affinity'] for p in candidates])
                if cutoff < .5:
                    cutoff = .5

                if affinity > cutoff:
                    support.append(j)

            proposals_supported[i] = support
        else:
            delta_holdings[i] = 0
            proposals_supported[i] = [j for j in candidates if network.edges[(i, j)]['tokens'] > 0]

    return({'delta_holdings': delta_holdings, 'proposals_supported': proposals_supported})

# Mechanisms
def update_tokens(params, step, sL, s, _input):
    '''
    Description:
        Update everyone's holdings and their conviction for each proposal
    '''

    network = s['network']
    delta_holdings = _input['delta_holdings']
    proposals = get_nodes_by_type(network, 'proposal')
    candidates = [j for j in proposals if network.nodes[j]['status'] == 'candidate']
    proposals_supported = _input['proposals_supported']
    participants = get_nodes_by_type(network, 'participant')

    for i in participants:
        network.nodes[i]['holdings'] = network.nodes[i]['holdings']+delta_holdings[i]
        supported = proposals_supported[i]
        total_affinity = np.sum([network.edges[(i, j)]['affinity'] for j in supported])
        for j in candidates:
            if j in supported:
                normalized_affinity = network.edges[(i, j)]['affinity']/total_affinity
                network.edges[(i, j)]['tokens'] = normalized_affinity*network.nodes[i]['holdings']
            else:
                network.edges[(i, j)]['tokens'] = 0

            prior_conviction = network.edges[(i, j)]['conviction']
            current_tokens = network.edges[(i, j)]['tokens']
            network.edges[(i, j)]['conviction'] = current_tokens+alpha*prior_conviction

    for j in candidates:
        network.nodes[j]['conviction'] = np.sum([network.edges[(i, j)]['conviction'] for i in participants])
        total_tokens = np.sum([network.edges[(i, j)]['tokens'] for i in participants])
        if total_tokens < min_supp:
            network.nodes[j]['status'] = 'killed'

    key = 'network'
    value = network

    return (key, value)

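The edge update conviction = tokens + alpha*prior_conviction in update_tokens is a geometric accumulator: with a constant stake x and retention parameter alpha, conviction converges to x/(1-alpha), which is the "maximum possible conviction" that trigger_sweep uses as tc = ts/(1-alpha). A standalone illustration with arbitrary numbers:

alpha = 0.5      # retention parameter, as set in initialization.py
tokens = 100     # a constant stake, chosen only for illustration

conviction = 0
for t in range(20):
    conviction = tokens + alpha*conviction

print(conviction)            # ~199.9998, approaching tokens/(1-alpha)
print(tokens/(1-alpha))      # 200.0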
@@ -0,0 +1,112 @@

import numpy as np
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx

# parameters:
sensitivity = 0.75
tmin = 7

# Behaviors
def trigger_function(params, step, sL, s):
    '''
    Accept candidate proposals whose conviction has crossed the trigger
    threshold, capping the total funds released at the available funds.
    '''
    network = s['network']
    funds = s['funds']
    #supply = s['supply']
    proposals = get_nodes_by_type(network, 'proposal')

    accepted = []
    triggers = {}
    funds_to_be_released = 0
    for j in proposals:
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            age = network.nodes[j]['age']
            threshold = trigger_threshold(requested, funds, supply)
            if age > tmin:
                conviction = network.nodes[j]['conviction']
                if conviction > threshold:
                    accepted.append(j)
                    funds_to_be_released = funds_to_be_released + requested
            else:
                threshold = np.nan

            triggers[j] = threshold

    # catch over-release and keep the highest conviction results
    if funds_to_be_released > funds:

        ordered = conviction_order(network, accepted)
        accepted = []
        release = 0
        ind = 0
        while ind < len(ordered) and release + network.nodes[ordered[ind]]['funds_requested'] < funds:
            accepted.append(ordered[ind])
            release = release + network.nodes[ordered[ind]]['funds_requested']
            ind = ind+1

    return({'accepted': accepted, 'triggers': triggers})

# Mechanisms
def decrement_funds(params, step, sL, s, _input):
    '''
    Subtract the funds requested by each newly accepted proposal.
    '''

    funds = s['funds']
    network = s['network']
    accepted = _input['accepted']

    # decrement funds
    for j in accepted:
        funds = funds - network.nodes[j]['funds_requested']

    key = 'funds'
    value = funds

    return (key, value)

def update_proposals(params, step, sL, s, _input):
    '''
    Record new trigger values and activate accepted proposals.
    '''

    network = s['network']
    accepted = _input['accepted']
    triggers = _input['triggers']
    participants = get_nodes_by_type(network, 'participant')
    proposals = get_nodes_by_type(network, 'proposal')
    #sensitivity = params['sensitivity']

    for j in proposals:
        if j in triggers:
            network.nodes[j]['trigger'] = triggers[j]

    # bookkeeping conviction and participant sentiment
    for j in accepted:
        network.nodes[j]['status'] = 'active'
        network.nodes[j]['conviction'] = np.nan
        # change status to active
        for i in participants:

            # operating on edge = (i,j)
            # reset tokens assigned to other candidates
            network.edges[(i, j)]['tokens'] = 0
            network.edges[(i, j)]['conviction'] = np.nan

            # update participants' sentiments (positive or negative)
            affinities = [network.edges[(i, p)]['affinity'] for p in proposals if not(p in accepted)]
            if len(affinities) > 1:
                max_affinity = np.max(affinities)
                force = network.edges[(i, j)]['affinity']-sensitivity*max_affinity
            else:
                force = 0

            # based on their affinities to the accepted proposals
            # network.nodes[i]['sentiment'] = get_sentimental(network.nodes[i]['sentiment'], force, False)

    key = 'network'
    value = network

    return (key, value)

@@ -0,0 +1,106 @@

import numpy as np
import pandas as pd
from .initialization import *
from .conviction_helper_functions import *
import networkx as nx
from scipy.stats import expon, gamma

# hyperparameters
sentiment = 0.6


# Behaviors
def driving_process(params, step, sL, s):
    '''
    Driving processes: arrival of participants, proposals and funds
    '''
    arrival_rate = 10/(1+sentiment)
    rv1 = np.random.rand()
    new_participant = bool(rv1 < 1/arrival_rate)
    supporters = get_edges_by_type(s['network'], 'support')

    len_parts = len(get_nodes_by_type(s['network'], 'participant'))
    #supply = s['supply']
    expected_holdings = .1*supply/len_parts
    if new_participant:
        h_rv = expon.rvs(loc=0.0, scale=expected_holdings)
        new_participant_holdings = h_rv
    else:
        new_participant_holdings = 0

    network = s['network']
    affinities = [network.edges[e]['affinity'] for e in supporters]
    median_affinity = np.median(affinities)

    proposals = get_nodes_by_type(network, 'proposal')
    fund_requests = [network.nodes[j]['funds_requested'] for j in proposals if network.nodes[j]['status'] == 'candidate']

    funds = s['funds']
    total_funds_requested = np.sum(fund_requests)

    proposal_rate = 1/median_affinity * (1+total_funds_requested/funds)
    rv2 = np.random.rand()
    new_proposal = bool(rv2 < 1/proposal_rate)

    #sentiment = s['sentiment']
    funds = s['funds']
    scale_factor = funds*sentiment**2/10000

    if scale_factor < 1:
        scale_factor = 1

    # this shouldn't happen but expon is throwing domain errors
    if sentiment > .4:
        funds_arrival = expon.rvs(loc=0, scale=scale_factor)
    else:
        funds_arrival = 0

    return({'new_participant': new_participant,
            'new_participant_holdings': new_participant_holdings,
            'new_proposal': new_proposal,
            'funds_arrival': funds_arrival})


# Mechanisms
def update_network(params, step, sL, s, _input):
    '''
    Add arriving participants and proposals, age proposals and refresh triggers
    '''
    network = s['network']
    funds = s['funds']
    #supply = s['supply']

    new_participant = _input['new_participant']
    new_proposal = _input['new_proposal']

    if new_participant:
        new_participant_holdings = _input['new_participant_holdings']
        network = gen_new_participant(network, new_participant_holdings)

    if new_proposal:
        network = gen_new_proposal(network, funds, supply)

    # update age of the existing proposals
    proposals = get_nodes_by_type(network, 'proposal')

    for j in proposals:
        network.nodes[j]['age'] = network.nodes[j]['age']+1
        if network.nodes[j]['status'] == 'candidate':
            requested = network.nodes[j]['funds_requested']
            network.nodes[j]['trigger'] = trigger_threshold(requested, funds, supply)
        else:
            network.nodes[j]['trigger'] = np.nan

    key = 'network'
    value = network

    return (key, value)

def increment_funds(params, step, sL, s, _input):
    '''
    Add newly arrived funds to the community pool
    '''
    funds = s['funds']
    funds_arrival = _input['funds_arrival']

    # increment funds
    funds = funds + funds_arrival

    key = 'funds'
    value = funds

    return (key, value)

@@ -0,0 +1,45 @@
from .model.system import *
from .model.participants import *
from .model.proposals import *

# The Partial State Update Blocks
partial_state_update_blocks = [
    {
        # system.py:
        'policies': {
            'random': driving_process
        },
        'variables': {
            'network': update_network,
            'funds': increment_funds,
        }
    },
    {
        # participants.py
        'policies': {
            'completion': check_progress
        },
        'variables': {
            'network': complete_proposal
        }
    },
    {
        # proposals.py
        'policies': {
            'release': trigger_function
        },
        'variables': {
            'funds': decrement_funds,
            'network': update_proposals
        }
    },
    {
        # participants.py
        'policies': {
            'participants_act': participants_decisions
        },
        'variables': {
            'network': update_tokens
        }
    }
]

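A small sketch for sanity-checking the wiring above from a notebook; the package path in the import is an assumption, not something fixed by this commit:

# Print the policy -> state-update wiring of each substep.
from conviction.partial_state_update_block import partial_state_update_blocks  # assumed path

for idx, block in enumerate(partial_state_update_blocks):
    print(idx,
          'policies:', list(block['policies'].keys()),
          'variables:', list(block['variables'].keys()))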
@@ -0,0 +1,60 @@
import pandas as pd
from .model.conviction_helper_functions import *
from model import economyconfig
from cadCAD.engine import ExecutionMode, ExecutionContext
exec_mode = ExecutionMode()
from cadCAD.engine import Executor
from cadCAD import configs

def run(input_config=configs):
    '''
    Definition:
        Run simulation

    Parameters:
        input_config: Optional way to pass in system configuration
    '''
    exec_mode = ExecutionMode()
    local_mode_ctx = ExecutionContext(context=exec_mode.local_mode)

    simulation = Executor(exec_context=local_mode_ctx, configs=input_config)
    raw_system_events, tensor_field, sessions = simulation.execute()
    # Result System Events DataFrame
    df = pd.DataFrame(raw_system_events)
    return df

def postprocessing(df):
    '''
    Add per-timestep summary columns (counts, funds and conviction by proposal
    status) to the raw results; return the full dataframe and a copy filtered
    to the last substep of each timestep.
    '''
    # Extract information from dataframe
    df['conviction'] = df.network.apply(lambda g: np.array([g.nodes[j]['conviction'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))
    df['candidate_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))
    df['candidate_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))
    df['killed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'killed']))
    df['killed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'killed']))
    df['candidate_funds_requested'] = df.network.apply(lambda g: np.array([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))
    df['active_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'active']))
    df['active_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'active']))
    df['failed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'failed']))
    df['failed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'failed']))
    df['completed_count'] = df.network.apply(lambda g: len([j for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'completed']))
    df['completed_funds'] = df.network.apply(lambda g: np.sum([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'completed']))

    df['funds_requested'] = df.network.apply(lambda g: np.array([g.nodes[j]['funds_requested'] for j in get_nodes_by_type(g, 'proposal')]))
    df['share_of_funds_requested'] = df.candidate_funds_requested/df.funds

    df['share_of_funds_requested_all'] = df.funds_requested/df.funds

    df['triggers'] = df.network.apply(lambda g: np.array([g.nodes[j]['trigger'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))
    df['conviction_share_of_trigger'] = df.conviction/df.triggers
    df['age'] = df.network.apply(lambda g: np.array([g.nodes[j]['age'] for j in get_nodes_by_type(g, 'proposal') if g.nodes[j]['status'] == 'candidate']))

    df['age_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['age'] for j in get_nodes_by_type(g, 'proposal')]))
    df['conviction_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['conviction'] for j in get_nodes_by_type(g, 'proposal')]))
    df['triggers_all'] = df.network.apply(lambda g: np.array([g.nodes[j]['trigger'] for j in get_nodes_by_type(g, 'proposal')]))

    df['conviction_share_of_trigger_all'] = df.conviction_all/df.triggers_all

    rdf = df[df.substep == df.substep.max()].copy()

    return df, rdf
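A possible end-to-end usage sketch: importing this module pulls in economyconfig, whose append_configs call registers the simulation, after which run() executes it and postprocessing() adds the summary columns. The package/module path below is assumed from this commit's layout, not confirmed by it:

# Assumed module path; adjust to wherever run.py lives in the package.
from conviction import run as runner

df = runner.run()                      # raw cadCAD system events as a DataFrame
df, rdf = runner.postprocessing(df)    # rdf keeps only the last substep of each timestep

rdf[['timestep', 'funds', 'candidate_count', 'active_count', 'completed_count']].head()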