# Random alphanumeric run tag (length N, defined elsewhere in this file) used
# to namespace this run's data/plot output directories.
# Fix: dropped the redundant str() wrapper — ''.join() already returns a str.
addon = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
#change directory for data and plot outputs
# Fix: the original linux/linux2/darwin vs win32 elif chain left `datadir`
# unassigned (latent NameError) on any other platform; treat everything
# that is not Windows as POSIX-style instead.
if sys.platform == "win32":
    datadir = root + '\\data\\' + addon + '\\'  #windows
else:
    datadir = root + '/data/' + addon + '/'  #linux, mac, and other POSIX platforms
def empirical_dataframe():
    """Build the dataframe of empirical accuracy values.

    Returns a 12-row pandas DataFrame with columns
    ('time','drug','accuracy','trial'): for each of the four delay times,
    one 'control (empirical)' row (mean of the two pre-drug conditions),
    one 'PHE (empirical)' row, and one 'GFC (empirical)' row. The 'trial'
    column is always 0 for empirical data.

    Fix: as written the loop used an unbound row counter `q` and the
    function returned nothing; `q` is now initialized, advanced by 3 per
    delay time, and the dataframe is returned.
    """
    columns = ('time', 'drug', 'accuracy', 'trial')
    emp_times = [3.0, 5.0, 7.0, 9.0]
    # 4 delay times x 3 conditions = 12 rows
    emp_dataframe = pd.DataFrame(columns=columns, index=np.arange(0, 12))
    pre_PHE = [0.972, 0.947, 0.913, 0.798]
    pre_GFC = [0.970, 0.942, 0.882, 0.766]
    post_GFC = [0.966, 0.928, 0.906, 0.838]
    post_PHE = [0.972, 0.938, 0.847, 0.666]
    q = 0  # running row index into emp_dataframe
    for t in range(len(emp_times)):
        # control = average of the two pre-drug (baseline) accuracy curves
        emp_dataframe.loc[q] = [emp_times[t], 'control (empirical)',
                                np.average([pre_GFC[t], pre_PHE[t]]), 0]
        emp_dataframe.loc[q + 1] = [emp_times[t], 'PHE (empirical)', post_PHE[t], 0]
        emp_dataframe.loc[q + 2] = [emp_times[t], 'GFC (empirical)', post_GFC[t], 0]
        q += 3
    return emp_dataframe
# Body of the stimulus-generation function (its def header is on a line not
# shown here). Produces, for P['n_trials'] trials: the trial indices, a
# perceived mask, and the cue directions. Uses a RandomState seeded with
# P['seed'] so stimulus generation is reproducible across runs.
42 trials=np.arange(P['n_trials'])
43 perceived=np.ones(P['n_trials']) #1 = cue correctly perceived (not necessarily remembered), 0 = misperceived
44 rng=np.random.RandomState(seed=P['seed'])
45 cues=2*rng.randint(2,size=P['n_trials'])-1 #cue direction per trial: -1 or +1 (left vs right)
46 for n in range(len(perceived)):
# with probability P['misperceive'], mark trial n's cue as not perceived
47 if rng.rand()<P['misperceive']: perceived[n]=0
48 return trials, perceived, cues
51 '''drug approximations'''
class MySolver(nengo.solvers.Solver):
    #Custom "solver" handed to the builder when a network is rebuilt.
    #When the simulator builds the network it looks for a solver to calculate
    #the decoders; rather than the normal least-squares solver, this one just
    #returns a set of previously computed decoders, so the rebuilt
    #connections keep the decoders they already had.
    def __init__(self, weights):
        #'weights' is actually the old decoders to be reused
        self.weights = False  #tell the builder these are decoders, not weights
        self.my_weights = weights

    def __call__(self, A, Y, rng=None, E=None):
        #invoked by the builder; returns (decoders, info dict)
        return self.my_weights.T, dict()
# Apply a biophysical drug model by scaling the wm ensemble's gains/biases
# and rebuilding the simulator with the original (pre-drug) decoders.
63 def reset_gain_bias(P,model,sim,wm,wm_recurrent,wm_to_decision,drug):
64 #set gains and biases as a constant multiple of the old values
# P['drug_effect_biophysical'][drug] = (gain multiplier, bias multiplier)
65 wm.gain = sim.data[wm].gain * P['drug_effect_biophysical'][drug][0]
66 wm.bias = sim.data[wm].bias * P['drug_effect_biophysical'][drug][1]
67 #set the solver of each of the connections coming out of wm using the custom MySolver class
68 #with input equal to the old decoders. We use the old decoders because we don't want the builder
69 #to optimize the decoders to the new gain/bias values, otherwise it would "adapt" to the drug
70 wm_recurrent.solver = MySolver(sim.model.params[wm_recurrent].weights)
71 wm_to_decision.solver=MySolver(sim.model.params[wm_to_decision].weights)
72 #rebuild the network to effect the gain/bias change
73 sim=nengo.Simulator(model,dt=P['dt'])
# NOTE(review): `sim` is rebound locally here; the rebuilt simulator is
# presumably returned on the following line(s) not shown in this view — confirm.
76 '''dataframe initialization'''
# Build the per-timestep results dataframe for one (drug, trial) simulation:
# decoded wm magnitude, decision output, and correctness at each probed time.
77 def primary_dataframe(P,sim,drug,trial,probe_wm,probe_output):
80 columns=('time','drug','wm','output','correct','trial')
81 df_primary = pd.DataFrame(columns=columns, index=np.arange(0,len(P['timesteps'])))
83 for t in P['timesteps']:
# magnitude of the first decoded dimension of the wm probe at timestep t
84 wm_val=np.abs(sim.data[probe_wm][t][0])
85 output_val=sim.data[probe_output][t][0]
# 1 if the output sign matches this trial's cue, else 0
86 correct=get_correct(P['cues'][trial],output_val)
# NOTE(review): the row counter `i` and the time value `rt` are bound on
# lines not shown in this view (presumably i is initialized/incremented per
# row and rt is the real time for timestep t) — confirm against the full file.
88 df_primary.loc[i]=[rt,drug,wm_val,output_val,correct,trial]
# Build a dataframe of smoothed firing rates: one row per (neuron, timestep)
# for a fraction P['frac'] of the wm ensemble's neurons, tagged with each
# neuron's tuning class relative to this trial's cue.
92 def firing_dataframe(P,sim,drug,trial,sim_wm,probe_spikes):
95 columns=('time','drug','neuron-trial','tuning','firing_rate')
96 df_firing = pd.DataFrame(columns=columns, index=np.arange(0,len(P['timesteps'])*\
97 int(P['neurons_wm']*P['frac'])))
# Gaussian smoothing kernel over a window of t_width seconds
# (NOTE(review): t_width is bound on a line not shown in this view — confirm),
# L1-normalized so convolution preserves total spike mass.
99 t_h = np.arange(t_width / P['dt']) * P['dt'] - t_width / 2.0
100 h = np.exp(-t_h ** 2 / (2 * P['sigma_smoothing'] ** 2))
101 h = h / np.linalg.norm(h, 1)
102 for nrn in range(int(P['neurons_wm']*P['frac'])):
104 enc = sim_wm.encoders[nrn]
# classify the neuron's encoder relative to this trial's cue
105 tuning = get_tuning(P,trial,enc)
106 spikes = sim.data[probe_spikes][:,nrn]
# smooth the spike train into a continuous firing-rate estimate
107 firing_rate = np.convolve(spikes,h,mode='same')
108 for t in P['timesteps']:
# NOTE(review): the row counter `j` and time value `rt` are bound on lines
# not shown in this view — confirm against the full file.
110 df_firing.loc[j]=[rt,drug,nrn+trial*P['neurons_wm'],tuning,firing_rate[t]]
112 # print 'appending dataframe for neuron %s' %f
def get_correct(cue, output_val):
    """Return 1 if output_val has the same nonzero sign as the cue, else 0.

    cue: cue direction for the trial (positive or negative).
    output_val: decoded decision output at one timestep.

    Fix: as visible, `correct` was assigned only on the matching-sign branch
    and never returned; the else branch and return are restored. A zero cue
    or zero output counts as incorrect.
    """
    if (cue > 0.0 and output_val > 0.0) or (cue < 0.0 and output_val < 0.0):
        correct = 1
    else:
        correct = 0
    return correct
# Classify a neuron's tuning ('superweak'/'weak'/'strong'/'nonpreferred')
# from the first component of its encoder relative to this trial's cue sign,
# using the cutoffs in P.
120 def get_tuning(P,trial,enc):
# NOTE(review): `cue` is presumably bound from P['cues'][trial] on the
# preceding line not shown in this view — confirm against the full file.
122 enc_min_cutoff=P['enc_min_cutoff']
123 enc_max_cutoff=P['enc_max_cutoff']
# encoder points the same way as the cue but below the minimum cutoff
124 if (cue > 0.0 and 0.0 < enc[0] < enc_min_cutoff) or \
125 (cue < 0.0 and 0.0 > enc[0] > -1.0*enc_min_cutoff): tuning='superweak'
# NOTE(review): this second chain is a plain `if`, and its else branch always
# reassigns `tuning` — so a 'superweak' result from the check above is
# unconditionally overwritten with 'nonpreferred'. This looks like it was
# meant to be `elif`; confirm intended behavior before changing.
126 if (cue > 0.0 and enc_min_cutoff < enc[0] < enc_max_cutoff) or \
127 (cue < 0.0 and -1.0*enc_max_cutoff < enc[0] < -1.0*enc_min_cutoff): tuning='weak'
128 elif (cue > 0.0 and enc[0] > enc_max_cutoff) or \
129 (cue < 0.0 and enc[0] < -1.0*enc_max_cutoff): tuning='strong'
130 else: tuning='nonpreferred'