This article collects typical usage examples of the Python method population.Population.set_data. If you are unsure how to call Population.set_data, the curated examples below should help; for more context you can also browse the other usage examples of the containing class population.Population.
The following 15 code examples of Population.set_data are shown, sorted by popularity by default.
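All fifteen examples come from the same pyglm-style codebase and share one pattern: build a model specification with make_model, wrap it in a Population, and call set_data before computing likelihoods or evaluating state. The sketch below distills that pattern; it is a minimal, hedged reconstruction, not a documented API. In particular, the gen_synth_data helper varies in argument order between the examples (compare Examples 1, 10, and 15), so the call here follows Example 1 and should be treated as an assumption.
# A minimal sketch of the common pattern, assuming the make_model /
# Population API used in the examples below. The gen_synth_data call
# mimics Example 1's argument order (popn, x0, N, T) and is an
# assumption, not a fixed signature.
from models.model_factory import make_model
from population import Population

model = make_model('standard_glm', N=2)      # model spec for two neurons
popn = Population(model)                     # master population object

x_true = popn.sample()                       # draw parameters from the prior
data = gen_synth_data(popn, x_true, 2, 60)   # hypothetical: synthetic spikes, T=60s
popn.set_data(data)                          # attach the dataset before evaluation

print "True log prob: %f" % popn.compute_log_p(x_true)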
Example 1: run_synth_test
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_synth_test():
    """ Run a test with synthetic data and MCMC inference
    """
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    print "Creating master population object"
    model = make_model(options.model, N=options.N)
    popn = Population(model)

    results_file = os.path.join(options.resultsDir, "geweke_results.pkl")
    if os.path.exists(results_file) and not options.force_recompute:
        with open(results_file) as f:
            x_smpls = cPickle.load(f)
    else:
        x0 = popn.sample()
        data = gen_synth_data(popn, x0, options.N, options.T)
        popn.set_data(data)

        # Perform inference
        N_samples = 1000
        x_smpls = geweke_test(popn, data, N_samples=N_samples)

        # Save results
        print "Saving results to %s" % results_file
        with open(results_file, "w") as f:
            cPickle.dump(x_smpls, f, protocol=-1)

    # Plot empirical parameter distributions
    print "Plotting results."
    plot_geweke_results(popn, x_smpls, model, resdir=options.resultsDir)
Example 2: initialize_parallel_test_harness
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def initialize_parallel_test_harness():
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    # Load data from file or create synthetic test dataset
    data = load_data(options)

    print "Creating master population object"
    model = make_model(options.model, N=data['N'])
    popn = Population(model)
    popn.set_data(data)

    # Initialize the GLM with the data
    popn_true = None
    x_true = None
    if 'vars' in data:
        x_true = data['vars']

        # Load the true model
        data_dir = os.path.dirname(options.dataFile)
        model_file = os.path.join(data_dir, 'model.pkl')
        print "Loading true model from %s" % model_file
        with open(model_file) as f:
            model_true = cPickle.load(f)

        # HACK FOR EXISTING DATA!
        if 'N_dims' not in model_true['network']['graph']:
            model_true['network']['graph']['N_dims'] = 1
        if 'location_prior' not in model_true['network']['graph']:
            model_true['network']['graph']['location_prior'] = \
                {
                    'type': 'gaussian',
                    'mu': 0.0,
                    'sigma': 1.0
                }
        if 'L' in x_true['net']['graph']:
            x_true['net']['graph']['L'] = x_true['net']['graph']['L'].ravel()
        # END HACK

        popn_true = Population(model_true)
        popn_true.set_data(data)
        # ll_true = popn_true.compute_log_p(x_true)
        # print "true LL: %f" % ll_true

    # Create a client with direct view to all engines
    if options.json is not None:
        client = Client(options.json)
    else:
        client = Client(profile=options.profile)
    dview = client[:]
    print "Found %d engines." % len(dview)

    print "Initializing imports on each engine"
    initialize_imports(dview)

    print "Creating population objects on each engine"
    create_population_on_engines(dview, data, options.model)

    return options, popn, data, client, popn_true, x_true
Example 3: initialize_client
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def initialize_client(model, data):
    """ Initialize a population object on the client
    """
    # Initialize a model with N neurons
    print "Initializing GLM"
    global popn
    popn = Population(model)
    # Initialize the GLM with the data
    popn.set_data(data)
Example 4: initialize_parallel_test_harness
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def initialize_parallel_test_harness():
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    # Load data from file or create synthetic test dataset
    data = load_data(options)

    print "Creating master population object"
    model = make_model(options.model, N=data["N"])
    stabilize_sparsity(model)
    popn = Population(model)
    popn.set_data(data)

    # Initialize the GLM with the data
    popn_true = None
    x_true = None
    if "vars" in data:
        x_true = data["vars"]

        # Load the true model
        data_dir = os.path.dirname(options.dataFile)
        model_file = os.path.join(data_dir, "model.pkl")
        print "Loading true model from %s" % model_file
        with open(model_file) as f:
            model_true = cPickle.load(f)

        # HACK FOR EXISTING DATA!
        if "N_dims" not in model_true["network"]["graph"]:
            model_true["network"]["graph"]["N_dims"] = 1
        if "location_prior" not in model_true["network"]["graph"]:
            model_true["network"]["graph"]["location_prior"] = {"type": "gaussian", "mu": 0.0, "sigma": 1.0}
        if "L" in x_true["net"]["graph"]:
            x_true["net"]["graph"]["L"] = x_true["net"]["graph"]["L"].ravel()
        # END HACK

        popn_true = Population(model_true)
        popn_true.set_data(data)

    # Create a client with direct view to all engines
    if options.json is not None:
        client = Client(options.json)
    else:
        client = Client(profile=options.profile)
    dview = client[:]
    print "Found %d engines." % len(dview)

    print "Initializing imports on each engine"
    initialize_imports(dview)

    print "Creating population objects on each engine"
    create_population_on_engines(dview, data, options.model)

    return options, popn, data, client, popn_true, x_true
Example 5: initialize_test_harness
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def initialize_test_harness():
    """ Initialize a model with N neurons. Use the data if specified on the
    command line, otherwise sample new data from the model.
    Return a population object, the data, and the set of true parameters
    expected for synthetic tests.
    """
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    # Load data from file or create synthetic test dataset
    data = load_data(options)

    print "Creating master population object"
    model = make_model(options.model, N=data['N'])
    stabilize_sparsity(model)
    popn = Population(model)
    popn.set_data(data)

    # Initialize the GLM with the data
    popn_true = None
    x_true = None
    if 'vars' in data:
        x_true = data['vars']

        # Load the true model
        model_true = None
        data_dir = os.path.dirname(options.dataFile)
        model_file = os.path.join(data_dir, 'model.pkl')
        print "Loading true model from %s" % model_file
        with open(model_file) as f:
            model_true = cPickle.load(f)

        # HACK FOR EXISTING DATA!
        if 'N_dims' not in model_true['network']['graph']:
            model_true['network']['graph']['N_dims'] = 1
        if 'location_prior' not in model_true['network']['graph']:
            model_true['network']['graph']['location_prior'] = \
                {
                    'type': 'gaussian',
                    'mu': 0.0,
                    'sigma': 1.0
                }
        if 'L' in x_true['net']['graph']:
            x_true['net']['graph']['L'] = x_true['net']['graph']['L'].ravel()
        # END HACK

        popn_true = Population(model_true)
        popn_true.set_data(data)
        ll_true = popn_true.compute_log_p(x_true)
        print "true LL: %f" % ll_true

    return options, popn, data, popn_true, x_true
Example 6: run_synth_test
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_synth_test():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, client, popn_true, x_true = initialize_parallel_test_harness()

    # If x0 specified, load x0 from file
    x0 = None
    if options.x0_file is not None:
        with open(options.x0_file, "r") as f:
            print "Initializing with state from: %s" % options.x0_file
            prev_x0 = cPickle.load(f)
            if isinstance(prev_x0, list):
                x0 = prev_x0[-1]
            else:
                mle_x0 = prev_x0
                # HACK: We're assuming x0 came from a standard GLM
                mle_model = make_model("standard_glm", N=data["N"])
                mle_popn = Population(mle_model)
                mle_popn.set_data(data)
                x0 = popn.sample(None)
                x0 = convert_model(mle_popn, mle_model, mle_x0, popn, popn.model, x0)

    use_existing = False
    fname = os.path.join(options.resultsDir, "%s_marginal_lkhd.pkl" % options.model)
    if use_existing and os.path.exists(fname):
        print "Found existing results"
        with open(fname) as f:
            marg_lkhd = cPickle.load(f)
    else:
        N_samples = 10
        popn_true.set_data(data)

        # Estimate the marginal log likelihood
        print "Performing parallel inference"
        marg_lkhd, log_weights = parallel_ais(
            client, data, x0=x0, N_samples=N_samples, steps_per_B=50, resdir=options.resultsDir
        )

        # Save results
        print "Saving results to %s" % fname
        with open(fname, "w") as f:
            cPickle.dump((marg_lkhd, log_weights), f, protocol=-1)
Example 7: initialize_test_harness
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def initialize_test_harness(N=2):
    """ Initialize a model with N neurons. Use the data if specified on the
    command line, otherwise sample new data from the model.
    Return a population object, the data, and the set of true parameters
    expected for synthetic tests.
    """
    # Parse command line args
    (options, args) = parse_cmd_line_args()

    # Initialize a model with N neurons
    print "Initializing GLM"
    model = make_model('spatiotemporal_glm', N=N)
    # model = make_model('standard_glm', N=N)
    population = Population(model)

    # Load data
    if options.dataFile is not None:
        if options.dataFile.endswith('.mat'):
            print "Loading data from %s" % options.dataFile
            # data = scipy.io.loadmat(options.dataFile)
            # Scipy's IO is weird -- we can save dicts as structs but it's hard to reload them
            raise Exception('Loading from .mat file is not implemented!')
        elif options.dataFile.endswith('.pkl'):
            print "Loading data from %s" % options.dataFile
            with open(options.dataFile, 'r') as f:
                data = cPickle.load(f)
        else:
            raise Exception("Unrecognized file type: %s" % options.dataFile)
    else:
        print "Generating synthetic data"
        data = generate_synth_data(population,
                                   options.resultsDir,
                                   T_stop=60)

    # Initialize the GLM with the data
    x_true = data['vars']
    population.set_data(data)
    ll_true = population.compute_log_p(x_true)
    print "true LL: %f" % ll_true

    return population, data, x_true
Example 8: run_synth_test
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_synth_test():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, popn_true, x_true = initialize_test_harness()

    # If x0 specified, load x0 from file
    x0 = None
    if options.x0_file is not None:
        with open(options.x0_file, 'r') as f:
            print "Initializing with state from: %s" % options.x0_file
            mle_x0 = cPickle.load(f)
            # HACK: We're assuming x0 came from a standard GLM
            mle_model = make_model('standard_glm', N=data['N'])
            mle_popn = Population(mle_model)
            mle_popn.set_data(data)
            x0 = popn.sample()
            x0 = convert_model(mle_popn, mle_model, mle_x0, popn, popn.model, x0)

    # Perform inference
    N_samples = 1000
    x_smpls = gibbs_sample(popn, data, x0=x0, N_samples=N_samples)

    # Save results
    results_file = os.path.join(options.resultsDir, 'results.pkl')
    print "Saving results to %s" % results_file
    with open(results_file, 'w') as f:
        cPickle.dump(x_smpls, f, protocol=-1)

    # Plot average of last 20% of samples
    smpl_frac = 0.2
    plot_results(popn,
                 x_smpls[-1*int(smpl_frac*N_samples):],
                 popn_true=popn_true,
                 x_true=x_true,
                 resdir=options.resultsDir)
Example 9: load_set_of_results
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def load_set_of_results(N, T, graph_model='er', sample_frac=0.1):
    data_dir = os.path.join('/group', 'hips', 'scott', 'pyglm', 'data', 'synth', graph_model, 'N%dT%d' % (N, T))

    # Evaluate the state for each of the parameter settings
    s_infs_mcmc = []
    s_infs_map = []
    s_trues = []

    # Enumerate the subdirectories containing the data
    subdirs = os.listdir(data_dir)
    subdirs = reduce(lambda sd, d: sd + [d] \
                         if os.path.isdir(os.path.join(data_dir, d)) \
                         else sd,
                     subdirs, [])

    # For each data subdirectory, load the true data, the MAP estimate, and the MCMC results
    print "WARNING: Make sure we sample all subdirs"
    # import pdb; pdb.set_trace()
    for d in subdirs:
        print "Loading data and results from %s" % d
        print "Loading true data"
        with open(os.path.join(data_dir, d, 'data.pkl'), 'r') as f:
            data = cPickle.load(f)

        print "Loading model"
        with open(os.path.join(data_dir, d, 'model.pkl'), 'r') as f:
            model_data = cPickle.load(f)
        # HACK
        if 'N_dims' not in model_data['network']['graph']:
            model_data['network']['graph']['N_dims'] = 1
        if 'location_prior' not in model_data['network']['graph']:
            model_data['network']['graph']['location_prior'] = \
                {
                    'type': 'gaussian',
                    'mu': 0.0,
                    'sigma': 1.0
                }
        if 'L' in data['vars']['net']['graph']:
            data['vars']['net']['graph']['L'] = data['vars']['net']['graph']['L'].ravel()

        popn_data = Population(model_data)
        popn_data.set_data(data)
        s_trues.append(popn_data.eval_state(data['vars']))

        try:
            print "Loading map estimate"
            with open(os.path.join(data_dir, d, 'map', 'results.pkl'), 'r') as f:
                x_map = cPickle.load(f)
            model_map = make_model('standard_glm', N=data['N'])
            popn_map = Population(model_map)
            popn_map.set_data(data)

            print "Evaluating MAP state"
            s_infs_map.append(popn_map.eval_state(x_map))
        except Exception as e:
            print "ERROR: Failed to load MAP estimate"

        try:
            print "Loading mcmc estimate"
            with open(os.path.join(data_dir, d, 'mcmc', 'results.pkl'), 'r') as f:
                x_mcmc = cPickle.load(f)
            model_mcmc = make_model('sparse_weighted_model', N=data['N'])
            popn_mcmc = Population(model_mcmc)
            popn_mcmc.set_data(data)

            # Now compute the true and false positive rates for MCMC
            # For MCMC results, only consider the tail of the samples
            print "Evaluating MCMC states"
            N_samples = len(x_mcmc)
            start_smpl = int(np.floor(N_samples - sample_frac*N_samples))

            # Evaluate the state
            this_s_mcmc = []
            for i in range(start_smpl, N_samples):
                this_s_mcmc.append(popn_mcmc.eval_state(x_mcmc[i]))
            s_infs_mcmc.append(this_s_mcmc)
        except Exception as e:
            print "ERROR: Failed to load MCMC estimate"

    return s_trues, s_infs_map, s_infs_mcmc
Example 10: run_synth_test
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_synth_test():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, client, popn_true, x_true = initialize_parallel_test_harness()
    raise Exception("Make sure the sparsity is set properly!")

    # If x0 specified, load x0 from file
    x0 = None
    if options.x0_file is not None:
        with open(options.x0_file, 'r') as f:
            print "Initializing with state from: %s" % options.x0_file
            prev_x0 = cPickle.load(f)
            if isinstance(prev_x0, list):
                x0 = prev_x0[-1]
            else:
                mle_x0 = prev_x0
                # HACK: We're assuming x0 came from a standard GLM
                mle_model = make_model('standard_glm', N=data['N'])
                mle_popn = Population(mle_model)
                mle_popn.set_data(data)
                x0 = popn.sample(None)
                x0 = convert_model(mle_popn, mle_model, mle_x0, popn, popn.model, x0)

    # !!!!DEBUG!!!!!
    # Initialize with true variables
    # import copy
    # x0 = copy.deepcopy(x_true)

    use_existing = False
    if use_existing and \
       os.path.exists(os.path.join(options.resultsDir, 'results.pkl')):
        print "Found existing results"
        with open(os.path.join(options.resultsDir, 'results.pkl')) as f:
            x_smpls = cPickle.load(f)
        N_samples = len(x_smpls)
    else:
        N_samples = 1000

        # Define a callback to evaluate log likelihoods and predictive log likelihoods
        print "Creating synthetic test data"
        T_test = 15
        popn_test = Population(popn.model)
        test_data = gen_synth_data(data['N'], T_test, popn_true, x_true)
        popn_test.set_data(test_data)

        # Compute pred ll under true model
        popn_true.set_data(test_data)
        x_true['predll'] = popn_true.compute_ll(x_true)
        popn_true.set_data(data)

        # Compute the predictive log likelihood under a homogeneous PP model with MLE
        # homog_pred_lls[j] = compute_homog_pp(train_data, test_data)

        pred_lls = np.zeros(N_samples)
        smpl = [0]
        def pred_ll_cbk(x):
            pred_ll = popn_test.compute_ll(x)
            pred_lls[smpl[0]] = pred_ll
            x['predll'] = pred_ll
            smpl[0] += 1
            print "Pred LL: %.2f" % pred_ll
        pred_ll_cbk = None

        # Perform inference
        print "Performing parallel inference"
        start_time = time.time()
        x_smpls = parallel_gibbs_sample(client, data,
                                        x0=x0, N_samples=N_samples,
                                        save_interval=50, results_dir=options.resultsDir,
                                        callback=pred_ll_cbk)
        stop_time = time.time()

        # Save results
        print "Saving results to %s" % os.path.join(options.resultsDir, 'results.pkl')
        with open(os.path.join(options.resultsDir, 'results.pkl'), 'w') as f:
            cPickle.dump(x_smpls, f, protocol=-1)

        # Save runtime
        with open(os.path.join(options.resultsDir, 'runtime.pkl'), 'w') as f:
            cPickle.dump(stop_time-start_time, f, protocol=-1)

    # Plot the average of the trailing fraction of samples
    print "Plotting results"
    smpl_frac = 1.0

    # Only plot the impulse response matrix for small N
    do_plot = data['N'] < 20
    do_plot_imp_responses = data['N'] < 30

    if do_plot:
        plot_results(popn,
                     x_smpls[-1*int(smpl_frac*len(x_smpls)):],
                     popn_true,
                     x_true,
# ... (rest of the code omitted) ...
Example 11: postprocess
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def postprocess(popn, x_inf, popn_true, x_true, options):
    """ Compute an ROC curve from a set of inferred samples and the true state
    """
    true_state = popn_true.eval_state(x_true)

    # Make sure we have a list of x's
    if not isinstance(x_inf, list):
        x_inf = [x_inf]
    inf_state = [popn.eval_state(x) for x in x_inf]

    # Check if the inference model is a standard GLM or a network GLM
    # ... (condition omitted in the source listing) ...

    # Now compute the true and false positive rates for MAP
    (map_tpr, map_fpr) = compute_roc_from_std_glm(true_state, map_state)
    map_tprs.append(map_tpr)
    map_fprs.append(map_fpr)

    print "Loading mcmc estimate"
    x_mcmc = None
    with open(os.path.join(data_dir, d, 'mcmc', 'results.pkl'), 'r') as f:
        x_mcmc = cPickle.load(f)
    model_mcmc = make_model('sparse_weighted_model', N=data['N'])
    popn_mcmc = Population(model_mcmc)
    popn_mcmc.set_data(data)

    # Evaluate the state
    mcmc_state = []
    for x in x_mcmc:
        mcmc_state.append(popn_mcmc.eval_state(x))

    # Now compute the true and false positive rates for MCMC
    # For MCMC results, only consider the tail of the samples
    N_samples = len(mcmc_state)
    sample_frac = 0.2
    start_smpl = int(np.floor(N_samples - sample_frac*N_samples))
    (mcmc_tpr, mcmc_fpr) = compute_roc_from_sparse_glm_smpls(true_state, mcmc_state[start_smpl:])
    mcmc_tprs.append(mcmc_tpr)
    mcmc_fprs.append(mcmc_fpr)

    # Pickle the roc results
    with open(PKL_FNAME, 'w') as f:
        # TODO Dump the MCMC results too
        cPickle.dump({'map_tprs': map_tprs,
                      'map_fprs': map_fprs},
                     f,
                     protocol=-1)

    # Plot the actual ROC curve
    # Subsample to get about 10 errorbars
    subsample = N*N//10
    f = plt.figure()
    ax = f.add_subplot(111)
    plot_roc_curve(map_tprs, map_fprs, ax=ax, color='b', subsample=subsample)
    # plot_roc_curve(mcmc_tprs, mcmc_fprs, ax=ax, color='r', subsample=subsample)

    fname = os.path.join(PLOTDIR, 'roc_N%dT%d.pdf' % (N, T))
    print "Saving ROC to %s" % fname
    f.savefig(fname)
    plt.close(f)
Example 12: fit_latent_network_to_mle
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def fit_latent_network_to_mle():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, popn_true, x_true = initialize_test_harness()
    import pdb; pdb.set_trace()

    # Load MLE parameters from command line
    mle_x = None
    if options.x0_file is not None:
        with open(options.x0_file, 'r') as f:
            print "Initializing with state from: %s" % options.x0_file
            mle_x = cPickle.load(f)
            mle_model = make_model('standard_glm', N=data['N'])
            mle_popn = Population(mle_model)
            mle_popn.set_data(data)

    # Create a location sampler
    print "Initializing latent location sampler"
    loc_sampler = LatentLocationUpdate()
    loc_sampler.preprocess(popn)

    # Convert the mle results into a weighted adjacency matrix
    x_aw = popn.sample(None)
    x_aw = convert_model(mle_popn, mle_model, mle_x, popn, popn.model, x_aw)

    # Get rid of unnecessary keys
    del x_aw['glms']

    # Fit the latent distance network to a thresholded adjacency matrix
    ws = np.sort(np.abs(x_aw['net']['weights']['W']))
    wperm = np.argsort(np.abs(x_aw['net']['weights']['W']))
    nthrsh = 20
    threshs = np.arange(ws.size, step=ws.size/nthrsh)

    res = []
    N = popn.N
    for th in threshs:
        print "Fitting network for threshold: %.3f" % th
        A = np.zeros_like(ws, dtype=np.int8)
        A[wperm[th:]] = 1
        A = A.reshape((N, N))
        # A = (np.abs(x_aw['net']['weights']['W']) >= th).astype(np.int8).reshape((N,N))

        # Make sure the diag is still all 1s
        A[np.diag_indices(N)] = 1

        x = copy.deepcopy(x_aw)
        x['net']['graph']['A'] = A
        smpls = fit_latent_network_given_A(x, loc_sampler)

        # Index the results by the overall sparsity of A
        key = (np.sum(A)-N) / (np.float(np.size(A))-N)
        res.append((key, smpls))

    # Save results
    results_file = os.path.join(options.resultsDir, 'fit_latent_network_results.pkl')
    print "Saving results to %s" % results_file
    with open(results_file, 'w') as f:
        cPickle.dump(res, f)
Example 13: gibbs_sample
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def gibbs_sample(population,
                 data,
                 N_samples=1000,
                 x0=None,
                 init_from_mle=True):
    """
    Sample the posterior distribution over parameters using MCMC.
    """
    N = population.model['N']

    # Draw initial state from prior if not given
    if x0 is None:
        x0 = population.sample()

        if init_from_mle:
            print "Initializing with coordinate descent"
            from models.model_factory import make_model, convert_model
            from population import Population

            mle_model = make_model('standard_glm', N=N)
            mle_popn = Population(mle_model)
            mle_popn.set_data(data)
            mle_x0 = mle_popn.sample()

            # Initialize with MLE under standard GLM
            mle_x0 = coord_descent(mle_popn, data, x0=mle_x0, maxiter=1)

            # Convert between inferred parameters of the standard GLM
            # and the parameters of this model, e.g. convert unweighted
            # networks to weighted networks with normalized impulse responses.
            x0 = convert_model(mle_popn, mle_model, mle_x0, population, population.model, x0)

    # Create updates for this population
    serial_updates, parallel_updates = initialize_updates(population)

    # DEBUG Profile the Gibbs sampling loop
    import cProfile, pstats, StringIO
    pr = cProfile.Profile()
    pr.enable()

    # Alternate fitting the network and fitting the GLMs
    x_smpls = [x0]
    x = x0

    import time
    start_time = time.clock()
    for smpl in np.arange(N_samples):
        # Print the current log likelihood
        lp = population.compute_log_p(x)

        # Compute iters per second
        stop_time = time.clock()
        if stop_time - start_time == 0:
            print "Gibbs iteration %d. Iter/s exceeds time resolution. Log prob: %.3f" % (smpl, lp)
        else:
            print "Gibbs iteration %d. Iter/s = %f. Log prob: %.3f" % (smpl,
                                                                       1.0/(stop_time-start_time),
                                                                       lp)
        start_time = stop_time

        # Go through each parallel MH update
        for parallel_update in parallel_updates:
            for n in np.arange(N):
                parallel_update.update(x, n)

        # Sample the serial updates
        for serial_update in serial_updates:
            serial_update.update(x)

        x_smpls.append(copy.deepcopy(x))

    pr.disable()
    s = StringIO.StringIO()
    sortby = 'cumulative'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    with open('mcmc.prof.txt', 'w') as f:
        f.write(s.getvalue())

    return x_smpls
Example 14: run_synth_test
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_synth_test():
    """ Run a test with synthetic data and MCMC inference
    """
    options, popn, data, client, popn_true, x_true = initialize_parallel_test_harness()

    # If x0 specified, load x0 from file
    x0 = None
    if options.x0_file is not None:
        with open(options.x0_file, 'r') as f:
            print "Initializing with state from: %s" % options.x0_file
            prev_x0 = cPickle.load(f)
            if isinstance(prev_x0, list):
                x0 = prev_x0[-1]
            else:
                mle_x0 = prev_x0
                # HACK: We're assuming x0 came from a standard GLM
                mle_model = make_model('standard_glm', N=data['N'])
                mle_popn = Population(mle_model)
                mle_popn.set_data(data)
                x0 = popn.sample()
                x0 = convert_model(mle_popn, mle_model, mle_x0, popn, popn.model, x0)

    use_existing = False
    if use_existing and \
       os.path.exists(os.path.join(options.resultsDir, 'results.pkl')):
        print "Found existing results"
        with open(os.path.join(options.resultsDir, 'results.pkl')) as f:
            x_smpls = cPickle.load(f)
        N_samples = len(x_smpls)
    else:
        # Perform inference
        print "Performing parallel inference"
        N_samples = 1000
        x_smpls = parallel_gibbs_sample(client, data,
                                        x0=x0, N_samples=N_samples,
                                        save_interval=50, results_dir=options.resultsDir)

        # Save results
        print "Saving results to %s" % os.path.join(options.resultsDir, 'results.pkl')
        with open(os.path.join(options.resultsDir, 'results.pkl'), 'w') as f:
            cPickle.dump(x_smpls, f, protocol=-1)

    # Plot the average of the trailing fraction of samples
    print "Plotting results"
    smpl_frac = 0.5

    # Only plot the impulse response matrix for small N
    do_plot = data['N'] < 20
    do_plot_imp_responses = data['N'] < 30

    if do_plot:
        plot_results(popn,
                     x_smpls[-1*int(smpl_frac*N_samples):],
                     popn_true,
                     x_true,
                     do_plot_imp_responses=do_plot_imp_responses,
                     resdir=options.resultsDir)
Example 15: run_gen_synth_data
# Required import: from population import Population [as alias]
# Or: from population.Population import set_data [as alias]
def run_gen_synth_data():
    """ Generate a synthetic dataset by sampling from the model.
    """
    options, args = parse_cmd_line_args()

    # Create the model
    model = make_model(options.model, N=options.N)
    # Set the sparsity level to minimize the risk of unstable networks
    stabilize_sparsity(model)

    print "Creating master population object"
    popn = Population(model)

    # Sample random parameters from the model
    x_true = popn.sample()

    # Check stability of matrix
    assert check_stability(model, x_true, options.N), "ERROR: Sampled network is unstable!"

    # Save the model so it can be loaded alongside the data
    fname_model = os.path.join(options.resultsDir, 'model.pkl')
    print "Saving model to %s" % fname_model
    with open(fname_model, 'w') as f:
        cPickle.dump(model, f, protocol=-1)

    print "Generating synthetic data with %d neurons over %.2f seconds." % \
        (options.N, options.T_stop)

    # Set simulation parameters
    dt = 0.001
    dt_stim = 0.1
    D_stim = 1
    stim = np.random.randn(options.T_stop/dt_stim, D_stim)

    data = gen_synth_data(options.N, options.T_stop, popn, x_true, dt, dt_stim, D_stim, stim)

    # Set the data so that the population state can be evaluated
    popn.set_data(data)

    # DEBUG Compare the evaluated firing rate with the simulated firing rate
    state = popn.eval_state(x_true)
    for n in np.arange(options.N):
        lam_true = state['glms'][n]['lam']
        lam_sim = popn.glm.nlin_model.f_nlin(data['X'][:, n])
        assert np.allclose(lam_true, lam_sim)

    # Save the data for reuse
    # fname_mat = os.path.join(options.resultsDir, 'data.mat')
    # print "Saving data to %s" % fname_mat
    # scipy.io.savemat(fname_mat, data, oned_as='row')

    # Pickle the data so we can open it more easily
    fname_pkl = os.path.join(options.resultsDir, 'data.pkl')
    print "Saving data to %s" % fname_pkl
    with open(fname_pkl, 'w') as f:
        cPickle.dump(data, f, protocol=-1)

    # Plot firing rates, stimulus responses, etc.
    plot_results(popn, data['vars'],
                 resdir=options.resultsDir,
                 do_plot_stim_resp=False,
                 do_plot_imp_responses=False)