This article collects typical usage examples of the Python Locker.lock_wait method. If you are wondering what Locker.lock_wait does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore the other methods of the containing class Locker.
The following shows 7 code examples of the Locker.lock_wait method, sorted by popularity by default.
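All of the examples use lock_wait in the same way: acquire a lock on a shared pickle file, write the new state to a temporary file, move it into place atomically, and release the lock. Here is a minimal sketch of that pattern, assuming the class is importable as "from Locker import Locker" and that lock_wait(path) and unlock(path) behave as they do in the examples below; the helper name and arguments are invented for illustration.

import os
import pickle
import tempfile

from Locker import Locker  # Spearmint's file-lock helper (assumed importable like this)

def save_state_atomically(state_pkl, state):
    """Write `state` to `state_pkl` while holding the Locker file lock."""
    locker = Locker()
    locker.lock_wait(state_pkl)              # block until the lock is acquired
    try:
        # Write to a temporary file in the same directory so the final
        # rename stays on one filesystem and is atomic.
        fh = tempfile.NamedTemporaryFile(mode='wb', delete=False,
                                         dir=os.path.dirname(state_pkl) or '.')
        pickle.dump(state, fh)
        fh.close()
        os.rename(fh.name, state_pkl)        # atomic replace, like the `mv` in the examples
    finally:
        locker.unlock(state_pkl)             # release the lock even if writing fails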
Example 1: __init__
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class GPEIOptChooser:

    def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
                 pending_samples=100, noiseless=False, burnin=100,
                 grid_subset=20, use_multiprocessing=True):
        self.cov_func = getattr(gp, covar)
        self.locker = Locker()
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
        self.stats_file = os.path.join(expt_dir,
                                       self.__module__ + "_hyperparameters.txt")

        self.mcmc_iters = int(mcmc_iters)
        self.burnin = int(burnin)
        self.needs_burnin = True
        self.pending_samples = int(pending_samples)
        self.D = -1
        self.hyper_iters = 1
        # Number of points to optimize EI over
        self.grid_subset = int(grid_subset)
        self.noiseless = bool(int(noiseless))
        self.hyper_samples = []

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1     # zero-mean log normal prior
        self.max_ls = 2         # top-hat prior on length scales

        # If multiprocessing fails or deadlocks, set this to False
        self.use_multiprocessing = bool(int(use_multiprocessing))

    def dump_hypers(self):
        self.locker.lock_wait(self.state_pkl)

        # Write the hyperparameters out to a Pickle.
        fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
        cPickle.dump({ 'dims'          : self.D,
                       'ls'            : self.ls,
                       'amp2'          : self.amp2,
                       'noise'         : self.noise,
                       'hyper_samples' : self.hyper_samples,
                       'mean'          : self.mean },
                     fh)
        fh.close()

        # Use an atomic move for better NFS happiness.
        cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
        os.system(cmd)  # TODO: Should check system-dependent return status.

        self.locker.unlock(self.state_pkl)

        # Write the hyperparameters out to a human readable file as well
        fh = open(self.stats_file, 'w')
        fh.write('Mean Noise Amplitude <length scales>\n')
        fh.write('-----------ALL SAMPLES-------------\n')
        meanhyps = 0*np.hstack(self.hyper_samples[0])
        for i in self.hyper_samples:
            hyps = np.hstack(i)
            meanhyps += (1/float(len(self.hyper_samples)))*hyps
            for j in hyps:
                fh.write(str(j) + ' ')
            fh.write('\n')

        fh.write('-----------MEAN OF SAMPLES-------------\n')
        for j in meanhyps:
            fh.write(str(j) + ' ')
        fh.write('\n')
        fh.close()

    # This passes out html or javascript to display interesting
    # stats - such as the length scales (sensitivity to various
    # dimensions).
    def generate_stats_html(self):
        # Need this because the model may not necessarily be
        # initialized when this code is called.
        if not self._read_only():
            return 'Chooser not yet ready to display output'

        mean_mean = np.mean(np.vstack([h[0] for h in self.hyper_samples]))
        mean_noise = np.mean(np.vstack([h[1] for h in self.hyper_samples]))
        mean_ls = np.mean(np.vstack([h[3][np.newaxis,:] for h in self.hyper_samples]),0)

        try:
            output = (
                '<br /><span class=\"label label-info\">Estimated mean:</span> ' + str(mean_mean) +
                '<br /><span class=\"label label-info\">Estimated noise:</span> ' + str(mean_noise) +
                '<br /><br /><span class=\"label label-info\">Inverse parameter sensitivity' +
                ' - Gaussian Process length scales</span><br /><br />' +
                '<div id=\"lschart\"></div><script type=\"text/javascript\">' +
                'var lsdata = [' + ','.join(['%.2f' % i for i in mean_ls]) + '];')
        except:
            return 'Chooser not yet ready to display output.'

        output += ('bar_chart("#lschart", lsdata, ' + str(self.max_ls) + ');' +
                   '</script>')
        return output

    # Read in the chooser from file. Returns True only on success
    def _read_only(self):
        if os.path.exists(self.state_pkl):
            fh = open(self.state_pkl, 'r')
            state = cPickle.load(fh)
            #.........the rest of the code is omitted.........

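The os.system('mv ...') call above carries a TODO about checking the return status. One way to address it, shown here only as a hedged sketch rather than Spearmint's actual fix, is to try an atomic os.rename first and fall back to mv via subprocess.check_call, which raises instead of failing silently:

import os
import subprocess

def atomic_move(src, dst):
    """Move `src` over `dst`, failing loudly instead of silently ignoring errors."""
    try:
        os.rename(src, dst)                      # atomic when src and dst share a filesystem
    except OSError:
        subprocess.check_call(['mv', src, dst])  # falls back to mv; raises CalledProcessError on failure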
Example 2: __init__
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class GPEIperSecChooser:

    def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
                 pending_samples=100, noiseless=False, burnin=100,
                 grid_subset=20):
        self.cov_func = getattr(gp, covar)
        self.locker = Locker()
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
        self.stats_file = os.path.join(expt_dir,
                                       self.__module__ + "_hyperparameters.txt")

        self.mcmc_iters = int(mcmc_iters)
        self.burnin = int(burnin)
        self.needs_burnin = True
        self.pending_samples = pending_samples
        self.D = -1
        self.hyper_iters = 1
        # Number of points to optimize EI over
        self.grid_subset = int(grid_subset)
        self.noiseless = bool(int(noiseless))
        self.hyper_samples = []
        self.time_hyper_samples = []

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1     # zero-mean log normal prior
        self.max_ls = 10        # top-hat prior on length scales

        self.time_noise_scale = 0.1  # horseshoe prior
        self.time_amp2_scale = 1     # zero-mean log normal prior
        self.time_max_ls = 10        # top-hat prior on length scales

    # A simple function to dump out hyperparameters to allow for a hot start
    # if the optimization is restarted.
    def dump_hypers(self):
        self.locker.lock_wait(self.state_pkl)

        # Write the hyperparameters out to a Pickle.
        fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
        cPickle.dump({ 'dims'       : self.D,
                       'ls'         : self.ls,
                       'amp2'       : self.amp2,
                       'noise'      : self.noise,
                       'mean'       : self.mean,
                       'time_ls'    : self.time_ls,
                       'time_amp2'  : self.time_amp2,
                       'time_noise' : self.time_noise,
                       'time_mean'  : self.time_mean },
                     fh)
        fh.close()

        # Use an atomic move for better NFS happiness.
        cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
        os.system(cmd)  # TODO: Should check system-dependent return status.

        self.locker.unlock(self.state_pkl)

    def _real_init(self, dims, values, durations):
        self.locker.lock_wait(self.state_pkl)

        if os.path.exists(self.state_pkl):
            fh = open(self.state_pkl, 'r')
            state = cPickle.load(fh)
            fh.close()

            self.D = state['dims']
            self.ls = state['ls']
            self.amp2 = state['amp2']
            self.noise = state['noise']
            self.mean = state['mean']
            self.time_ls = state['time_ls']
            self.time_amp2 = state['time_amp2']
            self.time_noise = state['time_noise']
            self.time_mean = state['time_mean']
        else:
            # Input dimensionality.
            self.D = dims

            # Initial length scales.
            self.ls = np.ones(self.D)
            self.time_ls = np.ones(self.D)

            # Initial amplitude.
            self.amp2 = np.std(values)+1e-4
            self.time_amp2 = np.std(durations)+1e-4

            # Initial observation noise.
            self.noise = 1e-3
            self.time_noise = 1e-3

            # Initial mean.
            self.mean = np.mean(values)
            self.time_mean = np.mean(np.log(durations))

        self.locker.unlock(self.state_pkl)

    def cov(self, amp2, ls, x1, x2=None):
        if x2 is None:
            return amp2 * (self.cov_func(ls, x1, None)
                           + 1e-6*np.eye(x1.shape[0]))
            #.........the rest of the code is omitted.........

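Because dump_hypers writes a plain pickle with the keys shown above, the saved state can be inspected directly when debugging a restarted run. A small sketch follows; the file path is hypothetical, and under Python 2 you would use cPickle exactly as the example does.

import pickle

# Hypothetical path; in the examples it is expt_dir/<module name>.pkl
with open('expt-dir/GPEIperSecChooser.pkl', 'rb') as fh:
    state = pickle.load(fh)

print(state['dims'])      # input dimensionality D
print(state['ls'])        # GP length scales for the objective model
print(state['time_ls'])   # GP length scales for the duration model
print(state['mean'], state['noise'], state['amp2'])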
Example 3: __init__
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class GPEIChooser:

    def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
                 pending_samples=100, noiseless=False):
        self.cov_func = getattr(gp, covar)
        self.locker = Locker()
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")

        self.mcmc_iters = int(mcmc_iters)
        self.pending_samples = pending_samples
        self.D = -1
        self.hyper_iters = 1
        self.noiseless = bool(int(noiseless))

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1     # zero-mean log normal prior
        self.max_ls = 2         # top-hat prior on length scales

    def __del__(self):
        self.locker.lock_wait(self.state_pkl)

        # Write the hyperparameters out to a Pickle.
        fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
        cPickle.dump({ 'dims'  : self.D,
                       'ls'    : self.ls,
                       'amp2'  : self.amp2,
                       'noise' : self.noise,
                       'mean'  : self.mean },
                     fh)
        fh.close()

        # Use an atomic move for better NFS happiness.
        cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
        os.system(cmd)  # TODO: Should check system-dependent return status.

        self.locker.unlock(self.state_pkl)

    def _real_init(self, dims, values):
        self.locker.lock_wait(self.state_pkl)

        if os.path.exists(self.state_pkl):
            fh = open(self.state_pkl, 'r')
            state = cPickle.load(fh)
            fh.close()

            self.D = state['dims']
            self.ls = state['ls']
            self.amp2 = state['amp2']
            self.noise = state['noise']
            self.mean = state['mean']
        else:
            # Input dimensionality.
            self.D = dims

            # Initial length scales.
            self.ls = np.ones(self.D)

            # Initial amplitude.
            self.amp2 = np.std(values)+1e-4

            # Initial observation noise.
            self.noise = 1e-3

            # Initial mean.
            self.mean = np.mean(values)

        self.locker.unlock(self.state_pkl)

    def cov(self, x1, x2=None):
        if x2 is None:
            return self.amp2 * (self.cov_func(self.ls, x1, None)
                                + 1e-6*np.eye(x1.shape[0]))
        else:
            return self.amp2 * self.cov_func(self.ls, x1, x2)

    def next(self, grid, values, durations, candidates, pending, complete):
        # Don't bother using fancy GP stuff at first.
        if complete.shape[0] < 2:
            return int(candidates[0])

        # Perform the real initialization.
        if self.D == -1:
            self._real_init(grid.shape[1], values[complete])

        # Grab out the relevant sets.
        comp = grid[complete,:]
        cand = grid[candidates,:]
        pend = grid[pending,:]
        vals = values[complete]

        if self.mcmc_iters > 0:
            # Sample from hyperparameters.
            overall_ei = np.zeros((cand.shape[0], self.mcmc_iters))

            for mcmc_iter in xrange(self.mcmc_iters):
                self.sample_hypers(comp, vals)
                #.........the rest of the code is omitted.........

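For context, the next method above expects the whole candidate grid plus index arrays marking which points are complete, pending, or still candidates. The sketch below only illustrates how those arguments fit together; it assumes the full GPEIChooser module and its gp dependency are importable, and the toy data and expt-dir path are invented.

import numpy as np

# Hypothetical toy data: 5 grid points in 2 dimensions, two already evaluated.
grid       = np.random.rand(5, 2)
values     = np.array([0.7, 0.3, np.nan, np.nan, np.nan])
durations  = np.array([10.0, 12.0, np.nan, np.nan, np.nan])
complete   = np.array([0, 1])          # indices of finished jobs
pending    = np.array([], dtype=int)   # indices of jobs currently running
candidates = np.array([2, 3, 4])       # indices still available to pick

chooser = GPEIChooser('expt-dir')      # expt_dir is where the state pickle lives
best = chooser.next(grid, values, durations,
                    candidates, pending, complete)
print("next grid index to evaluate:", best)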
Example 4: job_running
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class ExperimentGrid:

    @staticmethod
    def job_running(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_running(id)

    @staticmethod
    def job_complete(expt_dir, id, value, duration):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_complete(id, value, duration)

    @staticmethod
    def job_broken(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_broken(id)

    def __init__(self, expt_dir, variables=None, grid_size=None, grid_seed=1):
        self.expt_dir = expt_dir
        self.jobs_pkl = os.path.join(expt_dir, 'expt-grid.pkl')
        self.locker = Locker()

        # Only one process at a time is allowed to have access to this.
        sys.stderr.write("Waiting to lock grid...")
        self.locker.lock_wait(self.jobs_pkl)
        sys.stderr.write("...acquired\n")

        # Does this exist already?
        if variables is not None and not os.path.exists(self.jobs_pkl):
            # Set up the grid for the first time.
            self.seed = grid_seed
            self.vmap = GridMap(variables, grid_size)
            self.grid = self.hypercube_grid(self.vmap.card(), grid_size)
            self.status = np.zeros(grid_size, dtype=int) + CANDIDATE_STATE
            self.values = np.zeros(grid_size) + np.nan
            self.durs = np.zeros(grid_size) + np.nan
            self.sgeids = np.zeros(grid_size, dtype=int)

            # Save this out.
            self._save_jobs()
        else:
            # Load in from the pickle.
            self._load_jobs()

    def __del__(self):
        self._save_jobs()
        if self.locker.unlock(self.jobs_pkl):
            sys.stderr.write("Released lock on job grid.\n")
        else:
            raise Exception("Could not release lock on job grid.\n")

    def get_grid(self):
        return self.grid, self.values, self.durs

    def get_candidates(self):
        return np.nonzero(self.status == CANDIDATE_STATE)[0]

    def get_pending(self):
        return np.nonzero((self.status == SUBMITTED_STATE) | (self.status == RUNNING_STATE))[0]

    def get_complete(self):
        return np.nonzero(self.status == COMPLETE_STATE)[0]

    def get_broken(self):
        return np.nonzero(self.status == BROKEN_STATE)[0]

    def get_params(self, index):
        return self.vmap.get_params(self.grid[index,:])

    def get_best(self):
        finite = self.values[np.isfinite(self.values)]
        if len(finite) > 0:
            cur_min = np.min(finite)
            index = np.nonzero(self.values==cur_min)[0][0]
            return cur_min, index
        else:
            return np.nan, -1

    def get_sgeid(self, id):
        return self.sgeids[id]

    def add_to_grid(self, candidate):
        # Set up the grid
        self.grid = np.vstack((self.grid, candidate))
        self.status = np.append(self.status, np.zeros(1, dtype=int) +
                                int(CANDIDATE_STATE))

        self.values = np.append(self.values, np.zeros(1)+np.nan)
        self.durs = np.append(self.durs, np.zeros(1)+np.nan)
        self.sgeids = np.append(self.sgeids, np.zeros(1,dtype=int))

        # Save this out.
        self._save_jobs()
        return self.grid.shape[0]-1

    def set_candidate(self, id):
        self.status[id] = CANDIDATE_STATE
        self._save_jobs()
        #.........the rest of the code is omitted.........

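ExperimentGrid holds the lock for the lifetime of the object and relies on __del__ to call unlock, which is fragile if an exception escapes before the destructor runs. A hedged alternative, not part of Spearmint, is a small context manager around Locker so the release is tied to a with block:

from contextlib import contextmanager

from Locker import Locker  # assumed importable as in the examples

@contextmanager
def locked(path):
    """Hold the Locker file lock on `path` for the duration of a with-block."""
    locker = Locker()
    locker.lock_wait(path)
    try:
        yield
    finally:
        if not locker.unlock(path):
            raise Exception("Could not release lock on %s" % path)

# Usage sketch:
# with locked(jobs_pkl):
#     ...load, modify, and save the grid...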
Example 5: job_running
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class ExperimentGrid:

    @staticmethod
    def job_running(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_running(id)

    @staticmethod
    def job_complete(expt_dir, id, value, duration):
        log("setting job %d complete" % id)
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_complete(id, value, duration)
        log("set...")

    @staticmethod
    def job_broken(expt_dir, id):
        expt_grid = ExperimentGrid(expt_dir)
        expt_grid.set_broken(id)

    def __init__(self, expt_dir, variables=None, grid_size=None, grid_seed=1):
        self._ready = False
        self.expt_dir = expt_dir
        self.jobs_pkl = os.path.join(expt_dir, EXPERIMENT_GRID_FILE)
        self.locker = Locker()

        # Only one process at a time is allowed to have access to the grid.
        self.locker.lock_wait(self.jobs_pkl)

        # Set up the grid for the first time if it doesn't exist.
        if variables is not None and not os.path.exists(self.jobs_pkl):
            self.seed = grid_seed
            self.vmap = GridMap(variables, grid_size)
            self.grid = self._hypercube_grid(self.vmap.card(), grid_size)
            self.status = np.zeros(grid_size, dtype=int) + CANDIDATE_STATE
            self.values = np.zeros(grid_size) + np.nan
            self.durs = np.zeros(grid_size) + np.nan
            self.proc_ids = np.zeros(grid_size, dtype=int)
            self._ready = True
            self._save_jobs()

        # Or load in the grid from the pickled file.
        else:
            self._load_jobs()

    def __del__(self):
        self._save_jobs()
        if self.locker.unlock(self.jobs_pkl):
            pass
        else:
            raise Exception("Could not release lock on job grid.\n")

    def get_grid(self):
        return self.grid, self.values, self.durs

    def get_candidates(self):
        return np.nonzero(self.status == CANDIDATE_STATE)[0]

    def get_pending(self):
        return np.nonzero((self.status == SUBMITTED_STATE) | (self.status == RUNNING_STATE))[0]

    def get_complete(self):
        return np.nonzero(self.status == COMPLETE_STATE)[0]

    def get_broken(self):
        return np.nonzero(self.status == BROKEN_STATE)[0]

    def get_params(self, index):
        return self.vmap.get_params(self.grid[index,:])

    def get_best(self):
        finite = self.values[np.isfinite(self.values)]
        if len(finite) > 0:
            cur_min = np.min(finite)
            index = np.nonzero(self.values==cur_min)[0][0]
            return cur_min, index
        else:
            return np.nan, -1

    def get_proc_id(self, id):
        return self.proc_ids[id]

    def add_to_grid(self, candidate):
        # Checks to prevent numerical over/underflow from corrupting the grid
        candidate[candidate > 1.0] = 1.0
        candidate[candidate < 0.0] = 0.0

        # Set up the grid
        self.grid = np.vstack((self.grid, candidate))
        self.status = np.append(self.status, np.zeros(1, dtype=int) +
                                int(CANDIDATE_STATE))

        self.values = np.append(self.values, np.zeros(1)+np.nan)
        self.durs = np.append(self.durs, np.zeros(1)+np.nan)
        self.proc_ids = np.append(self.proc_ids, np.zeros(1,dtype=int))

        # Save this out.
        self._save_jobs()
        return self.grid.shape[0]-1

    def set_candidate(self, id):
        #.........the rest of the code is omitted.........

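The static job_* helpers above give worker processes a one-call way to update the shared grid: each call builds a fresh ExperimentGrid, which blocks in lock_wait until no other process holds the grid, applies the update, and releases the lock when the object is torn down. A usage sketch with invented directory, job id, value, and duration:

# Hypothetical values for illustration.
expt_dir = 'expt-dir'
job_id = 7

ExperimentGrid.job_running(expt_dir, job_id)               # mark the job as running
# ... the worker runs the actual experiment ...
ExperimentGrid.job_complete(expt_dir, job_id, 0.42, 93.0)  # record value and duration (seconds)
# or, if the job crashed:
# ExperimentGrid.job_broken(expt_dir, job_id)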
Example 6: __init__
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class GPEIConstrainedChooser:

    def __init__(self, expt_dir, covar="Matern52", mcmc_iters=10,
                 pending_samples=100, noiseless=False, burnin=100,
                 grid_subset=20, constraint_violating_value=-1):
        self.cov_func = getattr(gp, covar)
        self.locker = Locker()
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
        self.stats_file = os.path.join(expt_dir,
                                       self.__module__ + "_hyperparameters.txt")

        self.mcmc_iters = int(mcmc_iters)
        self.burnin = int(burnin)
        self.needs_burnin = True
        self.pending_samples = pending_samples
        self.D = -1
        self.hyper_iters = 1
        # Number of points to optimize EI over
        self.grid_subset = int(grid_subset)
        self.noiseless = bool(int(noiseless))
        self.hyper_samples = []
        self.constraint_hyper_samples = []
        self.ff = None
        self.ff_samples = []

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1     # zero-mean log normal prior
        self.max_ls = 2         # top-hat prior on length scales

        self.constraint_noise_scale = 0.1  # horseshoe prior
        self.constraint_amp2_scale = 1     # zero-mean log normal prior
        self.constraint_gain = 1           # top-hat prior on length scales
        self.constraint_max_ls = 2         # top-hat prior on length scales
        self.bad_value = float(constraint_violating_value)

    # A simple function to dump out hyperparameters to allow for a hot start
    # if the optimization is restarted.
    def dump_hypers(self):
        sys.stderr.write("Waiting to lock hyperparameter pickle...")
        self.locker.lock_wait(self.state_pkl)
        sys.stderr.write("...acquired\n")

        # Write the hyperparameters out to a Pickle.
        fh = tempfile.NamedTemporaryFile(mode='w', delete=False)
        cPickle.dump({ 'dims'             : self.D,
                       'ls'               : self.ls,
                       'amp2'             : self.amp2,
                       'noise'            : self.noise,
                       'mean'             : self.mean,
                       'constraint_ls'    : self.constraint_ls,
                       'constraint_amp2'  : self.constraint_amp2,
                       'constraint_noise' : self.constraint_noise,
                       'constraint_mean'  : self.constraint_mean },
                     fh)
        fh.close()

        # Use an atomic move for better NFS happiness.
        cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
        os.system(cmd)  # TODO: Should check system-dependent return status.

        self.locker.unlock(self.state_pkl)

    def _real_init(self, dims, values, durations):
        sys.stderr.write("Waiting to lock hyperparameter pickle...")
        self.locker.lock_wait(self.state_pkl)
        sys.stderr.write("...acquired\n")

        if os.path.exists(self.state_pkl):
            fh = open(self.state_pkl, 'r')
            state = cPickle.load(fh)
            fh.close()

            self.D = state['dims']
            self.ls = state['ls']
            self.amp2 = state['amp2']
            self.noise = state['noise']
            self.mean = state['mean']
            self.constraint_ls = state['constraint_ls']
            self.constraint_amp2 = state['constraint_amp2']
            self.constraint_noise = state['constraint_noise']
            self.constraint_mean = state['constraint_mean']
            self.constraint_gain = state['constraint_mean']
            self.needs_burnin = False
        else:
            # Identify constraint violations
            goodvals = np.nonzero(values != self.bad_value)[0]

            # Input dimensionality.
            self.D = dims

            # Initial length scales.
            self.ls = np.ones(self.D)
            self.constraint_ls = np.ones(self.D)

            # Initial amplitude.
            self.amp2 = np.std(values[goodvals])
            self.constraint_amp2 = 1  # np.std(durations)
            #.........the rest of the code is omitted.........

Example 7: __init__
# Required import: import Locker [as alias]
# Or: from Locker import lock_wait [as alias]
class GPConstrainedEIChooser:

    def __init__(
        self,
        expt_dir,
        covar="Matern52",
        mcmc_iters=20,
        pending_samples=100,
        noiseless=False,
        burnin=100,
        grid_subset=20,
        constraint_violating_value=np.inf,
        verbosity=0,
        visualize2D=False,
    ):
        self.cov_func = getattr(gp, covar)
        self.locker = Locker()
        self.state_pkl = os.path.join(expt_dir, self.__module__ + ".pkl")
        self.stats_file = os.path.join(expt_dir, self.__module__ + "_hyperparameters.txt")

        self.mcmc_iters = int(mcmc_iters)
        self.burnin = int(burnin)
        self.needs_burnin = True
        self.pending_samples = pending_samples
        self.D = -1
        self.hyper_iters = 1
        # Number of points to optimize EI over
        self.grid_subset = int(grid_subset)
        self.noiseless = bool(int(noiseless))
        self.hyper_samples = []
        self.constraint_hyper_samples = []
        self.ff = None
        self.ff_samples = []
        self.verbosity = int(verbosity)

        self.noise_scale = 0.1  # horseshoe prior
        self.amp2_scale = 1  # zero-mean log normal prior
        self.max_ls = 2  # top-hat prior on length scales

        self.constraint_noise_scale = 0.1  # horseshoe prior
        self.constraint_amp2_scale = 1  # zero-mean log normal prior
        self.constraint_gain = 1  # top-hat prior on length scales
        self.constraint_max_ls = 2  # top-hat prior on length scales
        self.bad_value = float(constraint_violating_value)
        self.visualize2D = visualize2D

    # A simple function to dump out hyperparameters to allow for a hot start
    # if the optimization is restarted.
    def dump_hypers(self):
        self.locker.lock_wait(self.state_pkl)

        # Write the hyperparameters out to a Pickle.
        fh = tempfile.NamedTemporaryFile(mode="wb", delete=False)
        pickle.dump(
            {
                "dims": self.D,
                "ls": self.ls,
                "amp2": self.amp2,
                "noise": self.noise,
                "mean": self.mean,
                "constraint_ls": self.constraint_ls,
                "constraint_amp2": self.constraint_amp2,
                "constraint_noise": self.constraint_noise,
                "constraint_mean": self.constraint_mean,
            },
            fh,
        )
        fh.close()

        # Use an atomic move for better NFS happiness.
        cmd = 'mv "%s" "%s"' % (fh.name, self.state_pkl)
        os.system(cmd)  # TODO: Should check system-dependent return status.

        self.locker.unlock(self.state_pkl)

        # Write the hyperparameters out to a human readable file as well
        fh = open(self.stats_file, "w")
        fh.write("Mean Noise Amplitude <length scales>\n")
        fh.write("-----------ALL SAMPLES-------------\n")
        meanhyps = 0 * np.hstack(self.hyper_samples[0])
        for i in self.hyper_samples:
            hyps = np.hstack(i)
            meanhyps += (1 / float(len(self.hyper_samples))) * hyps
            for j in hyps:
                fh.write(str(j) + " ")
            fh.write("\n")

        fh.write("-----------MEAN OF SAMPLES-------------\n")
        for j in meanhyps:
            fh.write(str(j) + " ")
        fh.write("\n")
        fh.close()

    def _real_init(self, dims, values, durations):
        self.locker.lock_wait(self.state_pkl)

        self.randomstate = npr.get_state()
        if os.path.exists(self.state_pkl):
            fh = open(self.state_pkl, "rb")
            state = pickle.load(fh)
            #.........the rest of the code is omitted.........