本文整理汇总了Python中lockfile.LockFile类的典型用法代码示例。如果您正苦于以下问题:Python LockFile类的具体用法?Python LockFile怎么用?Python LockFile使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LockFile类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write_ht_sensor_log
def write_ht_sensor_log(sensor_ht_read_temp_c, sensor_ht_read_hum, sensor_ht_dewpt_c, sensor):
    """Append one temperature/humidity/dewpoint reading for *sensor* to the HT sensor log.

    Writers are serialized with a lockfile.LockFile; a lock that cannot be
    acquired within 60 seconds is assumed stale, broken, and re-acquired.

    :param sensor_ht_read_temp_c: per-sensor Celsius temperatures (indexable by sensor)
    :param sensor_ht_read_hum: per-sensor humidity readings
    :param sensor_ht_dewpt_c: per-sensor Celsius dewpoints
    :param sensor: integer sensor index
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    lock = LockFile(sensor_ht_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Sensor Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:  # lockfile.LockTimeout, or a broken/stale lock
            logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Sensor Log] Gained lock: %s", lock.path)
    try:
        with open(sensor_ht_log_file_tmp, "ab") as sensorlog:
            sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f} {4:d}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor_ht_read_temp_c[sensor], sensor_ht_read_hum[sensor], sensor_ht_dewpt_c[sensor], sensor))
        logging.debug("[Write Sensor Log] Data appended to %s", sensor_ht_log_file_tmp)
    except Exception:
        # Best-effort append: log the failure but do not crash the caller.
        logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_ht_log_file_tmp)
    finally:
        # Always release, even if an unexpected error escaped above.
        logging.debug("[Write Sensor Log] Removing lock: %s", lock.path)
        lock.release()
示例2: generateQueryAndQueryVectorMap
def generateQueryAndQueryVectorMap(line_tmp):
    """Write "<query>\t<centroid vector>" for one corpus line to a fresh output file.

    Each word of the lower-cased query is looked up in redis: known words
    contribute their stored vector, unknown words a zero vector of length
    vector_size.  The centroid is the column-wise sum of the word vectors
    averaged by the number of words.  The output file is guarded by a
    lock file on the same path.
    """
    query = line_tmp.lower()
    component_word = query.split(' ')
    sentencevector = []
    for one_word in component_word:
        if redis_handle.exists(one_word):
            vector_tmp = redis_handle.get(one_word)
            sentencevector.append(normalize_redis_vector(vector_tmp))
        else:
            # Out-of-vocabulary word contributes a zero vector.
            sentencevector.append([0.0] * vector_size)
    # Column-wise sum over all word vectors -> one list of length vector_size.
    summed = numpy.array(sentencevector).sum(axis=0, dtype=numpy.float32).tolist()
    # Average by word count to get the centroid.  (The original guarded this
    # with `lmt != 0.0`, but a list never compares equal to a float, so the
    # zero-vector fallback branch was unreachable; averaging an all-zero sum
    # yields the same zero vector anyway, so the output is unchanged.)
    query_vector = [x / len(component_word) for x in summed]
    filename = getRandomOutputFilename()
    lock = LockFile(filename)
    lock.acquire()
    try:
        # Query text, a tab, then each vector component followed by a space.
        with open(filename, 'w') as fh:
            fh.write(str(query) + "\t")
            for item in query_vector:
                fh.write("%s " % str(item))
    finally:
        lock.release()
示例3: release
def release(self):
    """Release this object's lock file if the current process is holding it.

    A no-op when the lock is not held by us, so it is safe to call
    unconditionally during cleanup.
    """
    held = LockFile(self.lockfile)
    if not held.i_am_locking():
        return
    held.release()
示例4: on_post
def on_post(self, req, resp, id):
    """Merge task *id*'s git branch into master (falcon POST handler).

    Validates that the task exists, has a git branch/path, is not already
    on master, and that the caller is an admin or the wave's guarantor;
    then deletes the task branch under a file lock and points the task at
    master.  Responds with the appropriate HTTP status on each failure.
    """
    try:
        user = req.context['user']
        # The task must exist.
        task = session.query(model.Task).get(id)
        if task is None:
            req.context['result'] = 'Neexistujici uloha'
            resp.status = falcon.HTTP_404
            return
        # The task must have a git branch and directory configured.
        if (task.git_path is None) or (task.git_branch is None):
            req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
            resp.status = falcon.HTTP_400
            return
        if task.git_branch == "master":
            req.context['result'] = 'Uloha je jiz ve vetvi master'
            resp.status = falcon.HTTP_400
            return
        wave = session.query(model.Wave).get(task.wave)
        # Only administrators and the wave guarantor may merge.
        if (not user.is_logged_in()) or ((not user.is_admin()) and (user.id != wave.garant)):
            req.context['result'] = 'Nedostatecna opravneni'
            resp.status = falcon.HTTP_400
            return
        # Global git lock check.
        lock = util.lock.git_locked()
        if lock:
            req.context['result'] = 'GIT uzamcen zámkem '+lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
            resp.status = falcon.HTTP_409
            return
        # Bug fix: acquire the merge lock *before* entering the try block --
        # previously a failed construction/acquire still ran the finally,
        # releasing a lock that was never acquired (or raising NameError).
        mergeLock = LockFile(util.admin.taskMerge.LOCKFILE)
        mergeLock.acquire(60)  # lock timeout is 1 minute
        try:
            # Open the seminar repository and drop the task branch.
            repo = git.Repo(util.git.GIT_SEMINAR_PATH)
            if task.git_branch in repo.heads:
                # Cannot delete the branch we are currently on.
                repo.git.checkout("master")
                repo.git.branch('-D', task.git_branch)
            task.git_branch = 'master'
            session.commit()
            resp.status = falcon.HTTP_200
        finally:
            mergeLock.release()
    except SQLAlchemyError:
        session.rollback()
        raise
    finally:
        session.close()
示例5: write_sensor_log
def write_sensor_log():
    """Append the current temperature/humidity/dewpoint reading to the sensor log.

    Skipped entirely when the module-level Terminate flag is set.  Writers
    are serialized with a lockfile.LockFile; a lock that cannot be acquired
    within 60 seconds is assumed stale, broken, and re-acquired.
    (An unused ConfigParser.RawConfigParser() local was removed.)
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(sensor_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Sensor Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:  # lockfile.LockTimeout, or a broken/stale lock
                logging.warning("[Write Sensor Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Sensor Log] Gained lock: %s", lock.path)
        try:
            # tempc/humidity/dewpointc appear to be module-level readings -- TODO confirm.
            with open(sensor_log_file_tmp, "ab") as sensorlog:
                sensorlog.write('{0} {1:.1f} {2:.1f} {3:.1f}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    tempc, humidity, dewpointc))
            logging.info("[Write Sensor Log] Data appended to %s", sensor_log_file_tmp)
        except Exception:
            # Best-effort append: log the failure but do not crash the caller.
            logging.warning("[Write Sensor Log] Unable to append data to %s", sensor_log_file_tmp)
        finally:
            # Always release, even if an unexpected error escaped above.
            logging.info("[Write Sensor Log] Removing lock: %s", lock.path)
            lock.release()
示例6: store
def store(email,nickname,number,rate,strs,regressions):
    """Sanitize and persist one contributor record to the contributors file.

    User-entered data hits the filesystem here: every field is cleaned
    with bleach, an existing record with the same email is replaced, and
    the whole list is rewritten as JSON under a file lock.  Invalid
    emails are silently dropped.
    """
    if not validate_email(email):
        return
    newcontrib = [ bleach.clean(email),
        bleach.clean(nickname),
        bleach.clean(number),
        bleach.clean(rate),
        bleach.clean(strs),
        bleach.clean(regressions)]
    lock = LockFile("/var/local/bz-triage/contributors.cfg")
    lock.acquire()
    try:
        try:
            # Close the handle deterministically (the original leaked it
            # until GC) and catch only read/parse failures.
            with open("/var/local/bz-triage/contributors.cfg") as infile:
                contributors = json.load(infile)
        except (IOError, OSError, ValueError):
            logging.info("Failed to open the file...")
            contributors = list()
        # Replace any existing record with the same (cleaned) email.
        # (The original removed entries while iterating the same list.)
        contributors = [existing for existing in contributors
                        if existing[0] != newcontrib[0]]
        contributors.append( newcontrib )
        with open("/var/local/bz-triage/contributors.cfg", 'w') as outfile:
            json.dump(contributors, outfile)
    finally:
        # Release the lock even if the rewrite fails.
        lock.release()
示例7: write_relay_log
def write_relay_log(relayNumber, relaySeconds, sensor, gpio):
    """Append one relay activation record to the relay log.

    Writers are serialized with a lockfile.LockFile; a lock that cannot be
    acquired within 60 seconds is assumed stale, broken, and re-acquired.

    :param relayNumber: relay index that was switched
    :param relaySeconds: how long the relay was on, in seconds
    :param sensor: sensor index that triggered the relay
    :param gpio: GPIO pin number of the relay
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    lock = LockFile(relay_log_lock_path)
    while not lock.i_am_locking():
        try:
            logging.debug("[Write Relay Log] Acquiring Lock: %s", lock.path)
            lock.acquire(timeout=60)  # wait up to 60 seconds
        except Exception:  # lockfile.LockTimeout, or a broken/stale lock
            logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
            lock.break_lock()
            lock.acquire()
    logging.debug("[Write Relay Log] Gained lock: %s", lock.path)
    try:
        with open(relay_log_file_tmp, "ab") as relaylog:
            relaylog.write('{0} {1:d} {2:d} {3:d} {4:.2f}\n'.format(
                datetime.datetime.now().strftime("%Y/%m/%d-%H:%M:%S"),
                sensor, relayNumber, gpio, relaySeconds))
    except Exception:
        # Best-effort append: log the failure but do not crash the caller.
        logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)
    finally:
        # Always release, even if an unexpected error escaped above.
        logging.debug("[Write Relay Log] Removing lock: %s", lock.path)
        lock.release()
示例8: locked_cache_dir
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    """Yield a cache directory under pytest's cache dir, serialized by a lock file.

    The directory is tagged with the asv version and *tag*; a cache built
    under a different tag is wiped and recreated.  The tag file is only
    rewritten after the caller's block finishes without raising.
    """
    if LockFile is DummyLock:
        # No real locking available: isolate per xdist worker instead.
        cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')
    base = six.text_type(config.cache.makedir(cache_key))
    cache_dir = join(base, 'cache')
    guard = LockFile(join(base, 'lock'))
    guard.acquire(timeout=timeout)
    try:
        tag_fn = join(base, 'tag.json')
        expected_tag = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            # Discard cache contents generated under a different asv
            # version / tag.
            try:
                if util.load_json(tag_fn) != expected_tag:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        yield cache_dir
        util.write_json(tag_fn, expected_tag)
    finally:
        guard.release()
示例9: write_relay_log
def write_relay_log(relayNumber, relaySeconds):
    """Append one relay activation row to the relay log.

    The row holds a timestamp and eight columns (relays 1-8); only the
    column of *relayNumber* carries *relaySeconds*, the rest are zero.
    Skipped when the module-level Terminate flag is set.  Writers are
    serialized with a lockfile.LockFile; a lock that cannot be acquired
    within 60 seconds is assumed stale, broken, and re-acquired.
    (An unused ConfigParser.RawConfigParser() local was removed.)
    """
    if not os.path.exists(lock_directory):
        os.makedirs(lock_directory)
    if not Terminate:
        lock = LockFile(relay_lock_path)
        while not lock.i_am_locking():
            try:
                logging.info("[Write Relay Log] Acquiring Lock: %s", lock.path)
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except Exception:  # lockfile.LockTimeout, or a broken/stale lock
                logging.warning("[Write Relay Log] Breaking Lock to Acquire: %s", lock.path)
                lock.break_lock()
                lock.acquire()
        logging.info("[Write Relay Log] Gained lock: %s", lock.path)
        # One column per relay 1-8; direct test replaces the original
        # loop over range(1, 9) that only ever matched relayNumber.
        relay = [0] * 9
        if 1 <= relayNumber <= 8:
            relay[relayNumber] = relaySeconds
        try:
            with open(relay_log_file_tmp, "ab") as relaylog:
                relaylog.write('{0} {1} {2} {3} {4} {5} {6} {7} {8}\n'.format(
                    datetime.datetime.now().strftime("%Y %m %d %H %M %S"),
                    relay[1], relay[2], relay[3], relay[4],
                    relay[5], relay[6], relay[7], relay[8]))
        except Exception:
            # Best-effort append: log the failure but do not crash the caller.
            logging.warning("[Write Relay Log] Unable to append data to %s", relay_log_file_tmp)
        finally:
            # Always release, even if an unexpected error escaped above.
            logging.info("[Write Relay Log] Removing lock: %s", lock.path)
            lock.release()
示例10: addScanResult
def addScanResult(self, scanResult, ADD_MODE=NEW_SCAN_RESULT):
    """Add *scanResult* to the token file, or update an existing entry.

    The token file is read, modified in memory, and rewritten in place
    under a file lock.

    :param scanResult: result object understood by the scan-result list
    :param ADD_MODE: NEW_SCAN_RESULT appends a new entry and returns its
        token id; TO_EXIST_SCAN_RESULT updates the entry matching the
        result's token id and returns 0.
    :return: the id of the (new) token, 0 in update mode
    """
    lock = LockFile(self.m_TokenFileName)
    # TODO(review): handle failure to acquire the lock within
    # SECONDS_WAIT_FOR_UNLOCK (translated from the original comment).
    lock.acquire(SECONDS_WAIT_FOR_UNLOCK)
    try:
        with open(self.m_TokenFileName, 'r+') as f:
            listScanResult = self.loadScanResults(f)
            idToken = 0
            if ADD_MODE == TO_EXIST_SCAN_RESULT:
                listScanResult.setScanResultByIdToken(scanResult)
            else:
                idToken = listScanResult.addScanResult(scanResult)
            f.seek(0)
            f.write(listScanResult.toJSON())
            # Bug fix: without truncate(), stale trailing bytes remain
            # whenever the new JSON is shorter than the old file content.
            f.truncate()
    finally:
        # Release the lock even if reading/writing the file fails.
        lock.release()
    return idToken
示例11: dumpTasks
def dumpTasks(filename, tasklist):
    """Write *tasklist* to *filename* as a pretty-printed JSON array.

    Each task is serialized with json.dumps on its own line.  The write
    happens under the global LOCK_FILE lock, which is now released even
    if opening or writing the file raises.
    """
    lock = LockFile(LOCK_FILE)
    lock.acquire()
    try:
        with open(filename, 'w') as f:
            f.write("[\n ")
            f.write(",\n ".join(json.dumps(task) for task in tasklist))
            f.write("\n]\n")
    finally:
        lock.release()
示例12: get_auth_token
def get_auth_token(use_client_file=True, **kwargs):
    """Return an Azure authentication token.

    During bootstrap the token comes from the instance runtime properties
    or from a token file on the client VM.  Otherwise the shared azure
    config file on the manager is read under a file lock, and the token
    is refreshed via the Azure endpoints when fewer than 600 seconds
    remain, the expiry is 0, or the token is missing/empty.

    :param use_client_file: when True, try the runtime properties and the
        client-side token file first (bootstrap path).
    :param kwargs: unused; accepted for call-site compatibility.
    :raises NonRecoverableError: if the shared config file cannot be
        locked, read, or parsed.
    """
    ctx.logger.info("In auth.get_auth_token")
    if use_client_file:
        if constants.AUTH_TOKEN_VALUE in ctx.instance.runtime_properties:
            # If you are here, it means that this is during bootstrap.
            ctx.logger.info("In auth.get_auth_token returning token from runtime props")
            ctx.logger.info("In auth.get_auth_token token from runtime props is:{}".format(ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]))
            return ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE]
        # Check if a token file exists on the client's VM. If so, take the
        # value from it and cache it in the runtime properties.
        ctx.logger.info("In auth.get_auth_token checking local azure file path {}".format(constants.path_to_local_azure_token_file))
        if os.path.isfile(constants.path_to_local_azure_token_file):
            # If you are here, it means that this is during bootstrap.
            ctx.logger.info("{} exists".format(constants.path_to_local_azure_token_file))
            token, token_expires = get_token_from_client_file()
            ctx.logger.info("get_auth_token expiry is {} ".format(token_expires))
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_VALUE] = token
            ctx.instance.runtime_properties[constants.AUTH_TOKEN_EXPIRY] = token_expires
            ctx.logger.info("get_auth_token token1 is {} ".format(token))
            return token
    # From here, this is not during bootstrap, which also means that this
    # code runs on the manager's VM.
    try:
        ctx.logger.info("In auth.get_auth_token b4 locking {}".format(constants.path_to_azure_conf))
        lock = LockFile(constants.path_to_azure_conf)
        lock.acquire()
        ctx.logger.info("{} is locked".format(lock.path))
        with open(constants.path_to_azure_conf, 'r') as f:
            json_data = json.load(f)
            token_expires = json_data["token_expires"]
            token = json_data["auth_token"]
        ctx.logger.info("get_auth_token token2 is {} ".format(token))
    except:
        # NOTE(review): this bare except hides the real failure, and if the
        # lock was already acquired it is never released on this path --
        # confirm whether that is intended.
        raise NonRecoverableError("Failures while locking or using {}".format(constants.path_to_azure_conf))
    ctx.logger.info("In auth.get_auth_token b4 timestamp")
    timestamp = int(time.time())
    ctx.logger.info("In auth.get_auth_token timestamp is {}".format(timestamp))
    ctx.logger.info("In auth.get_auth_token token_expires1 is {}".format(token_expires))
    token_expires = int(token_expires)
    ctx.logger.info("In auth.get_auth_token token_expires2 is {}".format(token_expires))
    # Refresh when close to expiry (<= 600 s), expired, or missing.
    if token_expires-timestamp <= 600 or token_expires == 0 or token is None or token == "":
        ctx.logger.info("In auth.get_auth_token token_expires-timestamp {}".format(token_expires-timestamp))
        endpoints, payload = _get_payload_endpoints()
        token, token_expires = _get_token_value_expiry(endpoints, payload)
        ctx.logger.info("get_auth_token token3 is {} ".format(token))
        ctx.logger.info("In auth.get_auth_token b4 opening {}".format(constants.path_to_azure_conf))
        # Persist the refreshed token back into the shared config file.
        with open(constants.path_to_azure_conf, 'r+') as f:
            json_data = json.load(f)
            json_data["auth_token"] = token
            json_data["token_expires"] = token_expires
            f.seek(0)
            # NOTE(review): no truncate() after seek(0) -- stale trailing
            # bytes remain if the new JSON is shorter. Confirm.
            f.write(json.dumps(json_data))
            # Redundant: the with-statement closes f anyway.
            f.close()
    lock.release()
    ctx.logger.info("{} is released".format(lock.path))
    ctx.logger.info("get_auth_token token4 is {} ".format(token))
    return token
示例13: sync_folder
def sync_folder(self):
    """Run the folder synchronization while holding both folder locks.

    Acquires the encrypted-folder lock first, then the plaintext-folder
    lock, and only then delegates to _do_sync_folder().  When a lock is
    already held elsewhere, an informational message is emitted before
    blocking on it.
    """
    outer_lock = LockFile(self.encrypted_folder)
    if outer_lock.is_locked():
        self.info("Acquiring the lock of encrypted folder...")
    with outer_lock:
        inner_lock = LockFile(self.plain_folder)
        if inner_lock.is_locked():
            self.info("Acquiring the lock of plaintext folder...")
        with inner_lock:
            self._do_sync_folder()
示例14: _download_rpm
def _download_rpm(self, nvr, arch):
    """Download <nvr>.<arch>.rpm from koji into the local RPM cache.

    Concurrent downloads of the same file are serialized with a lock
    file: if another thread already holds the lock, wait for it to
    finish and assume it downloaded the file.

    :param nvr: build name-version-release string
    :param arch: architecture string
    :return: path to the cached RPM, or an error dict when the build
        does not exist
    :raises ValueError: on missing arguments or path elements in nvr/arch
    """
    if nvr is None or arch is None:
        raise ValueError("Invalid option passed to connector")
    filename = '%s.%s.rpm' % (nvr, arch)
    # Reject values that would escape the cache directory.
    file_path = os.path.split(filename)
    if file_path[0] != '':
        raise ValueError("Nvr can not contain path elements")
    if len(arch.split('/')) != 1 or os.path.split(arch)[0] != '':
        raise ValueError("Arch can not contain path elements")
    rpm_file_path = os.path.join(self._rpm_cache, filename)
    if os.path.exists(rpm_file_path):
        return rpm_file_path
    # Bug fix: LockFile was previously given `file_path`, the *tuple*
    # returned by os.path.split() -- lock on the cache file path instead.
    # TODO(review): confirm the intended lock path.
    lockfile = LockFile(rpm_file_path)
    if lockfile.is_locked():
        # Block until the lock is released and then assume the other
        # thread was successful.
        lockfile.acquire()
        lockfile.release()
        return rpm_file_path
    # Acquire the lock and release when done.
    lockfile.acquire()
    try:
        info = self.call('getBuild', {'buildInfo': nvr})
        if info is None:
            return {'error': 'No such build (%s)' % filename}
        if not os.path.exists(self._rpm_cache):
            os.mkdir(self._rpm_cache,)
        url = '%s/%s/%s/%s/%s/%s' % (
            self._koji_pkg_url, info['name'], info['version'],
            info['release'], arch, filename)
        url_file = grabber.urlopen(url, text=filename)
        out = os.open(
            rpm_file_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o666)
        try:
            # Stream the download to disk in 4 KiB chunks; the former
            # `except Exception as e: raise e` was a no-op and is gone.
            while 1:
                buf = url_file.read(4096)
                if not buf:
                    break
                os.write(out, buf)
        finally:
            os.close(out)
        url_file.close()
    finally:
        lockfile.release()
    return rpm_file_path
示例15: on_post
def on_post(self, req, resp, id):
    """Deploy task *id* from its git branch (falcon POST handler).

    Validates caller permissions and task state, marks the task as
    'deploying', and starts a background thread that performs the actual
    deploy under a file lock.  Responds with the appropriate HTTP status
    on each failure path.
    """
    try:
        user = req.context['user']
        # Permission check: only logged-in organizers may deploy.
        if (not user.is_logged_in()) or (not user.is_org()):
            req.context['result'] = 'Nedostatecna opravneni'
            resp.status = falcon.HTTP_400
            return
        # The task must exist.
        task = session.query(model.Task).get(id)
        if task is None:
            req.context['result'] = 'Neexistujici uloha'
            resp.status = falcon.HTTP_404
            return
        # Published tasks may only be deployed by administrators.
        wave = session.query(model.Wave).get(task.wave)
        if (datetime.datetime.utcnow() > wave.time_published) and (not user.is_admin()):
            req.context['result'] = 'Po zverejneni ulohy muze deploy provest pouze administrator'
            resp.status = falcon.HTTP_404
            return
        # The task must have a git branch and directory recorded in the DB.
        if (task.git_branch is None) or (task.git_path is None):
            req.context['result'] = 'Uloha nema zadanou gitovskou vetev nebo adresar'
            resp.status = falcon.HTTP_400
            return
        # Global git lock check.
        lock = util.lock.git_locked()
        if lock:
            req.context['result'] = 'GIT uzamcen zamkem ' + lock + "\nNekdo momentalne provadi akci s gitem, opakujte prosim akci za 20 sekund."
            resp.status = falcon.HTTP_409
            return
        # The 'deploying' status must be set in this thread (not the worker).
        task.deploy_status = 'deploying'
        session.commit()
        try:
            deployLock = LockFile(util.admin.taskDeploy.LOCKFILE)
            deployLock.acquire(60)  # lock timeout is 1 minute
            deployThread = threading.Thread(target=util.admin.taskDeploy.deploy, args=(task.id, deployLock, scoped_session(_session)), kwargs={})
            deployThread.start()
        finally:
            # NOTE(review): the lock is released here immediately after
            # starting the worker thread, which also receives deployLock --
            # confirm the intended lock ownership; this looks racy.  Also,
            # if LockFile()/acquire() raises, this release references an
            # unacquired (or unbound) lock.
            deployLock.release()
        resp.status = falcon.HTTP_200
    except SQLAlchemyError:
        session.rollback()
        raise
    finally:
        session.close()