

Python Activity.finish Method Code Examples

This article collects typical usage examples of the Python method grid_control.utils.activity.Activity.finish, compiled from open-source code. If you are unsure what Activity.finish does or how to use it, the curated code examples below should help. You can also explore further usage examples of grid_control.utils.activity.Activity, the class this method belongs to.


The following presents 15 code examples of Activity.finish, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
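Before diving into the examples, note the pattern they all share: an Activity is created with a status message, optionally refreshed via update() while work is in progress, and closed with finish() once the work is done or aborted. Below is a minimal sketch of this lifecycle, assuming only the Activity constructor and the update/finish methods seen in the examples; process_item is a hypothetical placeholder for the actual per-item work:

from grid_control.utils.activity import Activity

def process_all(item_list):
	activity = Activity('Processing items')  # start displaying the status message
	try:
		for idx, item in enumerate(item_list):
			# refresh the displayed progress while working
			activity.update('Processing items [%d / %d]' % (idx + 1, len(item_list)))
			process_item(item)  # hypothetical per-item work
	finally:
		activity.finish()  # always close the activity, even on error (cf. Example 6)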

Example 1: _saveStateToTar

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _saveStateToTar(self, tar, meta, source, sourceLen, message):
		# Write the splitting info grouped into subtarfiles
		activity = Activity(message)
		(jobNum, lastValid, subTar) = (-1, -1, None)
		for jobNum, entry in enumerate(source):
			if not entry.get(DataSplitter.Invalid, False):
				lastValid = jobNum
			if jobNum % self._keySize == 0:
				self._closeSubTar(tar, subTar)
				subTar = self._createSubTar('%03dXX.tgz' % int(jobNum / self._keySize))
				activity.update('%s [%d / %d]' % (message, jobNum, sourceLen))
			# Determine shortest way to store file list
			tmp = entry.pop(DataSplitter.FileList)
			savelist = self._getReducedFileList(entry, tmp) # can modify entry
			# Write files with infos / filelist
			data = str.join('', self._fmt.format(entry, fkt = self._formatFileEntry) + lmap(lambda fn: '=%s\n' % fn, savelist))
			self._addToSubTar(subTar, '%05d' % jobNum, data)
			# Remove common prefix from info
			if DataSplitter.CommonPrefix in entry:
				entry.pop(DataSplitter.CommonPrefix)
			entry[DataSplitter.FileList] = tmp
		self._closeSubTar(tar, subTar)
		activity.finish()
		# Write metadata to allow reconstruction of data splitter
		meta['MaxJobs'] = lastValid + 1
		for (fn, data) in [('Metadata', self._fmt.format(meta)), ('Version', '2')]:
			self._addToTar(tar, fn, data)
Developer: Fra-nk, Project: grid-control, Lines: 29, Source: splitter_io.py

Example 2: execute

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def execute(self, wmsIDs, wmsName): # yields list of (wmsID,)
		marked_wmsIDs = lmap(lambda result: result[0], self._cancel_executor.execute(wmsIDs, wmsName))
		time.sleep(5)
		activity = Activity('Purging jobs')
		for result in self._purge_executor.execute(marked_wmsIDs, wmsName):
			yield result
		activity.finish()
Developer: Fra-nk, Project: grid-control, Lines: 9, Source: aspect_cancel.py

Example 3: create_tarball

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
def create_tarball(match_info_iter, **kwargs):
	tar = tarfile.open(mode='w:gz', **kwargs)
	activity = Activity('Generating tarball')
	for match_info in match_info_iter:
		if isinstance(match_info, tuple):
			(path_source, path_target) = match_info
		else:
			(path_source, path_target) = (match_info, None)
		if isinstance(path_source, str):
			if not os.path.exists(path_source):
				raise PathError('File %s does not exist!' % path_source)
			tar.add(path_source, path_target or os.path.basename(path_source), recursive=False)
		elif path_source is None:  # Update activity
			activity.update('Generating tarball: %s' % path_target)
		else:  # File handle
			info, handle = path_source.get_tar_info()
			if path_target:
				info.name = path_target
			info.mtime = time.time()
			info.mode = stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP + stat.S_IROTH
			if info.name.endswith('.sh') or info.name.endswith('.py'):
				info.mode += stat.S_IXUSR + stat.S_IXGRP + stat.S_IXOTH
			tar.addfile(info, handle)
			handle.close()
	activity.finish()
	tar.close()
Developer: grid-control, Project: grid-control, Lines: 28, Source: __init__.py

Example 4: submit_jobs

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def submit_jobs(self, jobnum_list, task):
		requestLen = len(jobnum_list)
		activity = Activity('Submitting jobs (--%)')
		while jobnum_list:
			jobSubmitNumList = jobnum_list[-self._schedd.getSubmitScale():]
			del jobnum_list[-self._schedd.getSubmitScale():]
			activity = Activity('Submitting jobs (%2d%%)'%(100*(requestLen-len(jobnum_list))/requestLen))
			for jobnum in jobSubmitNumList:
				self._write_job_config(
					self.getJobCfgPath(jobnum)[0],
					jobnum,
					task, {}
					)
			rawJobInfoMaps = self._schedd.submit_jobs(
				jobSubmitNumList, 
				task,
				self._getQueryArgs()
				)
			# Yield (jobnum, gc_id, other data) per job
			jobInfoMaps = self._digestQueueInfoMaps(rawJobInfoMaps)
			for htcID in jobInfoMaps:
				yield (
					htcID.gcJobNum,
					self._createGcId(htcID),
					jobInfoMaps[htcID]
					)
		activity.finish()
Developer: grid-control, Project: grid-control, Lines: 29, Source: htcondor_wms.py

Example 5: _get_jobs_output

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _get_jobs_output(self, gc_id_jobnum_list):
		# retrieve task output files from sandbox directory
		if not len(gc_id_jobnum_list):
			raise StopIteration

		activity = Activity('retrieving job outputs')
		for gc_id, jobnum in gc_id_jobnum_list:
			sandpath = self._get_sandbox_dn(jobnum)
			if sandpath is None:
				yield (jobnum, None)
				continue
			# when working with a remote spool schedd, tell condor to return files
			if self._remote_type == PoolType.SPOOL:
				self._check_and_log_proc(self._proc_factory.logged_execute(
					self._transfer_exec, self._split_gc_id(gc_id)[1]))
			# when working with a remote [gsi]ssh schedd, manually return files
			elif self._remote_type in (PoolType.SSH, PoolType.GSISSH):
				self._check_and_log_proc(self._proc_factory.logged_copy_from_remote(
					self._get_remote_output_dn(jobnum), self._get_sandbox_dn()))
				# clean up remote working directory
				self._check_and_log_proc(self._proc_factory.logged_execute(
					'rm -rf %s' % self._get_remote_output_dn(jobnum)))
			# eventually extract wildcarded output files from the tarball
			unpack_wildcard_tar(self._log, sandpath)
			yield (jobnum, sandpath)
		# clean up if necessary
		activity.finish()
		self._cleanup_remote_output_dn()
Developer: mschnepf, Project: grid-control, Lines: 30, Source: condor_wms.py

Example 6: hash_verify

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
def hash_verify(opts, status_mon, local_se_path, jobnum, fi_idx, fi):
	if not opts.verify_md5:
		return status_mon.register_file_result(jobnum, fi_idx, 'Download successful',
			FileDownloadStatus.FILE_OK)
	# Verify => compute md5hash
	remote_hash = fi[FileInfo.Hash]
	activity = Activity('Verifying checksum')
	try:
		local_hash = ignore_exception(Exception, None, hash_calc, local_se_path.replace('file://', ''))
		if local_hash is None:
			return status_mon.register_file_result(jobnum, fi_idx, 'Unable to calculate checksum',
				FileDownloadStatus.FILE_HASH_FAILED)
	finally:
		activity.finish()
	hash_match = fi[FileInfo.Hash] == local_hash
	match_map = {True: 'MATCH', False: 'FAIL'}
	if ANSI is not None:
		match_map = {True: ANSI.reset + ANSI.color_green + 'MATCH' + ANSI.reset,
			False: ANSI.reset + ANSI.color_red + 'FAIL' + ANSI.reset}
	msg = '\tLocal  hash: %s\n' % local_hash + \
		log_intro(jobnum, fi_idx) + '\tRemote hash: %s\n' % remote_hash + \
		log_intro(jobnum, fi_idx) + 'Checksum comparison: ' + match_map[hash_match]
	if hash_match:
		return status_mon.register_file_result(jobnum, fi_idx, msg, FileDownloadStatus.FILE_OK)
	return status_mon.register_file_result(jobnum, fi_idx, msg, FileDownloadStatus.FILE_HASH_FAILED)
Developer: grid-control, Project: grid-control, Lines: 27, Source: se_output_download.py

Example 7: _resync_psrc

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _resync_psrc(self):
		activity = Activity('Performing resync of datasource %r' % self.get_datasource_name())
		# Get old and new dataset information
		provider_old = DataProvider.load_from_file(self._get_data_path('cache.dat'))
		block_list_old = provider_old.get_block_list_cached(show_stats=False)
		self._provider.clear_cache()
		block_list_new = self._provider.get_block_list_cached(show_stats=False)
		self._provider.save_to_file(self._get_data_path('cache-new.dat'), block_list_new)

		# Use old splitting information to synchronize with new dataset infos
		partition_len_old = self.get_parameter_len()
		partition_changes = self._resync_partitions(
			self._get_data_path('map-new.tar'), block_list_old, block_list_new)
		activity.finish()
		if partition_changes is not None:
			# Move current splitting to backup and use the new splitting from now on
			def _rename_with_backup(new, cur, old):
				if self._keep_old:
					os.rename(self._get_data_path(cur), self._get_data_path(old))
				os.rename(self._get_data_path(new), self._get_data_path(cur))
			_rename_with_backup('map-new.tar', 'map.tar', 'map-old-%d.tar' % time.time())
			_rename_with_backup('cache-new.dat', 'cache.dat', 'cache-old-%d.dat' % time.time())
			self._set_reader(DataSplitter.load_partitions(self._get_data_path('map.tar')))
			self._log.debug('Dataset resync finished: %d -> %d partitions', partition_len_old, self._len)
			(pnum_list_redo, pnum_list_disable) = partition_changes
			return (set(pnum_list_redo), set(pnum_list_disable), partition_len_old != self._len)
Developer: grid-control, Project: grid-control, Lines: 28, Source: psource_data.py

Example 8: __init__

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def __init__(self, block_list_old, block_list_new):
		activity = Activity('Performing resynchronization of dataset')
		block_resync_tuple = DataProvider.resync_blocks(block_list_old, block_list_new)
		(self.block_list_added, self._block_list_missing, self._block_list_matching) = block_resync_tuple
		for block_missing in self._block_list_missing:  # Files in matching blocks are already sorted
			sort_inplace(block_missing[DataProvider.FileList], key=itemgetter(DataProvider.URL))
		activity.finish()
Developer: grid-control, Project: grid-control, Lines: 9, Source: resync_reorder.py

Example 9: _submit_jobs

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _submit_jobs(self, jobnum_list, task):
		# submit_jobs: Submit a number of jobs and yield (jobnum, WMS ID, other data) sequentially
		# >>jobnum: internal ID of the Job
		# JobNum is linked to the actual *task* here
		(jdl_fn, submit_jdl_fn) = self._submit_jobs_prepare(jobnum_list, task)
		try:
			# submit all jobs simultaneously and temporarily store verbose (ClassAdd) output
			activity = Activity('queuing jobs at scheduler')
			submit_args = ' -verbose -batch-name ' + task.get_description().task_name + ' ' + submit_jdl_fn
			proc = self._proc_factory.logged_execute(self._submit_exec, submit_args)

			# extract the Condor ID (WMS ID) of the jobs from output ClassAds
			jobnum_gc_id_list = []
			for line in proc.iter():
				if 'GridControl_GCIDtoWMSID' in line:
					jobnum_wms_id = line.split('=')[1].strip(' "\n').split('@')
					jobnum, wms_id = int(jobnum_wms_id[0]), jobnum_wms_id[1].strip()
					# Condor creates a default job then overwrites settings on any subsequent job
					# i.e. skip every second, but better be sure
					if (not jobnum_gc_id_list) or (jobnum not in lzip(*jobnum_gc_id_list)[0]):
						jobnum_gc_id_list.append((jobnum, self._create_gc_id(wms_id)))

			exit_code = proc.wait()
			activity.finish()
			if (exit_code != 0) or (len(jobnum_gc_id_list) < len(jobnum_list)):
				if not self._explain_error(proc, exit_code):
					self._log.error('Submitted %4d jobs of %4d expected',
						len(jobnum_gc_id_list), len(jobnum_list))
					proc.log_error(self._error_log_fn, jdl=jdl_fn)
		finally:
			remove_files([jdl_fn])

		for (jobnum, gc_id) in jobnum_gc_id_list:
			yield (jobnum, gc_id, {})
Developer: mschnepf, Project: grid-control, Lines: 36, Source: condor_wms.py

Example 10: _submitJob

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _submitJob(self, jobNum, module):
		fd, jdl = tempfile.mkstemp('.jdl')
		try:
			jdlData = self.makeJDL(jobNum, module)
			utils.safeWrite(os.fdopen(fd, 'w'), jdlData)
		except Exception:
			utils.removeFiles([jdl])
			raise BackendError('Could not write jdl data to %s.' % jdl)

		try:
			submitArgs = []
			for key_value in utils.filterDict(self._submitParams, vF = lambda v: v).items():
				submitArgs.extend(key_value)
			submitArgs.append(jdl)

			activity = Activity('submitting job %d' % jobNum)
			proc = LocalProcess(self._submitExec, '--nomsg', '--noint', '--logfile', '/dev/stderr', *submitArgs)

			gcID = None
			for line in ifilter(lambda x: x.startswith('http'), imap(str.strip, proc.stdout.iter(timeout = 60))):
				gcID = line
			retCode = proc.status(timeout = 0, terminate = True)

			activity.finish()

			if (retCode != 0) or (gcID is None):
				if self.explainError(proc, retCode):
					pass
				else:
					self._log.log_process(proc, files = {'jdl': SafeFile(jdl).read()})
		finally:
			utils.removeFiles([jdl])
		return (jobNum, utils.QM(gcID, self._createId(gcID), None), {'jdl': str.join('', jdlData)})
Developer: Fra-nk, Project: grid-control, Lines: 35, Source: wms_grid.py

Example 11: _resync

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _resync(self):
		if self._data_provider:
			activity = Activity('Performing resync of datasource %r' % self._name)
			# Get old and new dataset information
			ds_old = DataProvider.loadFromFile(self._getDataPath('cache.dat')).getBlocks(show_stats = False)
			self._data_provider.clearCache()
			ds_new = self._data_provider.getBlocks(show_stats = False)
			self._data_provider.saveToFile(self._getDataPath('cache-new.dat'), ds_new)

			# Use old splitting information to synchronize with new dataset infos
			old_maxN = self._data_splitter.getMaxJobs()
			jobChanges = self._data_splitter.resyncMapping(self._getDataPath('map-new.tar'), ds_old, ds_new)
			activity.finish()
			if jobChanges is not None:
				# Move current splitting to backup and use the new splitting from now on
				def backupRename(old, cur, new):
					if self._keepOld:
						os.rename(self._getDataPath(cur), self._getDataPath(old))
					os.rename(self._getDataPath(new), self._getDataPath(cur))
				backupRename(  'map-old-%d.tar' % time.time(),   'map.tar',   'map-new.tar')
				backupRename('cache-old-%d.dat' % time.time(), 'cache.dat', 'cache-new.dat')
				self._data_splitter.importPartitions(self._getDataPath('map.tar'))
				self._maxN = self._data_splitter.getMaxJobs()
				self._log.debug('Dataset resync finished: %d -> %d partitions', old_maxN, self._maxN)
				return (set(jobChanges[0]), set(jobChanges[1]), old_maxN != self._maxN)
Developer: Fra-nk, Project: grid-control, Lines: 27, Source: psource_data.py

Example 12: _read_jobs

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _read_jobs(self, job_limit):
		ensure_dir_exists(self._path_db, 'job database directory', JobError)

		candidates = []
		for job_fn in fnmatch.filter(os.listdir(self._path_db), 'job_*.txt'):
			try:  # 2xsplit is faster than regex
				jobnum = int(job_fn.split(".")[0].split("_")[1])
			except Exception:
				clear_current_exception()
				continue
			candidates.append((jobnum, job_fn))

		(job_map, max_job_len) = ({}, len(candidates))
		activity = Activity('Reading job infos')
		idx = 0
		for (jobnum, job_fn) in sorted(candidates):
			idx += 1
			if jobnum >= job_limit >= 0:
				self._log.info('Stopped reading job infos at job #%d out of %d available job files, ' +
					'since the limit of %d jobs is reached', jobnum, len(candidates), job_limit)
				break
			try:
				job_fn_full = os.path.join(self._path_db, job_fn)
				data = self._fmt.parse(SafeFile(job_fn_full).iter_close())
				job_obj = self._create_job_obj(job_fn_full, data)
			except Exception:
				raise JobError('Unable to process job file %r' % job_fn_full)
			job_map[jobnum] = job_obj
			activity.update('Reading job infos %d [%d%%]' % (idx, (100.0 * idx) / max_job_len))
		activity.finish()
		return job_map
Developer: grid-control, Project: grid-control, Lines: 33, Source: job_db_text.py

Example 13: _readJobs

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _readJobs(self, jobLimit):
		utils.ensureDirExists(self._dbPath, 'job database directory', JobError)

		candidates = []
		for jobFile in fnmatch.filter(os.listdir(self._dbPath), 'job_*.txt'):
			try: # 2xsplit is faster than regex
				jobNum = int(jobFile.split(".")[0].split("_")[1])
			except Exception:
				continue
			candidates.append((jobNum, jobFile))

		(jobMap, maxJobs) = ({}, len(candidates))
		activity = Activity('Reading job infos')
		idx = 0
		for (jobNum, jobFile) in sorted(candidates):
			idx += 1
			if (jobLimit >= 0) and (jobNum >= jobLimit):
				self._log.info('Stopped reading job infos at job #%d out of %d available job files, since the limit of %d jobs is reached',
					jobNum, len(candidates), jobLimit)
				break
			jobObj = self._load_job(os.path.join(self._dbPath, jobFile))
			jobMap[jobNum] = jobObj
			if idx % 100 == 0:
				activity.update('Reading job infos %d [%d%%]' % (idx, (100.0 * idx) / maxJobs))
		activity.finish()
		return jobMap
Developer: Fra-nk, Project: grid-control, Lines: 28, Source: job_db_text.py

Example 14: __init__

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def __init__(self, lockfile):
		self._lockfile = lockfile
		activity = Activity('Trying to acquire lock file %s ...' % lockfile)
		while os.path.exists(self._lockfile):
			time.sleep(0.2)
		activity.finish()
		self._fd = open(self._lockfile, 'w')
		fcntl.flock(self._fd, fcntl.LOCK_EX)
Developer: grid-control, Project: grid-control, Lines: 10, Source: dataset_dbs3_add.py

Example 15: _getJobsOutput

# Required import: from grid_control.utils.activity import Activity [as alias]
# Or: from grid_control.utils.activity.Activity import finish [as alias]
	def _getJobsOutput(self, ids):
		if len(ids) == 0:
			raise StopIteration

		basePath = os.path.join(self._outputPath, 'tmp')
		try:
			if len(ids) == 1:
				# For single jobs create single subdir
				tmpPath = os.path.join(basePath, md5(ids[0][0]).hexdigest())
			else:
				tmpPath = basePath
			utils.ensureDirExists(tmpPath)
		except Exception:
			raise BackendError('Temporary path "%s" could not be created.' % tmpPath, BackendError)

		jobNumMap = dict(ids)
		jobs = self.writeWMSIds(ids)

		activity = Activity('retrieving %d job outputs' % len(ids))
		proc = LocalProcess(self._outputExec, '--noint', '--logfile', '/dev/stderr', '-i', jobs, '--dir', tmpPath)

		# yield output dirs
		todo = jobNumMap.values()
		currentJobNum = None
		for line in imap(str.strip, proc.stdout.iter(timeout = 60)):
			if line.startswith(tmpPath):
				todo.remove(currentJobNum)
				outputDir = line.strip()
				if os.path.exists(outputDir):
					if 'GC_WC.tar.gz' in os.listdir(outputDir):
						wildcardTar = os.path.join(outputDir, 'GC_WC.tar.gz')
						try:
							tarfile.TarFile.open(wildcardTar, 'r:gz').extractall(outputDir)
							os.unlink(wildcardTar)
						except Exception:
							self._log.error('Can\'t unpack output files contained in %s', wildcardTar)
				yield (currentJobNum, line.strip())
				currentJobNum = None
			else:
				currentJobNum = jobNumMap.get(self._createId(line), currentJobNum)
		retCode = proc.status(timeout = 0, terminate = True)
		activity.finish()

		if retCode != 0:
			if 'Keyboard interrupt raised by user' in proc.stderr.read(timeout = 0):
				utils.removeFiles([jobs, basePath])
				raise StopIteration
			else:
				self._log.log_process(proc, files = {'jobs': SafeFile(jobs).read()})
			self._log.error('Trying to recover from error ...')
			for dirName in os.listdir(basePath):
				yield (None, os.path.join(basePath, dirName))

		# return unretrievable jobs
		for jobNum in todo:
			yield (jobNum, None)

		utils.removeFiles([jobs, basePath])
Developer: Fra-nk, Project: grid-control, Lines: 60, Source: wms_grid.py


Note: The grid_control.utils.activity.Activity.finish method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce without permission.