This page collects typical usage examples of the Python method gppylib.commands.base.WorkerPool.addCommand: what the method does, how it is called, and how it behaves in real code. You can also explore other uses of the containing class, gppylib.commands.base.WorkerPool.
The 15 code examples below show WorkerPool.addCommand in context, sorted by popularity by default.
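Before the individual examples, here is a minimal sketch of the lifecycle that nearly all of them follow: construct a pool, queue commands with addCommand, join, inspect the completed items, and always halt the workers. The pool size, host names, and command string below are illustrative assumptions, not taken from any single example:

from gppylib.commands.base import WorkerPool, Command, REMOTE

hosts = ['sdw1', 'sdw2']  # illustrative host names
pool = WorkerPool(numWorkers=min(len(hosts), 16))
try:
    for host in hosts:
        # Each queued Command is picked up and run by a pool worker thread.
        pool.addCommand(Command(host, 'hostname', ctxt=REMOTE, remoteHost=host))
    pool.join()  # block until every queued command has finished
    for cmd in pool.getCompletedItems():
        res = cmd.get_results()
        if not res.wasSuccessful():
            raise Exception('command failed on %s: %s' % (cmd.remoteHost, res.stderr))
finally:
    # Always stop the worker threads, or the process may hang on exit.
    pool.haltWork()
    pool.joinWorkers()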
Example 1: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def execute(self):
    entry = ValidateVerificationEntry(token = self.token).run()
    if not entry['verdone']:
        raise WrongStateError("Only finished verification tasks may be cleaned up.")

    path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
    Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
    #RemoveTree(path).run()

    to_clean = ValidateVerification(content = entry['vercontent'],
                                    primaries_only = False).run()
    pool = WorkerPool(min(len(to_clean), self.batch_default))
    for seg in to_clean:
        host = seg.getSegmentHostName()
        path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
        cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
        pool.addCommand(cmd)

    logger.info('Waiting for clean commands to complete...')
    pool.wait_and_printdots(len(to_clean))

    for cmd in pool.getCompletedItems():
        res = cmd.get_results()
        if not res.wasSuccessful():
            logger.error('Failed to send cleanup on %s' % cmd.host)
            logger.error('Error: %s' % res.stderr)
            raise CleanVerificationError()
    RemoveVerificationEntry(token = self.token).run()
    logger.info('Verification %s has been cleaned.' % self.token)
Example 2: validate_nic_down
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def validate_nic_down(self):
    """
    Ping validation on the nics.
    """
    pool = WorkerPool()
    try:
        for nic, hostname in self.nic_to_address_map:
            address = self.nic_to_address_map[(nic, hostname)]
            cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')
            pool.addCommand(cmd)
        pool.join()

        for cmd in pool.getCompletedItems():
            results = cmd.get_results()
            if results.rc == 0:
                return False
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool.join()

    tinctest.logger.info("Successfully brought down nics ...")
    return True
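Note the inverted logic: this method validates that the NICs are down, so a single successful ping (rc == 0) means an address is still reachable and the method returns False. True is returned only when every ping failed.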
Example 3: ConcurrentFilespaceMoveTestCase
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
class ConcurrentFilespaceMoveTestCase(unittest.TestCase):
    """ This test suite tests the scenario of running gpfilespace concurrently while
        trying to move the filespace.
        The expected behavior is that only one of the processes succeeds and the
        rest error out."""

    ALREADY_RUNNING_MSG = 'Another instance of gpfilespace is already running!'

    def setUp(self):
        self.pool = None
        self.pool = WorkerPool()

    def tearDown(self):
        if self.pool:
            self.pool.haltWork()
            self.pool.joinWorkers()
            self.pool.join()

    def get_move_filespace_cmd(self, filespace='myfspc', file_type=FileType.TEMPORARY_FILES):
        if file_type == FileType.TEMPORARY_FILES:
            file_type = 'movetempfiles'
        elif file_type == FileType.TRANSACTION_FILES:
            file_type = 'movetransfiles'
        return Command(name='move filespace', cmdStr='gpfilespace --%s %s' % (file_type, filespace))

    def run_concurrently(self, cmd_list):
        for cmd in cmd_list:
            self.pool.addCommand(cmd)
        self.pool.join()

    def check_concurrent_execution_result(self, execution_results):
        succeeded = 0
        for cmd in execution_results:
            results = cmd.get_results().stdout.strip()
            if self.ALREADY_RUNNING_MSG in results:
                continue
            succeeded += 1
        self.assertEqual(succeeded, 1)

    def test00_move_temp_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test01_move_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test02_move_temp_and_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES), self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())
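Two details worth copying from this test: setUp assigns self.pool = None before calling the WorkerPool constructor, so the "if self.pool" guard in tearDown stays safe even if construction fails; and check_concurrent_execution_result counts only the commands whose output lacks ALREADY_RUNNING_MSG, asserting that exactly one of the concurrent gpfilespace invocations won.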
Example 4: get_host_status
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def get_host_status(hostlist):
    """
    Test if SSH command works on a host and return a dictionary
    Return Ex: {host1: True, host2: False}
    where True represents SSH command success and False represents failure
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    pool = WorkerPool(min(len(hostlist), 16))

    for host in hostlist:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)
    pool.join()
    pool.haltWork()

    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            host_status_dict[cmd.remoteHost] = False
        else:
            host_status_dict[cmd.remoteHost] = True
    return host_status_dict
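A hypothetical invocation (host names are illustrative):

status = get_host_status(['sdw1', 'sdw2'])
for host, reachable in status.items():
    # True means the SSH echo succeeded on that host.
    print('%s: %s' % (host, 'ssh ok' if reachable else 'ssh failed'))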
Example 5: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def execute(self):
    pool = WorkerPool()
    try:
        for seg in self.segments:
            datadir = seg.getSegmentDataDirectory()
            postmaster_pid_file = '%s/postmaster.pid' % datadir
            shared_mem = None
            if os.path.isfile(postmaster_pid_file):
                with open(postmaster_pid_file) as fp:
                    shared_mem = fp.readlines()[-1].split()[-1].strip()
            if shared_mem:
                cmd = Command('clean up shared memory', cmdStr="ipcrm -m %s" % shared_mem)
                pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()

            # This code is usually called after a GPDB segment has
            # been terminated. In that case, it is possible that
            # the shared memory has already been freed by the
            # time we are called to clean up. Due to this race
            # condition, it is possible to get an `ipcrm: invalid
            # id1` error from ipcrm. We, therefore, ignore it.
            if result.rc != 0 and not result.stderr.startswith("ipcrm: invalid id"):
                raise Exception('Unable to clean up shared memory for segment: (%s)' % (result.stderr))
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 6: impl
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def impl(context, tablename, dbname, poolname):
    pool = WorkerPool(numWorkers=1)
    cmd = Command(name='drop a table in a worker pool', cmdStr="""psql -c "DROP TABLE %s" -d %s""" % (tablename, dbname))
    pool.addCommand(cmd)
    if not hasattr(context, 'pool'):
        context.pool = {}
    context.pool[poolname] = pool
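This behave step only queues the DROP TABLE and stashes the pool on the context; nothing here waits for the command. Presumably a later step joins the pool and verifies the result. A hypothetical companion step might look like this (the step text is an assumption, not taken from the source):

from behave import then

@then('the worker pool "{poolname}" completes successfully')
def impl(context, poolname):
    pool = context.pool[poolname]
    pool.join()
    pool.check_results()  # raises if any queued command failed
    pool.haltWork()
    pool.joinWorkers()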
Example 7: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def execute(self):
    gparray = GpArray.initFromCatalog(dbconn.DbURL(port=self.master_port), utility=True)
    from_host, from_path = self.host, self.path
    logger.info("Commencing remote database dump file recovery process, please wait...")
    segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary(current_role=True) or seg.isSegmentMaster()]
    pool = WorkerPool(numWorkers = min(len(segs), self.batch_default))
    for seg in segs:
        if seg.isSegmentMaster():
            file = '%s%s' % (MASTER_DBDUMP_PREFIX, self.restore_timestamp)
        else:
            file = '%s0_%d_%s' % (DBDUMP_PREFIX, seg.getSegmentDbId(), self.restore_timestamp)
        if self.compress:
            file += '.gz'

        to_host = seg.getSegmentHostName()
        to_path = os.path.join(seg.getSegmentDataDirectory(), DUMP_DIR, self.restore_timestamp[0:8])
        if not CheckRemoteDir(to_path, to_host).run():
            logger.info('Creating directory %s on %s' % (to_path, to_host))
            try:
                MakeRemoteDir(to_path, to_host).run()
            except OSError, e:
                raise ExceptionNoStackTraceNeeded("Failed to create directory %s on %s" % (to_path, to_host))

        logger.info("Commencing remote copy from %s to %s:%s" % (from_host, to_host, to_path))
        pool.addCommand(Scp('Copying dump for seg %d' % seg.getSegmentDbId(),
                            srcFile=os.path.join(from_path, file),
                            dstFile=os.path.join(to_path, file),
                            srcHost=from_host,
                            dstHost=to_host))
Example 8: restore_pg_hba_on_segment
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    host_to_seg_map = defaultdict(list)
    for seg in gparr.getDbList():
        if not seg.isSegmentMaster() and not seg.isSegmentStandby():
            host_to_seg_map[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, data_dirs_list in host_to_seg_map.items():
            pickled_data_dirs_list = base64.urlsafe_b64encode(pickle.dumps(data_dirs_list))
            cmdStr = "$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % pickled_data_dirs_list
            cmd = Command('Restore the pg_hba.conf on remote hosts', cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 9: scp_file_to_hosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def scp_file_to_hosts(host_list, filename, batch_default):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for hname in host_list:
        pool.addCommand(
            Scp("Copying table_filter_file to %s" % hname, srcFile=filename, dstFile=filename, dstHost=hname)
        )
    pool.join()
    pool.check_results()
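Here check_results replaces the manual getCompletedItems loop seen in other examples: it raises if any queued command failed. A hypothetical invocation (hosts and path are illustrative):

scp_file_to_hosts(['sdw1', 'sdw2'], '/tmp/table_filter_file', batch_default=16)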
Example 10: test_print_progress
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def test_print_progress(self, mock1):
    w = WorkerPool(numWorkers=32)
    c1 = Command('dummy command1', '')
    c2 = Command('dummy command2', '')
    w.addCommand(c1)
    w.addCommand(c2)
    w.join()
    w.print_progress(2)
    self.assertTrue(mock1.called_with('100.00% of jobs completed'))
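The mock1 argument is injected by a mock.patch decorator that is not part of this excerpt. Note also that called_with is not a real assertion method on Mock objects (the real one is assert_called_with); accessing it merely auto-creates a truthy child mock, so this assertTrue always passes. It is a well-known pitfall when testing with mock.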
Example 11: run_pool_command
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
    pool = WorkerPool(numWorkers = min(len(host_list), batch_default))

    for h in host_list:
        cmd = Command(h, cmd_str, ctxt=REMOTE, remoteHost = h)
        pool.addCommand(cmd)

    pool.join()
    if check_results:
        pool.check_results()
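A hypothetical invocation that runs the same shell command on every host and raises on any failure (hosts and command string are illustrative):

run_pool_command(['sdw1', 'sdw2'], 'rm -f /tmp/gpdb_scratch', batch_default=16)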
Example 12: run_using_workerpool
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def run_using_workerpool(self, option=''):
    if not (set(option.split()) <= set(['-F', '-r', '--persistent-check', ' '])):
        raise GpRecoversegException('Not a valid option with gprecoverseg')

    rcvr_cmd = 'gprecoverseg -a %s' % option
    cmd = Command(name='Run gprecoverseg', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, rcvr_cmd))
    tinctest.logger.info("Running gprecoverseg : %s" % cmd)

    pool = WorkerPool(numWorkers=1, daemonize=True)
    pool.addCommand(cmd)
Example 13: TriggerFilerepVerifyMessages
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
class TriggerFilerepVerifyMessages(Operation):
    def __init__(self, content, token, batch_default, full=None, verify_file=None, verify_dir=None,
                 abort=None, suspend=None, resume=None, ignore_dir=None, ignore_file=None,
                 results=None, results_level=None):
        self.content = content
        self.token = token
        self.full = full
        self.verify_file = verify_file
        self.verify_dir = verify_dir
        self.abort = abort
        self.suspend = suspend
        self.resume = resume
        self.ignore_dir = ignore_dir
        self.ignore_file = ignore_file
        self.results = results
        self.results_level = results_level
        self.batch_default = batch_default
        self.pool = None

    def execute(self):
        """
        Sends arbitrary gp_primarymirror requests to the backend processes defined.
        """
        to_trigger = ValidateVerification(content = self.content).run()

        logger.info('Sending gp_primarymirror requests...')
        self.pool = WorkerPool(min(len(to_trigger), self.batch_default))
        for pseg in to_trigger:
            host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()
            cmd = SendFilerepVerifyMessage(name = 'verify %s' % host, host = host, port = port,
                                           token = self.token,
                                           full = self.full,
                                           verify_file = self.verify_file,
                                           verify_dir = self.verify_dir,
                                           abort = self.abort,
                                           suspend = self.suspend,
                                           resume = self.resume,
                                           ignore_dir = self.ignore_dir,
                                           ignore_file = self.ignore_file,
                                           results = self.results,
                                           results_level = self.results_level)
            logger.debug("Sending request to %s:%d" % (host, port))
            self.pool.addCommand(cmd)

        logger.info('Waiting for gp_primarymirror commands to complete...')
        self.pool.wait_and_printdots(len(to_trigger))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send gp_primarymirror message to %s:%s' % (cmd.host, cmd.port))
                logger.error('Error: %s' % res.stderr)
                raise TriggerGpPrimaryMirrorFailure()
        logger.info('gp_primarymirror messages have been triggered successfully.')
Example 14: _get_pgcontrol_data_from_segments
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def _get_pgcontrol_data_from_segments(self, gpdb_list):
    pool = WorkerPool(numWorkers=self.workers)
    try:
        for gpdb in gpdb_list:  # iterate for all segments
            cmd = PgControlData(name='run pg_controldata', datadir=gpdb.getSegmentDataDirectory(),
                                ctxt=REMOTE, remoteHost=gpdb.getSegmentHostName())
            cmd.gparray_gpdb = gpdb
            pool.addCommand(cmd)
        pool.join()
    finally:
        # Make sure that we halt the workers or else we'll hang
        pool.haltWork()
        pool.joinWorkers()
    return pool.getCompletedItems()
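The assignment cmd.gparray_gpdb = gpdb tags each command with the segment it was built for, which lets the caller correlate completed commands back to segments. A hypothetical consumer of the returned list (the logger call is illustrative):

for cmd in self._get_pgcontrol_data_from_segments(gpdb_list):
    seg = cmd.gparray_gpdb  # the segment attached before the command was queued
    res = cmd.get_results()
    if not res.wasSuccessful():
        logger.error('pg_controldata failed on %s' % seg.getSegmentHostName())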
Example 15: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import addCommand [as alias]
def execute(self):
    pool = WorkerPool()
    given = set(ListFiles(self.pid_dir).run())
    try:
        for host in self.trail:
            available = ListRemoteFiles(self.pid_dir, host).run()
            to_copy = [dir for dir in available if dir not in given]

            for dir in to_copy:
                comp_dir = os.path.join(self.pid_dir, dir)
                pool.addCommand(Scp('collect coverage',
                                    srcFile = comp_dir,
                                    srcHost = host,
                                    dstFile = comp_dir,
                                    recursive = True))
    finally:
        pool.join()