This article collects typical usage examples of the Python method gppylib.commands.base.WorkerPool.getCompletedItems. If you have been wondering exactly what WorkerPool.getCompletedItems does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, gppylib.commands.base.WorkerPool.
The following presents 15 code examples of the WorkerPool.getCompletedItems method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
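Most of the examples below share the same dispatch-then-collect pattern: queue Command objects on a WorkerPool, join() the pool, then inspect getCompletedItems(). Here is a minimal sketch of that pattern, assuming the imports shown in the examples' headers; the host names are placeholders, and the error handling follows the conventions used throughout the examples rather than any one source file.

from gppylib.commands.base import Command, WorkerPool, REMOTE

# Minimal sketch of the common pattern (host names are placeholders).
pool = WorkerPool(numWorkers=4)
try:
    for host in ['sdw1', 'sdw2']:
        pool.addCommand(Command(name='uptime', cmdStr='uptime',
                                ctxt=REMOTE, remoteHost=host))
    pool.join()  # wait for all queued commands to finish
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            print('%s failed: %s' % (cmd.name, cmd.get_results().stderr))
finally:
    pool.haltWork()    # stop the worker threads...
    pool.joinWorkers() # ...and join them, as the examples below do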
Example 1: ConcurrentFilespaceMoveTestCase
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
class ConcurrentFilespaceMoveTestCase(unittest.TestCase):
    """This test suite tests the scenario of running gpfilespace concurrently
    while trying to move the filespace.
    The expected behavior is that only one of the processes succeeds and the
    rest error out."""

    ALREADY_RUNNING_MSG = 'Another instance of gpfilespace is already running!'

    def setUp(self):
        self.pool = None
        self.pool = WorkerPool()

    def tearDown(self):
        if self.pool:
            self.pool.haltWork()
            self.pool.joinWorkers()
            self.pool.join()

    def get_move_filespace_cmd(self, filespace='myfspc', file_type=FileType.TEMPORARY_FILES):
        if file_type == FileType.TEMPORARY_FILES:
            file_type = 'movetempfiles'
        elif file_type == FileType.TRANSACTION_FILES:
            file_type = 'movetransfiles'
        return Command(name='move filespace', cmdStr='gpfilespace --%s %s' % (file_type, filespace))

    def run_concurrently(self, cmd_list):
        for cmd in cmd_list:
            self.pool.addCommand(cmd)
        self.pool.join()

    def check_concurrent_execution_result(self, execution_results):
        succeeded = 0
        for cmd in execution_results:
            results = cmd.get_results().stdout.strip()
            if self.ALREADY_RUNNING_MSG in results:
                continue
            succeeded += 1
        self.assertEqual(succeeded, 1)

    def test00_move_temp_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test01_move_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test02_move_temp_and_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES),
                    self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())
Example 2: get_host_status
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def get_host_status(hostlist):
    """
    Test whether an SSH command works on each host and return a dictionary,
    e.g. {host1: True, host2: False},
    where True means the SSH command succeeded and False means it failed.
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    pool = WorkerPool(min(len(hostlist), 16))
    for host in hostlist:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)
    pool.join()
    pool.haltWork()

    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            host_status_dict[cmd.remoteHost] = False
        else:
            host_status_dict[cmd.remoteHost] = True
    return host_status_dict
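A quick, hypothetical call of the function above; the host names are placeholders, not taken from the original source.

# Hypothetical usage: probe two hosts and report which are reachable.
status = get_host_status(['sdw1', 'sdw2'])
for host, ok in sorted(status.items()):
    print('%s: %s' % (host, 'ssh ok' if ok else 'ssh failed'))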
Example 3: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def execute(self):
    entry = ValidateVerificationEntry(token=self.token).run()
    if not entry['verdone']:
        raise WrongStateError("Only finished verification tasks may be cleaned up.")

    path = os.path.join(get_masterdatadir(), 'pg_verify', self.token)
    Command('cleanup', 'rm -rf %s' % path).run(validateAfter=True)
    # RemoveTree(path).run()

    to_clean = ValidateVerification(content=entry['vercontent'],
                                    primaries_only=False).run()
    pool = WorkerPool(min(len(to_clean), self.batch_default))
    for seg in to_clean:
        host = seg.getSegmentHostName()
        path = os.path.join(seg.getSegmentDataDirectory(), 'pg_verify', "*%s*" % self.token)
        cmd = Command('cleanup', 'rm -f %s' % path, remoteHost=host)
        pool.addCommand(cmd)

    logger.info('Waiting for clean commands to complete...')
    pool.wait_and_printdots(len(to_clean))

    for cmd in pool.getCompletedItems():
        res = cmd.get_results()
        if not res.wasSuccessful():
            logger.error('Failed to send cleanup on %s' % cmd.host)
            logger.error('Error: %s' % res.stderr)
            raise CleanVerificationError()
    RemoveVerificationEntry(token=self.token).run()
    logger.info('Verification %s has been cleaned.' % self.token)
Example 4: restore_pg_hba_on_segment
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    host_to_seg_map = defaultdict(list)
    for seg in gparr.getDbList():
        if not seg.isSegmentMaster() and not seg.isSegmentStandby():
            host_to_seg_map[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, data_dirs_list in host_to_seg_map.items():
            pickled_data_dirs_list = base64.urlsafe_b64encode(pickle.dumps(data_dirs_list))
            cmdStr = "$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % pickled_data_dirs_list
            cmd = Command('Restore the pg_hba.conf on remote hosts', cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 5: validate_nic_down
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def validate_nic_down(self):
    """
    Ping validation on the nics.
    """
    pool = WorkerPool()
    try:
        for nic, hostname in self.nic_to_address_map:
            address = self.nic_to_address_map[(nic, hostname)]
            cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')
            pool.addCommand(cmd)
        pool.join()

        for cmd in pool.getCompletedItems():
            results = cmd.get_results()
            # A successful ping means the nic is still up.
            if results.rc == 0:
                return False
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool.join()

    tinctest.logger.info("Successfully brought down nics ...")
    return True
Example 6: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def execute(self):
    pool = WorkerPool()
    try:
        for seg in self.segments:
            datadir = seg.getSegmentDataDirectory()
            postmaster_pid_file = '%s/postmaster.pid' % datadir
            shared_mem = None
            if os.path.isfile(postmaster_pid_file):
                with open(postmaster_pid_file) as fp:
                    shared_mem = fp.readlines()[-1].split()[-1].strip()
            if shared_mem:
                cmd = Command('clean up shared memory', cmdStr="ipcrm -m %s" % shared_mem)
                pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            # This code is usually called after a GPDB segment has
            # been terminated. In that case, it is possible that
            # the shared memory has already been freed by the
            # time we are called to clean up. Due to this race
            # condition, it is possible to get an `ipcrm: invalid
            # id1` error from ipcrm. We, therefore, ignore it.
            if result.rc != 0 and not result.stderr.startswith("ipcrm: invalid id"):
                raise Exception('Unable to clean up shared memory for segment: (%s)' % (result.stderr))
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 7: TriggerFilerepVerifyMessages
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
class TriggerFilerepVerifyMessages(Operation):
    def __init__(self, content, token, batch_default, full=None, verify_file=None, verify_dir=None,
                 abort=None, suspend=None, resume=None, ignore_dir=None, ignore_file=None,
                 results=None, results_level=None):
        self.content = content
        self.token = token
        self.full = full
        self.verify_file = verify_file
        self.verify_dir = verify_dir
        self.abort = abort
        self.suspend = suspend
        self.resume = resume
        self.ignore_dir = ignore_dir
        self.ignore_file = ignore_file
        self.results = results
        self.results_level = results_level
        self.batch_default = batch_default
        self.pool = None

    def execute(self):
        """
        Sends arbitrary gp_primarymirror requests to the backend processes defined.
        """
        to_trigger = ValidateVerification(content=self.content).run()

        logger.info('Sending gp_primarymirror requests...')
        self.pool = WorkerPool(min(len(to_trigger), self.batch_default))
        for pseg in to_trigger:
            host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()
            cmd = SendFilerepVerifyMessage(name='verify %s' % host, host=host, port=port,
                                           token=self.token,
                                           full=self.full,
                                           verify_file=self.verify_file,
                                           verify_dir=self.verify_dir,
                                           abort=self.abort,
                                           suspend=self.suspend,
                                           resume=self.resume,
                                           ignore_dir=self.ignore_dir,
                                           ignore_file=self.ignore_file,
                                           results=self.results,
                                           results_level=self.results_level)
            logger.debug("Sending request to %s:%d" % (host, port))
            self.pool.addCommand(cmd)

        logger.info('Waiting for gp_primarymirror commands to complete...')
        self.pool.wait_and_printdots(len(to_trigger))

        for cmd in self.pool.getCompletedItems():
            res = cmd.get_results()
            if not res.wasSuccessful():
                logger.error('Failed to send gp_primarymirror message to %s:%s' % (cmd.host, cmd.port))
                logger.error('Error: %s' % res.stderr)
                raise TriggerGpPrimaryMirrorFailure()
        logger.info('gp_primarymirror messages have been triggered successfully.')
Example 8: _get_pgcontrol_data_from_segments
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def _get_pgcontrol_data_from_segments(self, gpdb_list):
    pool = WorkerPool(numWorkers=self.workers)
    try:
        for gpdb in gpdb_list:  # iterate over all segments
            cmd = PgControlData(name='run pg_controldata', datadir=gpdb.getSegmentDataDirectory(),
                                ctxt=REMOTE, remoteHost=gpdb.getSegmentHostName())
            cmd.gparray_gpdb = gpdb
            pool.addCommand(cmd)
        pool.join()
    finally:
        # Make sure that we halt the workers or else we'll hang
        pool.haltWork()
        pool.joinWorkers()
    return pool.getCompletedItems()
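Because this method returns the completed commands directly, the caller is responsible for checking each result. A hypothetical consumer, following the result-checking convention used throughout these examples (the caller shown here is illustrative, not from the original source):

# Hypothetical caller: pair each result with the segment tagged onto the command.
for cmd in self._get_pgcontrol_data_from_segments(gpdb_list):
    seg = cmd.gparray_gpdb  # the segment attached before dispatch
    if not cmd.get_results().wasSuccessful():
        logger.error('pg_controldata failed on %s' % seg.getSegmentHostName())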
Example 9: checkSSH
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def checkSSH(self):
    '''Check that ssh to the host list is okay.'''
    pool = WorkerPool()
    for h in self.list:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)
    pool.join()
    pool.haltWork()

    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)
    return True
Example 10: removeBadHosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def removeBadHosts(self):
    '''Update the list of hosts to include only those on which SSH was successful.'''
    pool = WorkerPool()
    for h in self.list:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)
    pool.join()
    pool.haltWork()

    bad_hosts = []
    working_hosts = []
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            bad_hosts.append(cmd.remoteHost)
        else:
            working_hosts.append(cmd.remoteHost)

    self.list = working_hosts[:]
    return bad_hosts
Example 11: findFsDetails
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def findFsDetails():
    global serverFSMap
    try:
        # Find the mount points in parallel.
        pool = WorkerPool()
        for hname in serverFSMap.keys():
            hname.strip()  # note: a no-op in the original; str.strip() returns a new string
            subCmd = "df -P %s" % (serverFSMap[hname])
            cmdStr = 'ssh -o PasswordAuthentication=no %s "%s"' % (hname, subCmd)
            pool.addCommand(Command(hname, cmdStr, REMOTE, hname))
        pool.join()

        items = pool.getCompletedItems()
        for i in items:
            if i.results.rc == 0:
                df_with_header = i.results.stdout.strip()
                df_list = df_with_header.splitlines()
                df_list.pop(0)  # drop the df header line
                fsList = serverFSMap[i.remoteHost].split()
                if len(df_list) != len(fsList):
                    print("Mismatch")
                    continue
                for df_vals in df_list:
                    df_val = df_vals.split()
                    fsDetailsMap[fsList.pop(0).strip()] = [i.remoteHost, df_val[0], df_val[5]]
            else:
                print("Failure in talking to host %s" % (i.remoteHost))
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
    except Exception, e:
        print(str(e))
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
Example 12: filterMultiHomedHosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def filterMultiHomedHosts(self):
    '''For multiple hostnames that refer to the same node, keep only one in the host list.'''
    unique = {}
    pool = WorkerPool()
    for h in self.list:
        cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)
    pool.join()
    pool.haltWork()

    for finished_cmd in pool.getCompletedItems():
        hostname = finished_cmd.get_hostname()
        if (not hostname):
            unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
        elif not unique.get(hostname):
            unique[hostname] = finished_cmd.remoteHost
        elif hostname == finished_cmd.remoteHost:
            unique[hostname] = finished_cmd.remoteHost

    self.list = unique.values()
    return self.list
Example 13: bring_down_nic
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def bring_down_nic(self, nics, hostname):
    """
    Bring down nics based on the input nic names
    """
    if nics is None:
        return False

    pool = WorkerPool()
    try:
        # Get the ip address of each interface.
        for nic in nics:
            cmd = Command(name='get the ip of the interface',
                          cmdStr="/sbin/ifconfig %s | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" % nic,
                          ctxt=REMOTE, remoteHost=hostname)
            cmd.run(validateAfter=True)
            results = cmd.get_results()
            if results.rc != 0:
                raise Exception('Unable to map interface to ipaddress')
            self.nic_to_address_map[(nic, hostname)] = results.stdout.split()[0].strip()

        for nic in nics:
            tinctest.logger.info("Bringing down %s:%s ..." % (hostname, nic))
            cmd = Command(name='bring NIC down', cmdStr='sudo /sbin/ifdown %s' % nic, ctxt=REMOTE, remoteHost=hostname)
            pool.addCommand(cmd)
        pool.join()

        for cmd in pool.getCompletedItems():
            results = cmd.get_results()
            if results.rc != 0:
                return False
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool.join()

    return True
Example 14: rebalance
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
def rebalance(self):
    # Get the unbalanced primary segments grouped by hostname.
    # These segments are what we will shut down.
    logger.info("Getting unbalanced segments")
    unbalanced_primary_segs = GpArray.getSegmentsByHostName(self.gpArray.get_unbalanced_primary_segdbs())
    pool = WorkerPool()
    count = 0

    try:
        # Disable ctrl-c
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        logger.info("Stopping unbalanced primary segments...")
        for hostname in unbalanced_primary_segs.keys():
            cmd = GpSegStopCmd("stop unbalanced primary segs",
                               self.gpEnv.getGpHome(),
                               self.gpEnv.getGpVersion(),
                               'fast',
                               unbalanced_primary_segs[hostname],
                               ctxt=REMOTE,
                               remoteHost=hostname,
                               timeout=600)
            pool.addCommand(cmd)
            count += 1

        pool.wait_and_printdots(count, False)

        failed_count = 0
        completed = pool.getCompletedItems()
        for res in completed:
            if not res.get_results().wasSuccessful():
                failed_count += 1

        if failed_count > 0:
            logger.warn("%d segments failed to stop. A full rebalance of the" % failed_count)
            logger.warn("system is not possible at this time. Please check the")
            logger.warn("log files, correct the problem, and run gprecoverseg -r")
            logger.warn("again.")
            logger.info("gprecoverseg will continue with a partial rebalance.")

        pool.empty_completed_items()

        # Issue a distributed query to make sure we pick up the fault
        # that we just caused by shutting down segments.
        conn = None
        try:
            logger.info("Triggering segment reconfiguration")
            dburl = dbconn.DbURL()
            conn = dbconn.connect(dburl)
            cmd = ReconfigDetectionSQLQueryCommand(conn)
            pool.addCommand(cmd)
            pool.wait_and_printdots(1, False)
        except Exception:
            # This exception is expected
            pass
        finally:
            if conn:
                conn.close()

        # Final step is to issue a recoverseg operation to resync segments.
        logger.info("Starting segment synchronization")
        cmd = GpRecoverseg("rebalance recoverseg")
        pool.addCommand(cmd)
        pool.wait_and_printdots(1, False)
    except Exception, ex:
        raise ex
Example 15: WorkerPoolTest
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import getCompletedItems [as alias]
class WorkerPoolTest(unittest.TestCase):
    def setUp(self):
        self.pool = WorkerPool(numWorkers=1, logger=mock.Mock())

    def tearDown(self):
        # All background threads must be stopped, or else the test runner will
        # hang waiting. Join the stopped threads to make sure we're completely
        # clean for the next test.
        self.pool.haltWork()
        self.pool.joinWorkers()

    def test_pool_must_have_some_workers(self):
        with self.assertRaises(Exception):
            WorkerPool(numWorkers=0)

    def test_pool_runs_added_command(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        cmd.run.assert_called_once_with()

    def test_completed_commands_are_retrievable(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)  # should quickly be completed
        self.pool.join()

        self.assertEqual(self.pool.getCompletedItems(), [cmd])

    def test_pool_is_not_marked_done_until_commands_finish(self):
        cmd = mock.Mock(spec=Command)

        # cmd.run() will block until this Event is set.
        event = threading.Event()
        def wait_for_event():
            event.wait()
        cmd.run.side_effect = wait_for_event

        self.assertTrue(self.pool.isDone())

        try:
            self.pool.addCommand(cmd)
            self.assertFalse(self.pool.isDone())
        finally:
            # Make sure that we unblock the thread even on a test failure.
            event.set()

        self.pool.join()
        self.assertTrue(self.pool.isDone())

    def test_pool_can_be_emptied_of_completed_commands(self):
        cmd = mock.Mock(spec=Command)

        self.pool.addCommand(cmd)
        self.pool.join()

        self.pool.empty_completed_items()
        self.assertEqual(self.pool.getCompletedItems(), [])

    def test_check_results_succeeds_when_no_items_fail(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return True if the command
        # succeeds.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = True

        self.pool.addCommand(cmd)
        self.pool.join()

        self.pool.check_results()

    def test_check_results_throws_exception_at_first_failure(self):
        cmd = mock.Mock(spec=Command)

        # Command.get_results() returns a CommandResult.
        # CommandResult.wasSuccessful() should return False to simulate a
        # failure.
        result = cmd.get_results.return_value
        result.wasSuccessful.return_value = False

        self.pool.addCommand(cmd)
        self.pool.join()

        with self.assertRaises(ExecutionError):
            self.pool.check_results()

    def test_join_with_timeout_returns_done_immediately_if_there_is_nothing_to_do(self):
        start = time.time()
        done = self.pool.join(10)
        delta = time.time() - start
        self.assertTrue(done)
        # "Returns immediately" is a difficult thing to test. Longer than two
        # seconds seems like a reasonable failure case, even on a heavily loaded
#......... the rest of this example is omitted .........