This article collects typical usage examples of the Python method gppylib.commands.base.WorkerPool.haltWork. If you have been wondering what WorkerPool.haltWork does, how to call it, or what real uses of it look like, the code examples selected here should help. You can also look further into usage examples of the class it belongs to, gppylib.commands.base.WorkerPool.
The sections below show 15 code examples of the WorkerPool.haltWork method, sorted by popularity.
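Before the individual examples, here is a minimal sketch (not taken from any single example) of the lifecycle pattern most of them share: queue commands with addCommand, wait with join, read results from getCompletedItems, and shut the pool down with haltWork followed by joinWorkers. The helper name run_on_hosts, the pool-size cap, and the command string are illustrative placeholders.

from gppylib.commands.base import WorkerPool, Command, REMOTE

def run_on_hosts(host_list, cmd_str):
    # Pool size capped at 16 workers purely as an illustrative default.
    pool = WorkerPool(numWorkers=min(len(host_list), 16))
    try:
        for host in host_list:
            pool.addCommand(Command(host, cmd_str, ctxt=REMOTE, remoteHost=host))
        pool.join()                      # block until every queued command has run
        return pool.getCompletedItems()  # inspect each item with cmd.get_results()
    finally:
        pool.haltWork()                  # signal the worker threads to stop
        pool.joinWorkers()               # wait for the worker threads to exit

haltWork only signals the workers, so the examples below almost always pair it with joinWorkers, and several of them (for instance Examples 2, 4, 5 and 9) put the pair in a finally block so the pool is torn down even when a command fails.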
Example 1: ConcurrentFilespaceMoveTestCase
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
class ConcurrentFilespaceMoveTestCase(unittest.TestCase):
    """This test suite tests the scenario of running gpfilespace concurrently while
    trying to move the filespace.
    The expected behavior is that only one of the processes succeeds and the
    rest error out."""

    ALREADY_RUNNING_MSG = 'Another instance of gpfilespace is already running!'

    def setUp(self):
        self.pool = None
        self.pool = WorkerPool()

    def tearDown(self):
        if self.pool:
            self.pool.haltWork()
            self.pool.joinWorkers()
            self.pool.join()

    def get_move_filespace_cmd(self, filespace='myfspc', file_type=FileType.TEMPORARY_FILES):
        if file_type == FileType.TEMPORARY_FILES:
            file_type = 'movetempfiles'
        elif file_type == FileType.TRANSACTION_FILES:
            file_type = 'movetransfiles'
        return Command(name='move filespace', cmdStr='gpfilespace --%s %s' % (file_type, filespace))

    def run_concurrently(self, cmd_list):
        for cmd in cmd_list:
            self.pool.addCommand(cmd)
        self.pool.join()

    def check_concurrent_execution_result(self, execution_results):
        succeeded = 0
        for cmd in execution_results:
            results = cmd.get_results().stdout.strip()
            if self.ALREADY_RUNNING_MSG in results:
                continue
            succeeded += 1
        self.assertEqual(succeeded, 1)

    def test00_move_temp_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test01_move_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES) for i in range(2)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())

    def test02_move_temp_and_trans_filespace(self):
        cmd_list = [self.get_move_filespace_cmd(file_type=FileType.TEMPORARY_FILES),
                    self.get_move_filespace_cmd(file_type=FileType.TRANSACTION_FILES)]
        self.run_concurrently(cmd_list)
        self.check_concurrent_execution_result(self.pool.getCompletedItems())
Example 2: validate_nic_down
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def validate_nic_down(self):
    """
    Ping validation on the nics.
    """
    pool = WorkerPool()
    try:
        for nic, hostname in self.nic_to_address_map:
            address = self.nic_to_address_map[(nic, hostname)]
            cmd = Ping('ping validation', address, ctxt=REMOTE, remoteHost='localhost')
            pool.addCommand(cmd)
        pool.join()

        for cmd in pool.getCompletedItems():
            results = cmd.get_results()
            if results.rc == 0:
                return False
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool.join()

    tinctest.logger.info("Successfully brought down nics ...")
    return True
Example 3: get_host_status
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def get_host_status(hostlist):
    """
    Test if SSH command works on a host and return a dictionary
    Return Ex: {host1: True, host2: False}
    where True represents SSH command success and False represents failure
    """
    if not isinstance(hostlist, list):
        raise Exception("Input parameter should be of type list")

    pool = WorkerPool(min(len(hostlist), 16))

    for host in hostlist:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    host_status_dict = {}
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            host_status_dict[cmd.remoteHost] = False
        else:
            host_status_dict[cmd.remoteHost] = True

    return host_status_dict
Example 4: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def execute(self):
    pool = WorkerPool()
    try:
        for seg in self.segments:
            datadir = seg.getSegmentDataDirectory()
            postmaster_pid_file = '%s/postmaster.pid' % datadir
            shared_mem = None
            if os.path.isfile(postmaster_pid_file):
                with open(postmaster_pid_file) as fp:
                    shared_mem = fp.readlines()[-1].split()[-1].strip()
            if shared_mem:
                cmd = Command('clean up shared memory', cmdStr="ipcrm -m %s" % shared_mem)
                pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            # This code is usually called after a GPDB segment has
            # been terminated. In that case, it is possible that
            # the shared memory has already been freed by the
            # time we are called to clean up. Due to this race
            # condition, it is possible to get an `ipcrm: invalid
            # id1` error from ipcrm. We, therefore, ignore it.
            if result.rc != 0 and not result.stderr.startswith("ipcrm: invalid id"):
                raise Exception('Unable to clean up shared memory for segment: (%s)' % (result.stderr))
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 5: restore_pg_hba_on_segment
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def restore_pg_hba_on_segment(gparr):
    """
    Restore the pg_hba.conf on all of the segments
    present in the array
    """
    logger.debug('Restoring pg_hba.conf file on segments...')

    host_to_seg_map = defaultdict(list)
    for seg in gparr.getDbList():
        if not seg.isSegmentMaster() and not seg.isSegmentStandby():
            host_to_seg_map[seg.getSegmentHostName()].append(seg.getSegmentDataDirectory())

    pool = WorkerPool(numWorkers=DEFAULT_BATCH_SIZE)
    try:
        for host, data_dirs_list in host_to_seg_map.items():
            pickled_data_dirs_list = base64.urlsafe_b64encode(pickle.dumps(data_dirs_list))
            cmdStr = "$GPHOME/lib/python/gppylib/operations/initstandby.py -d %s -r" % pickled_data_dirs_list
            cmd = Command('Restore the pg_hba.conf on remote hosts', cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
            pool.addCommand(cmd)
        pool.join()

        for item in pool.getCompletedItems():
            result = item.get_results()
            if result.rc != 0:
                logger.error('Unable to restore pg_hba.conf %s' % str(result.stderr))
                logger.error('Please check the segment for more details')
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool = None
Example 6: scp_file_to_hosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def scp_file_to_hosts(host_list, filename, batch_default):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for hname in host_list:
        pool.addCommand(
            Scp("Copying table_filter_file to %s" % hname, srcFile=filename, dstFile=filename, dstHost=hname)
        )
    pool.join()
    pool.haltWork()
    pool.check_results()
Example 7: test_print_progress
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def test_print_progress(self, mock1):
    w = WorkerPool(numWorkers=32)
    c1 = Command('dummy command1', '')
    c2 = Command('dummy command2', '')
    w.addCommand(c1)
    w.addCommand(c2)
    w.join()
    w.print_progress(2)
    self.assertTrue(mock1.called_with('100.00% of jobs completed'))
    w.haltWork()
Example 8: run_pool_command
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def run_pool_command(host_list, cmd_str, batch_default, check_results=True):
    pool = WorkerPool(numWorkers=min(len(host_list), batch_default))

    for host in host_list:
        cmd = Command(host, cmd_str, ctxt=REMOTE, remoteHost=host)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()
    if check_results:
        pool.check_results()
Example 9: _get_pgcontrol_data_from_segments
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def _get_pgcontrol_data_from_segments(self, gpdb_list):
    pool = WorkerPool(numWorkers=self.workers)
    try:
        for gpdb in gpdb_list:  # iterate for all segments
            cmd = PgControlData(name='run pg_controldata', datadir=gpdb.getSegmentDataDirectory(),
                                ctxt=REMOTE, remoteHost=gpdb.getSegmentHostName())
            cmd.gparray_gpdb = gpdb
            pool.addCommand(cmd)
        pool.join()
    finally:
        # Make sure that we halt the workers or else we'll hang
        pool.haltWork()
        pool.joinWorkers()
    return pool.getCompletedItems()
Example 10: execute
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def execute(self):
    pool = WorkerPool()
    given = set(ListFiles(self.pid_dir).run())
    try:
        for host in self.trail:
            available = ListRemoteFiles(self.pid_dir, host).run()
            to_copy = [dir for dir in available if dir not in given]
            for dir in to_copy:
                comp_dir = os.path.join(self.pid_dir, dir)
                pool.addCommand(Scp('collect coverage',
                                    srcFile=comp_dir,
                                    srcHost=host,
                                    dstFile=comp_dir,
                                    recursive=True))
        pool.join()
    finally:
        pool.haltWork()
Example 11: checkSSH
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def checkSSH(self):
    '''Check that ssh to hostlist is okay.'''
    pool = WorkerPool()

    for h in self.list:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            raise SSHError("Unable to ssh to '%s'" % cmd.remoteHost)

    return True
Example 12: removeBadHosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def removeBadHosts(self):
    '''Update the host list to include only the hosts on which SSH was successful.'''
    pool = WorkerPool()

    for h in self.list:
        cmd = Echo('ssh test', '', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    bad_hosts = []
    working_hosts = []
    for cmd in pool.getCompletedItems():
        if not cmd.get_results().wasSuccessful():
            bad_hosts.append(cmd.remoteHost)
        else:
            working_hosts.append(cmd.remoteHost)

    self.list = working_hosts[:]
    return bad_hosts
Example 13: findFsDetails
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def findFsDetails():
    global serverFSMap
    try:
        # find the mount points in parallel
        pool = WorkerPool()
        for hname in serverFSMap.keys():
            hname.strip()
            subCmd = "df -P %s" % (serverFSMap[hname])
            cmdStr = 'ssh -o PasswordAuthentication=no %s "%s"' % (hname, subCmd)
            pool.addCommand(Command(hname, cmdStr, REMOTE, hname))
        pool.join()

        items = pool.getCompletedItems()
        for i in items:
            if i.results.rc == 0:
                df_with_header = i.results.stdout.strip()
                df_list = df_with_header.splitlines()
                df_list.pop(0)
                fsList = serverFSMap[i.remoteHost].split()
                if len(df_list) != len(fsList):
                    print("Mismatch")
                    continue
                for df_vals in df_list:
                    df_val = df_vals.split()
                    fsDetailsMap[fsList.pop(0).strip()] = [i.remoteHost, df_val[0], df_val[5]]
            else:
                print("Failure in talking to host %s" % (i.remoteHost))

        pool.join()
        pool.haltWork()
        pool.joinWorkers()
    except Exception as e:
        print(str(e))
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
Example 14: filterMultiHomedHosts
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def filterMultiHomedHosts(self):
    '''When multiple hostnames refer to the same node, keep only one of them in the host list.'''
    unique = {}
    pool = WorkerPool()

    for h in self.list:
        cmd = Hostname('hostname', ctxt=REMOTE, remoteHost=h)
        pool.addCommand(cmd)

    pool.join()
    pool.haltWork()

    for finished_cmd in pool.getCompletedItems():
        hostname = finished_cmd.get_hostname()
        if not hostname:
            unique[finished_cmd.remoteHost] = finished_cmd.remoteHost
        elif not unique.get(hostname):
            unique[hostname] = finished_cmd.remoteHost
        elif hostname == finished_cmd.remoteHost:
            unique[hostname] = finished_cmd.remoteHost

    self.list = unique.values()
    return self.list
Example 15: bring_down_nic
# Required import: from gppylib.commands.base import WorkerPool [as alias]
# Or: from gppylib.commands.base.WorkerPool import haltWork [as alias]
def bring_down_nic(self, nics, hostname):
    """
    Bring down nics based on the input nic names
    """
    if nics is None:
        return False

    pool = WorkerPool()
    try:
        # get the ip address of the interface
        for nic in nics:
            cmd = Command(name='get the ip of the interface',
                          cmdStr="/sbin/ifconfig %s | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'" % nic,
                          ctxt=REMOTE, remoteHost=hostname)
            cmd.run(validateAfter=True)
            results = cmd.get_results()
            if results.rc != 0:
                raise Exception('Unable to map interface to ipaddress')
            self.nic_to_address_map[(nic, hostname)] = results.stdout.split()[0].strip()

        for nic in nics:
            tinctest.logger.info("Bringing down %s:%s ..." % (hostname, nic))
            cmd = Command(name='bring NIC down', cmdStr='sudo /sbin/ifdown %s' % nic, ctxt=REMOTE, remoteHost=hostname)
            pool.addCommand(cmd)
        pool.join()

        for cmd in pool.getCompletedItems():
            results = cmd.get_results()
            if results.rc != 0:
                return False
    finally:
        pool.haltWork()
        pool.joinWorkers()
        pool.join()

    return True