This article collects typical usage examples of the Python function tinctest.lib.run_shell_command. If you have been wondering what exactly run_shell_command does, how it is called, or what a real call looks like in practice, the hand-picked code examples here should help.
A total of 15 code examples of run_shell_command are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
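Before the individual examples, here is a minimal usage sketch of the calling convention the examples below share: a command string, a short description used for logging, and a results dictionary that the helper fills in with 'rc', 'stdout' and 'stderr' (passed either positionally or as results=...); example 7 additionally relies on a truthy return value on success. The echo command and the surrounding error handling are illustrative assumptions for this sketch, not part of tinctest itself.

# Hypothetical minimal sketch, based only on the calling patterns in the examples below.
from tinctest.lib import run_shell_command

res = {'rc': 0, 'stdout': '', 'stderr': ''}
# The results dict may also be passed as a keyword: run_shell_command(cmd, results=res)
ok = run_shell_command("echo hello", "run a sample shell command", res)
if not ok or res['rc'] != 0:
    raise Exception("command failed: %s" % res['stderr'])
print(res['stdout'])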
Example 1: put_file_in_hdfs
def put_file_in_hdfs(self, input_path, hdfs_path):
if hdfs_path.rfind('/') > 0:
hdfs_dir = hdfs_path[:hdfs_path.rfind('/')]
cmd_str = "%s/bin/hdfs dfs -mkdir -p %s" %(self.HADOOP_ENVS['HADOOP_HOME'], hdfs_dir)
run_shell_command(cmd_str, "Creating parent HDFS dir for path %s" %input_path)
cmd_str = "%s/bin/hdfs dfs -put %s %s" %(self.HADOOP_ENVS['HADOOP_HOME'], input_path, hdfs_path)
run_shell_command(cmd_str, "Copy to HDFS : file %s" %input_path)
Example 2: create_filespace
def create_filespace(self, filespace):
'''
@param filespace: Filespace Name
'''
if self.exists(filespace) is True:
tinctest.logger.info('Filespace %s exists' % filespace)
return
file1 = local_path(filespace)
f1 = open(file1+".fs.config","w")
f1.write('filespace:%s\n' % filespace)
for record in self.config.record:
if record.role:
fileloc = '%s/%s/primary' % (os.path.split(record.datadir)[0], filespace)
else:
fileloc = '%s/%s/mirror' % (os.path.split(record.datadir)[0], filespace)
# @todo: use a common utility to create/delete remotely
cmd = "gpssh -h %s -e 'rm -rf %s; mkdir -p %s'" % (record.hostname, fileloc, fileloc)
run_shell_command(cmd)
f1.write("%s:%s:%s/%s\n" % (record.hostname, record.dbid, fileloc, os.path.split(record.datadir)[1]))
f1.close()
result = self.run(config=f1.name)
if result.rc != 0:
        raise GPfilespaceException('"gpfilespace creation filespace FAILED". Output = %s ' % result.stdout)
Example 3: test_option_port_offset
def test_option_port_offset(self):
"""
primary port + offset = mirror database port
primary port + (2 * offset) = mirror replication port
primary port + (3 * offset) = primary replication port
"""
gprecover = GpRecover()
port_offset = 500
self._setup_gpaddmirrors(port_offset = port_offset)
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with non default port_offset', res)
self.assertEqual(0, res['rc'])
query_ports = 'SELECT port, replication_port FROM gp_segment_configuration WHERE content = 0 ORDER BY preferred_role DESC;'
result = PSQL.run_sql_command(query_ports, flags='-q -t', dbname='template1')
ports = result.strip().split('\n')
primary_ports = ports[0]
mirror_ports = ports[1]
primary_ports = primary_ports.split('|')
primary_ports = [port.strip() for port in primary_ports]
primary_db_port = int(primary_ports[0])
primary_replic_port = int(primary_ports[1])
mirror_ports = mirror_ports.split('|')
mirror_ports = [port.strip() for port in mirror_ports]
mirror_db_port = int(mirror_ports[0])
mirror_replic_port = int(mirror_ports[1])
self.assertEqual(primary_db_port + port_offset, mirror_db_port)
self.assertEqual(primary_db_port + 2*port_offset, mirror_replic_port)
self.assertEqual(primary_db_port + 3*port_offset, primary_replic_port)
gprecover.wait_till_insync_transition()
self.verify_config_file_with_gp_config()
self.check_mirror_seg()
Example 4: test_no_corruption
def test_no_corruption(self):
"""
Test that gpcheckcat does not report any errors and it does
not generate the verify file if the gpcheckcat test succeeds.
We choose missing_extraneous test for this purpose.
"""
dbname = 'test_no_corruption'
PSQL.run_sql_command('DROP DATABASE IF EXISTS %s' % dbname)
stdout = PSQL.run_sql_command('CREATE DATABASE %s' % dbname)
if not stdout.endswith('CREATE DATABASE\n'):
self.fail('failed to create database: %s' % stdout)
sql_file = local_path('sql/create_tables.sql')
if not PSQL.run_sql_file(sql_file, dbname=dbname,
output_to_file=False):
self.fail('failed to create tables')
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(
"cd %s && $GPHOME/bin/lib/gpcheckcat -p %s -R missing_extraneous %s" %
(self.gpcheckcat_test_dir, self.master_port, dbname),
results=res)
self.assertEqual(0, res['rc'])
for f in os.listdir(self.gpcheckcat_test_dir):
if fnmatch.fnmatch(f, 'gpcheckcat.verify.%s.*' % dbname):
self.fail('found verify file when not expecting it')
Example 5: test_MPP24237
def test_MPP24237(self):
cmd_cleanup = "psql -Atc \"select datname from pg_database where datname != \'template0\'\" | while read a; do echo \"check for ${a}\";psql -Atc \"select \'drop schema if exists \' || nspname || \' cascade;\' from (select nspname from pg_namespace where nspname like \'pg_temp%\' union select nspname from gp_dist_random(\'pg_namespace\') where nspname like \'pg_temp%\' except select \'pg_temp_\' || sess_id::varchar from pg_stat_activity) as foo\" ${a}; done"
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command(cmd_cleanup, 'do_clean', res)
if res['rc'] > 0:
raise Exception("Failed to do cleanup %s" %res[stderr])
PSQL.run_sql_file(local_path('pre_script.sql'), out_file=local_path('pre_script.out'))
self.assertTrue(Gpdiff.are_files_equal(local_path('pre_script.out'), local_path('pre_script.ans')))
cmd = "select count(*) from pg_tables where schemaname like 'pg_temp%';"
out = PSQL.run_sql_command(cmd, flags ='-q -t')
if int(out) != 0:
tinctest.logger.info("temp tables found")
tinctest.logger.info(PSQL.run_sql_command("select * from pg_tables where schemaname like 'pg_temp%';"))
self.fail("temp tables were found")
PSQL.run_sql_file(local_path('clean_script.sql'))
PSQL.run_sql_file(local_path('clean_script.sql'))
run_shell_command(cmd_cleanup, 'do_clean', res)
if res['rc'] > 0:
raise Exception("Failed to do cleanup %s" %res[stderr])
Example 6: setUpClass
def setUpClass(cls):
super(EtablefuncGppcTestCase, cls).setUpClass()
"""
compile tablefunc_gppc_demo.c and install the tablefunc_gppc_demo.so
"""
gppkg = Gppkg()
gpccutil.pre_process(product_version)
result = gppkg.gppkg_install(product_version, "libgppc")
    # makeLog = local_path('test00MakeLog.out')
if result:
cmdMakeInstall = (
"cd " + local_path("data") + " && make clean && make CPPFLAGS=-D_GNU_SOURCE && make install"
)
res = {"rc": 0, "stderr": "", "stdout": ""}
run_shell_command(cmdMakeInstall, "compile tablefunc_gppc_demo.c", res)
# Current make file works for linux, but not for Solaris or OSX.
# If compilation fails or installation fails, force system quit: os._exit(1)
if res["rc"]:
os._exit(1) # This will exit the test including the next test suites
sharedObj = "%s/tabfunc_gppc_demo.so" % (LIBDIR)
if not os.path.isfile(sharedObj):
os._exit(1)
# For multinode cluster, need to copy shared object tabfunc_gppc_demo.so to all primary segments
if gpdbconfig.is_multinode():
res = {"rc": 0, "stderr": "", "stdout": ""}
hosts = gpdbconfig.get_hosts(segments=True)
scp_cmd = "gpscp -h " + " -h ".join(map(str, hosts)) + " " + sharedObj + " =:%s" % LIBDIR
        run_shell_command(scp_cmd, "copy tabfunc_gppc_demo.so to segment hosts", res)
        if res["rc"]:
            raise Exception("Could not copy shared object to primary segment")
Example 7: install_kerberos_conf
def install_kerberos_conf(self):
"""
Update the kerberos configuration files according the env
and copy in appropriate locations
"""
transforms = {
"%DOMAIN%" : self.kdc_domain,
"%HOSTNAME%" : self.kdc_host
}
input_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE)
output_file_path = local_path(self.krb_template_conf + "/" + self.KRB_CONF_TEMPLATE[:-2])
with open(input_file_path, 'r') as input:
with open(output_file_path, 'w') as output:
for line in input.readlines():
for key,value in transforms.iteritems():
line = re.sub(key,value,line)
output.write(line)
cmd_str = "sudo cp %s %s" %(output_file_path, self.KRB_CONF)
if not run_shell_command(cmd_str,"Copying krb5.conf"):
raise KerberosUtilException("Couldn't copy krb5.conf")
cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KDC_CONF_TEMPLATE), self.KDC_CONF)
if not run_shell_command(cmd_str,"Copying kdc.conf"):
raise KerberosUtilException("Couldn't copy kdc.conf")
cmd_str = "sudo cp %s %s" %(local_path(self.krb_template_conf + "/" + self.KADMIN_ACL_CONF_TEMPLATE), self.KADMIN_ACL_CONF)
if not run_shell_command(cmd_str,"Copying kadm5.acl"):
raise KerberosUtilException("Couldn't copy kadm5.acl")
Example 8: killGpfdist
def killGpfdist(self, wait=60, port=None):
"""
kill the gpfdist process
@change: Johnny Soedomo, check from netstat whether the system has released the process rather than waiting a flat 10s
@todo: Support for stopping gpfdist process on remote host
"""
if port is None:
port = self.port
cmd_str = ' | '.join([self.ps_command + " -ef",
"grep \"[g]pfdist -p %s\"" % (port),
"awk '\"'\"'{print $2}'\"'\"'",
"xargs kill"])
cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command(cmd, 'kill gpfdist', res)
if not self.is_gpfdist_killed():
raise GPFDISTError("Could not kill gpfdist process on %s:%s" %(self.hostname, self.port))
# Make sure the port is released
is_released = False
count = 0
while (not is_released and count < wait):
is_released = self.is_port_released()
count = count + 1
time.sleep(1)
Example 9: copyJARFILE
def copyJARFILE(self, srcjarfile):
""" copy jar file to $GPHOME/lib/postgresql/java on master and all segments """
if not os.path.isfile(srcjarfile):
raise Exception("Can not find jar file %s" % (srcjarfile))
hosts = config.get_hosts()
hoststr = ""
for host in hosts:
hoststr += " -h %s" % (host)
    # set access permissions on the existing jar file so that gpscp can overwrite it with the current one
    jarfilename = os.path.basename(srcjarfile)
    cmd = "gpssh%s -e 'chmod -Rf 755 %s/java/%s'" % (hoststr, LIBDIR, jarfilename)
    Command(name='set access permissions to existing jar', cmdStr=cmd).run(validateAfter=True)
# copy current jar file to all hosts using gpscp
cmd = "gpscp%s %s =:%s/java" % (hoststr, srcjarfile, LIBDIR)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmd, 'copy current jar file to all hosts', res)
if res['rc']:
raise Exception("Can not copy jar file %s to hosts" % (srcjarfile))
# set access permissions to current jar file so that it can be accessed by applications
cmd = "gpssh%s -e 'chmod -Rf 755 %s/java/%s'" % (hoststr, LIBDIR, jarfilename)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmd, 'set access permissions to current jar file', res)
if res['rc']:
raise Exception("Can not set access permissions of jar file %s to 755" % (jarfilename))
Example 10: is_gpfdist_killed
def is_gpfdist_killed(self, port=None, wait=1):
"""
Check whether the gpfdist process is killed
"""
if port is None:
port = self.port
process_killed = False
count = 0
while (not process_killed and count < wait):
cmd_str = " | ".join([
self.ps_command + ' -ef',
'grep \"[g]pfdist -p %s\"' % (port)])
cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command(cmd, 'gpfdist process check', res)
content = res['stdout']
# strip hostname prefix from gpssh output
content = content.replace(self.hostname, '').strip('[]').strip()
        # any remaining ps output for this port means the gpfdist process is still alive
        if len(content) > 0:
tinctest.logger.warning("gpfdist process still exists on %s:%s" %(self.hostname, self.port))
else:
return True
count = count + 1
time.sleep(1)
tinctest.logger.warning("gpfdist process not killed on %s:%s" %(self.hostname, self.port))
return False
Example 11: check_gpfdist_process
def check_gpfdist_process(self, wait=60, port=None, raise_assert=True):
"""
Check for the gpfdist process
Wait at least 60s until gpfdist starts, else raise an exception
"""
if port is None:
port = self.port
process_started = False
count = 0
while (not process_started and count<wait):
cmd_str = " | ".join([
self.ps_command + ' -ef',
'grep \"[g]pfdist -p %s\"' % (port)])
cmd = "gpssh -h %s '%s'" %(self.hostname, cmd_str)
res = {'rc':0, 'stderr':'', 'stdout':''}
run_shell_command(cmd, 'gpfdist process check', res)
content = res['stdout']
if len(content)>0:
if content.find("gpfdist -p %s" % port)>0:
process_started = self.is_gpfdist_connected(port)
if process_started:
return True
count = count + 1
time.sleep(1)
if raise_assert:
raise GPFDISTError("Could not start gpfdist process")
else:
tinctest.logger.warning("Could not start gpfdist process")
Example 12: test_with_fault_injection
def test_with_fault_injection(self):
"""
add new mirrors run workload to verify if cluster functioning correctly, and
inject the mirror to bring cluster into change tracking, then recoverseg
"""
filerepUtil = Filerepe2e_Util()
gprecover = GpRecover()
self._setup_gpaddmirrors()
self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrros with fault injection', res)
gprecover.wait_till_insync_transition()
self.assertEqual(0, res['rc'])
self.run_simple_ddl_dml()
    # after adding new mirrors, check the integrity between primary and mirror
self.check_mirror_seg()
out_file = local_path('inject_fault_into_ct')
filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
    # trigger the transition to change tracking
PSQL.run_sql_command('drop table if exists foo;', dbname = 'template1')
filerepUtil.wait_till_change_tracking_transition()
gprecover.incremental()
gprecover.wait_till_insync_transition()
out_file=local_path('reset_fault')
filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
Example 13: setUpClass
def setUpClass(self):
super(MapreduceMPPTestCase, self).setUpClass()
gppkg = Gppkg()
gppkg.gppkg_install(product_version, 'plperl')
setup_command = "create language plperl;"
PSQL.run_sql_command(setup_command, dbname = os.environ.get('PGDATABASE'))
"compile functions.c and build functions.so"
makeLog = local_path('testBuildSOLog.out')
cmdMake = 'cd '+local_path('c_functions') + ' && make clean && make'
res = {'rc': 0, 'stdout' : '', 'stderr': ''}
run_shell_command(cmdMake, 'compile functions.c', res)
    with open(makeLog, 'w') as f:
        f.write(res['stdout'])
if res['rc']:
raise Exception('a problem occurred while creating the so files ')
so_dir = local_path('c_functions')
sharedObj = local_path('c_functions/functions.so')
    # if not os.path.isfile(sharedObj):
    #     raise gptest.GPTestError('so file does not exist')
    # For a multinode cluster, copy the shared object functions.so to all primary segments
if gpdbconfig.is_multinode():
res = {'rc':0, 'stderr':'', 'stdout':''}
hosts = gpdbconfig.get_hosts(segments=True)
scp_cmd = 'gpscp -h ' +' -h '.join(map(str,hosts)) +' '+ sharedObj + ' =:%s' % so_dir
        run_shell_command(scp_cmd, 'copy functions.so to segment hosts', res)
if res['rc']:
raise Exception('Could not copy shared object to primary segment')
Example 14: changetracking
def changetracking(self, type = 'mirror'):
''' Routine to inject fault that places system in change tracking'''
tinctest.logger.info("Put system in changetracking ")
cmd_str = 'gpfaultinjector -f filerep_consumer -m async -y fault -r %s -H ALL' %type
results={'rc':0, 'stdout':'', 'stderr':''}
run_shell_command(cmd_str, results=results)
return results['stdout']
Example 15: put_file_in_hdfs
def put_file_in_hdfs(self, input_path, hdfs_path):
if hdfs_path.rfind('/') > 0:
hdfs_dir = hdfs_path[:hdfs_path.rfind('/')]
cmd_str = "hdfs dfs -mkdir -p %s" %hdfs_dir
run_shell_command(cmd_str, "Creating parent HDFS dir for path %s" %input_path)
cmd_str = "hdfs dfs -put %s %s" %(input_path, hdfs_path)
run_shell_command(cmd_str, "Copy to HDFS : file %s" %input_path)