本文整理汇总了Python中mpp.lib.config.GPDBConfig类的典型用法代码示例。如果您正苦于以下问题:Python GPDBConfig类的具体用法?Python GPDBConfig怎么用?Python GPDBConfig使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了GPDBConfig类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_substitutions
def get_substitutions(self):
    # Build the substitution map used to expand @...@ placeholders in the
    # test's SQL template files.
    subst = {}
    config = GPDBConfig()
    # NOTE(review): `host` is fetched from the cluster config but never used
    # below -- the hostname value is hard-coded instead. Confirm whether the
    # literal 'rh55-qavm44' should actually be `host`.
    host, _ = config.get_hostandport_of_segment(0)
    # NOTE(review): these dictionary keys look garbled (possibly mangled by an
    # email-obfuscation filter on the page this code was copied from); verify
    # the original placeholder names against the SQL templates before relying
    # on them.
    subst['@[email protected]'] = 'rh55-qavm44'
    subst['@[email protected]'] = os.path.join(self.get_sql_dir(), 'datagen.py')
    return subst
示例2: wait_for_shutdown_before_commit
def wait_for_shutdown_before_commit(self):
    """Queue the FTS 'wait for shutdown before commit' scenario.

    Each step is appended as its own single-item scenario list so the
    framework executes them sequentially: inject the shutdown-wait and
    filerep faults, verify the fault triggered, restart the database
    without an rc check, and finally validate cluster state.
    """
    self.check_system()
    config = GPDBConfig()
    db_id = config.get_dbid(-1,'p')
    base = 'mpp.gpdb.tests.storage.fts.fts_transitions.FtsTransitions.'
    steps = [
        (base + 'set_faults', ['fts_wait_for_shutdown', 'infinite_loop'], {'seg_id': db_id}),
        (base + 'set_faults', ['filerep_consumer', 'fault', 'primary']),
        (base + 'check_fault_status', ['fts_wait_for_shutdown'], {'seg_id': db_id}),
        base + 'restart_db_with_no_rc_check',
        base + 'cluster_state',
    ]
    for step in steps:
        self.test_case_scenario.append([step])
示例3: GpRecover
class GpRecover(GpRecoverseg):
    '''Class for gprecoverseg utility methods: incremental/full recovery,
    rebalancing, and polling until segments reach the insync state.'''

    # Polling attempts for wait_till_insync_transition; 400 polls * 3s ~= 20 minutes.
    MAX_COUNTER = 400

    def __init__(self, config=None):
        # Reuse a caller-supplied GPDBConfig (avoids re-reading the cluster
        # configuration); otherwise build a fresh one.
        if config is not None:
            self.config = config
        else:
            self.config = GPDBConfig()
        self.gphome = os.environ.get('GPHOME')

    def incremental(self, workerPool=False):
        '''Incremental Recoverseg; optionally run through a worker pool.'''
        tinctest.logger.info('Running Incremental gprecoverseg...')
        if workerPool:
            return self.run_using_workerpool()
        return self.run()

    def full(self):
        '''Full Recoverseg (gprecoverseg -F).'''
        tinctest.logger.info('Running Full gprecoverseg...')
        return self.run(option = '-F')

    def rebalance(self):
        '''Run gprecoverseg to rebalance the cluster (gprecoverseg -r).'''
        tinctest.logger.info('Running gprecoverseg rebalance...')
        return self.run(option = '-r')

    def wait_till_insync_transition(self):
        '''
        Poll till all the segments transition to insync state.
        Number of trials set to MAX_COUNTER; raises Exception on timeout.
        '''
        counter = 1
        while not self.config.is_not_insync_segments():
            if counter > self.MAX_COUNTER:
                raise Exception('Segments did not come insync after 20 minutes')
            counter += 1
            time.sleep(3)  # Wait 3 secs before polling again
        tinctest.logger.info('Segments are synchronized ...')
        return True

    def recover_rebalance_segs(self):
        '''Incrementally recover any down segments, then rebalance so every
        segment runs in its preferred role. No-op if already balanced.'''
        if not self.config.is_balanced_segments():
            # recover
            # BUGFIX: exception messages previously misspelled 'Gprecvoerseg'.
            if not self.incremental():
                raise Exception('Gprecoverseg failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments recovered and back in sync')
            # rebalance
            if not self.rebalance():
                raise Exception('Gprecoverseg -r failed')
            if not self.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Segments rebalanced and back in sync')
示例4: setUpClass
def setUpClass(cls):
    """Recreate the empty 'wet' database, start gpfdist on segment 0's host,
    and prepare a clean output directory for WET (writable external table)
    results."""
    # we need an empty db to run the tests
    tinctest.logger.info("recreate database wet using dropdb/createdb")
    Command('recreatedb', 'dropdb wet; createdb wet').run(validateAfter=False)
    cls.drop_roles()
    super(LegacyWETTestCase, cls).setUpClass()

    source_dir = cls.get_source_dir()
    cfg = GPDBConfig()
    seg_host, _ = cfg.get_hostandport_of_segment(0)
    fdist_port = mppUtil.getOpenPort(8080)
    tinctest.logger.info("gpfdist host = {0}, port = {1}".format(seg_host, fdist_port))
    cls.config = cfg

    cls.gpfdist = GPFDIST(fdist_port, seg_host,
                          directory=os.path.join(source_dir, 'data'))
    cls.gpfdist.startGpfdist()

    # WET writes into this directory; start from a clean slate.
    data_out_dir = os.path.join(cls.gpfdist.getdir(), 'output')
    shutil.rmtree(data_out_dir, ignore_errors=True)
    os.mkdir(data_out_dir)
示例5: test_insert_commit_before_truncate
def test_insert_commit_before_truncate(self):
    '''
    @description We suspend the vacuum on master after the first
                 transaction, and connect to segment. Modify the
                 relation in vacuum and commit the segment local
                 transaction before the truncate transaction starts.
    '''
    fault_name = 'vacuum_relation_end_of_first_round'
    gpdbconfig = GPDBConfig()
    # Primary segment serving content 0 -- conn2 below connects to it directly.
    seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
    filereputil = Filerepe2e_Util()
    # Suspend vacuum on the master (seg_id '1') at the end of its first round.
    filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')
    # run vacuum in background, it'll be blocked.
    sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
    psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
    thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
    thread1.start()
    # Block until the vacuum session has actually reached the fault point.
    self.check_fault_triggered(fault_name)
    sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
    # utility to seg0: modify the relation and commit the segment-local
    # transaction while the master-side vacuum is still suspended.
    psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                 host=seghost, port=segport,
                 PGOPTIONS='-c gp_session_role=utility')
    self.run_psql(psql2)
    # resume vacuum
    filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')
    thread1.join()
    # Output of both sessions must match their expected answer files.
    self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
    self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
示例6: setUpFilespaceForCTAS
def setUpFilespaceForCTAS(isForHawq):
    """Create the filespace used by the CTAS DDL tests, choosing the HAWQ
    or GPDB filespace helper depending on *isForHawq*."""
    config = GPDBConfig()
    filespace = HAWQGpfilespace() if isForHawq else Gpfilespace()
    if config.is_not_insync_segments():
        filespace.create_filespace('tincrepo_qp_ddl_ctas')
示例7: get_host_and_db_path
def get_host_and_db_path(self, dbname, contentid=0):
    ''' Get the host and database path for the content'''
    config = GPDBConfig()
    # Look up the database OID and the primary segment's dbid for the content.
    db_oid = PSQL.run_sql_command("select oid from pg_database where datname='%s'" % dbname, flags='-q -t', dbname='postgres')
    dbid = PSQL.run_sql_command("select dbid from gp_segment_configuration where content=%s and role='p'" % contentid, flags='-q -t', dbname='postgres')
    host, datadir = config.get_host_and_datadir_of_segment(dbid=dbid.strip())
    # Database files live under <datadir>/base/<oid>.
    return (host.strip(), os.path.join(datadir, 'base', db_oid.strip()))
示例8: is_changetracking
def is_changetracking(self):
    """
    @summary: return true if system is in change tracking mode
    @return: Boolean value representing the whether the cluster is insync or not
    """
    gpconfig = GPDBConfig()
    all_insync = gpconfig.is_not_insync_segments()
    return not all_insync
示例9: copy_files_to_master
def copy_files_to_master(filename, location):
    """Copy *filename* into *location* on the master host using gpssh+scp.

    Raises Exception if the shell command reports a non-zero return code.
    """
    master = GPDBConfig().get_masterhost()
    cmd = 'gpssh -h %s -e "scp %s %s:%s/" ' % (master, filename, master, location)
    tinctest.logger.debug(cmd)
    result = {"rc": 0, "stderr": "", "stdout": ""}
    run_shell_command(cmd, "run scp", result)
    if result["rc"] > 0:
        raise Exception("Copying to host %s failed" % master)
示例10: get_substitutions
def get_substitutions(self):
    """
    Return the template substitution variables: the hostname of segment 0
    under the 'HOST' key.
    """
    seg_host, _ = GPDBConfig().get_hostandport_of_segment(0)
    return {'HOST': seg_host}
示例11: test_pg_aocsseg_corruption
def test_pg_aocsseg_corruption(self):
    """Corrupt pg_aocsseg for a column-oriented appendonly table and verify
    the expected diagnostics by diffing utility-mode psql output against the
    answer file."""
    self.create_appendonly_tables(row=False)
    host, port = GPDBConfig().get_hostandport_of_segment()
    self.transform_sql_file(os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql.t'), 'co1')
    sql_file = os.path.join(self.sql_dir, 'corrupt_pg_aocsseg.sql')
    out_file = os.path.join(self.output_dir, 'corrupt_pg_aocsseg.out')
    ans_file = os.path.join(self.ans_dir, 'corrupt_pg_aocsseg.ans')
    PSQL.run_sql_file_utility_mode(sql_file, out_file=out_file, host=host,
                                   port=port, dbname=os.environ['PGDATABASE'])
    if not Gpdiff.are_files_equal(out_file, ans_file, match_sub=[local_path('sql/init_file')]):
        raise Exception('Corruption test of pg_aocsseg failed for appendonly tables !')
示例12: test_insert_unlock_before_truncate
def test_insert_unlock_before_truncate(self):
    '''
    @description This is rather complicated. We suspend the vacuum on
                 master after the first transaction, and connect to
                 segment, modify the relation in question, and release the
                 lock, keep the transaction. To release the lock, we need
                 a special UDF. Vacuum is supposed to skip truncate if it
                 sees such in-progress transaction. Usually this should
                 not happen, but it rather simulates catalog DDL.
    '''
    fault_name = 'vacuum_relation_end_of_first_round'
    gpdbconfig = GPDBConfig()
    # Primary segment serving content 0 -- psql2/pygresql connect to it.
    seghost, segport = gpdbconfig.get_hostandport_of_segment(0, 'p')
    filereputil = Filerepe2e_Util()
    # Suspend vacuum on the master (seg_id '1') at the end of its first round.
    filereputil.inject_fault(f=fault_name, y='suspend', seg_id='1')
    PSQL.run_sql_command(sql_cmd='drop table if exists sync_table; create table sync_table(a int)')
    # Use pygresql to keep the connection and issue commands seprately.
    # thread2 will wait on sync_table before finish its work, so we
    # can keep the transaction open until the vacuum completes its work.
    conn = pygresql.pg.connect(host=seghost, port=int(segport), opt='-c gp_session_role=utility')
    conn.query('begin')
    # Hold an exclusive lock on sync_table; thread2 will block on it until
    # we commit below.
    conn.query('lock sync_table in access exclusive mode')
    # run vacuum background, it'll be blocked.
    sql_file1, ans_file1, out_file1 = self.get_file_names('conn1')
    psql1 = PSQL(sql_file=sql_file1, out_file=out_file1)
    thread1 = threading.Thread(target=self.run_psql, args=(psql1,))
    thread1.start()
    # Block until the vacuum session has actually reached the fault point.
    self.check_fault_triggered(fault_name)
    sql_file2, ans_file2, out_file2 = self.get_file_names('conn2')
    # utility to seg0: runs while the master-side vacuum is suspended.
    psql2 = PSQL(sql_file=sql_file2, out_file=out_file2,
                 host=seghost, port=segport,
                 PGOPTIONS='-c gp_session_role=utility')
    thread2 = threading.Thread(target=self.run_psql, args=(psql2,))
    thread2.start()
    # resume vacuum
    filereputil.inject_fault(f=fault_name, y='reset', seg_id='1')
    # Once thread1 finishes, we can now release the lock on sync_table,
    # so that thread2 can proceed.
    thread1.join()
    conn.query('commit')
    thread2.join()
    # Output of both sessions must match their expected answer files.
    self.assertTrue(Gpdiff.are_files_equal(out_file1, ans_file1))
    self.assertTrue(Gpdiff.are_files_equal(out_file2, ans_file2))
示例13: check_logs
def check_logs(search_string_list):
    """
    Check all the segment logs(master/primary/mirror) for keywords in the
    search_string_list.

    Returns (False, msg) on the first match found, otherwise
    (True, "No Issues found").
    """
    config = GPDBConfig()
    raw_dbids = PSQL.run_sql_command("select dbid from gp_segment_configuration;", flags="-q -t", dbname="postgres")
    for dbid in raw_dbids.split():
        host, data_dir = config.get_host_and_datadir_of_segment(dbid.strip())
        rc, msg = search_string(host, search_string_list, data_dir)
        if rc:
            return (False, msg)
    return (True, "No Issues found")
示例14: setUpClass
def setUpClass(cls):
    """Distribute datagen.py to every cluster host via gpscp so the external
    table queries in create_base_workload can execute it."""
    super(PreExpansionWorkloadTests, cls).setUpClass()
    # gpscp the script required for external table in create_base_workload
    script = os.path.join(cls.get_sql_dir(), 'datagen.py')
    host_list = GPDBConfig().get_hosts()
    hosts_file = os.path.join(cls.get_out_dir(), 'hostfile')
    with open(hosts_file, 'w') as f:
        f.write('\n'.join(host_list))
    result = {'rc':0, 'stderr':'', 'stdout':''}
    run_shell_command("gpscp -f %s %s =:$GPHOME/bin" % (hosts_file, script), 'gpscp script', result)
    if result['rc'] > 0:
        # Best-effort: the script may already be present on the hosts.
        tinctest.logger.warning("Failed to gpscp the required script to all the segments for external table queries. The script might already exist !")
示例15: test_do_full_recovery
def test_do_full_recovery(self):
"""
[feature]: Performs Full Recovery
"""
config = GPDBConfig()
recoverseg = GpRecoverseg()
tinctest.logger.info('Running Full gprecoverseg...')
recoverseg.run(option = '-F')
rtrycnt = 0
while (not config.is_not_insync_segments()):
tinctest.logger.info("Waiting [%s] for DB to recover" %rtrycnt)
rtrycnt = rtrycnt + 1