This article collects typical usage examples of the Python class mpp.lib.PSQL.PSQL. If you are wondering what the PSQL class does, how to use it, or what real code that uses it looks like, the curated examples below should help.
Fifteen code examples of the PSQL class are shown below, ordered by popularity.
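Before diving in, here is a minimal sketch of the PSQL helpers these examples exercise. The method names and keyword arguments are inferred from the examples on this page; treat the exact signatures, defaults, and return types as assumptions rather than authoritative documentation.

# Minimal sketch of the PSQL API surface as used in the examples below.
# Signatures are inferred from usage on this page, not from library docs.
from mpp.lib.PSQL import PSQL

# Run an inline SQL command; an optional results dict collects the
# return code and output (see Example 1).
results = {'rc': 0, 'stdout': '', 'stderr': ''}
PSQL.run_sql_command("SELECT 1", results=results)
assert results['rc'] == 0, results['stderr']

# With flags, run_sql_command returns the psql output as a string
# (see Examples 9, 13, and 14).
output = PSQL.run_sql_command('SHOW gp_dbid', flags='-A -t')

# Run a SQL file and capture its output for comparison against an
# expected-answer file via Gpdiff.are_files_equal (see Example 8).
ok = PSQL.run_sql_file('query.sql', out_file='query.out')

# Run a command directly against a single segment in utility mode.
# The host and port here are placeholders, not real cluster values.
PSQL.run_sql_command_utility_mode(sql_cmd="SELECT 1", host='sdw1',
                                  port=40000, flags='-q -t')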
Example 1: test_reindex_pg_class
def test_reindex_pg_class(self):
    tinctest.logger.info("create checkpoint")
    results = {'rc': 0, 'stdout': '', 'stderr': ''}
    PSQL.run_sql_command("checkpoint", results=results)
    assert results['rc'] == 0, results['stderr']
    tinctest.logger.info("inject fault to skip checkpoints")
    cmd = Command("skip checkpoint on primaries",
                  "gpfaultinjector -f checkpoint -m async -y skip -o 0"
                  " -H ALL -r primary")
    cmd.run(validateAfter=True)
    tinctest.logger.info(cmd.get_results().printResult())
    cmd = Command("skip checkpoint on master",
                  "gpfaultinjector -f checkpoint -m async -y skip -o 0 -s 1")
    cmd.run(validateAfter=True)
    tinctest.logger.info(cmd.get_results().printResult())
    tinctest.logger.info("reindex pg_class indexes")
    assert PSQL.run_sql_file(local_path('reindex_pg_class.sql'))
    tinctest.logger.info("shutdown immediate")
    cmd = Command("shutdown immediate", "gpstop -ai")
    cmd.run(validateAfter=True)
    tinctest.logger.info(cmd.get_results().printResult())
    tinctest.logger.info("trigger recovery")
    cmd = Command("restart the cluster", "gpstart -a")
    cmd.run(validateAfter=True)
    tinctest.logger.info(cmd.get_results().printResult())
    tinctest.logger.info("validate recovery succeeded")
    results = {'rc': 0, 'stdout': '', 'stderr': ''}
    PSQL.run_sql_command("DROP TABLE reindex_pg_class_test", results=results)
    assert results['rc'] == 0, results['stderr']
Example 2: test_21_use_udf_gp_aovisimap_hidden_info_uao_upd_vacuum
def test_21_use_udf_gp_aovisimap_hidden_info_uao_upd_vacuum(self):
    tablename = 'uao_table_test11'
    tinctest.logger.info("-------------------------------")
    tinctest.logger.info('test_21 Verify the hidden tup_count using UDF gp_aovisimap_hidden_info(oid) for uao relation after update_vacuum')
    tinctest.logger.info("-------------------------------\n")
    out_file = os.path.join(self.outpath, 'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.out')
    sql_file = os.path.join(self.sqlpath, 'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.sql')
    ans_file = os.path.join(self.anspath, 'create_tab_tupcount_in_pg_aoseg_uaotable_upd_11.ans')
    # create uao table and insert 10 rows
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    assert Gpdiff.are_files_equal(out_file, ans_file)
    # get relid for newly created table
    relid = self.get_relid(file_name=tablename)
    # get utility mode connection info
    utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
    u_port = utilitymodeinfo[0]
    u_host = utilitymodeinfo[1]
    assert 0 == int(self.get_hidden_tup_cnt(relid=relid, host=u_host, port=u_port))
    # update rows
    sql_cmd3 = "update %s set j = 'test11';" % tablename
    PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd3, host=u_host, port=u_port, flags='-q -t')
    assert int(self.get_hidden_tup_cnt(relid=relid, host=u_host, port=u_port)) > 0
    self.vacuum_full(tablename=tablename)
    assert 0 == int(self.get_hidden_tup_cnt(relid=relid, host=u_host, port=u_port))
Example 3: template0_wrap_around
def template0_wrap_around(self):
    """
    Raise next xid so that age(template0) suffers a wrap around and
    becomes negative.  Create a new database off template0, which
    also suffers wrap around.  Reset the new db's age.  Sanity
    must succeed on the new db.
    """
    self._raise_template0_age(self.WRAP_LIMIT, self.gparray.master)
    PSQL(sql_cmd="CREATE DATABASE newdb TEMPLATE template0").run(
        validateAfter=True)
    sql = "SELECT age(datfrozenxid) FROM pg_database WHERE datname='newdb'"
    dburl = dbconn.DbURL()
    with dbconn.connect(dburl, utility=True) as conn:
        age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
    # Xid wrap-around should cause template0 and newdb's age to be negative.
    self.assertTrue(age_newdb < 0)
    # All xids in newdb are frozen at this point.  Therefore, we
    # can reset its age so that it is not negative.
    self._reset_age("newdb")
    with dbconn.connect(dburl, utility=True) as conn:
        age_newdb = int(dbconn.execSQLForSingleton(conn, sql))
    self.assertTrue(age_newdb > 0)
    # Verify that normal operations can be performed on newdb post recovery
    # from wraparound.
    self._basic_sanity_check("clean", {"dbname": "newdb"})
    logger.info("Sanity succeeded on newdb, dropping it.")
    PSQL.drop_database(dbname="newdb")
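Note that this example constructs a PSQL object directly and calls run() on it (PSQL(sql_cmd=...).run(validateAfter=True)) rather than going through class-level helpers such as PSQL.run_sql_command; both styles appear among the examples on this page.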
Example 4: test_08_call_udf_gp_aovisimap_fordelete
def test_08_call_udf_gp_aovisimap_fordelete(self):
    tinctest.logger.info("-------------------------------")
    tinctest.logger.info('test_08 Verify the usage of UDF gp_aovisimap in utility mode for deleted tuple')
    tinctest.logger.info("-------------------------------\n")
    out_file = os.path.join(self.outpath, 'create_tab_gp_aovisimap_del_08.out')
    sql_file = os.path.join(self.sqlpath, 'create_tab_gp_aovisimap_del_08.sql')
    ans_file = os.path.join(self.anspath, 'create_tab_gp_aovisimap_del_08.ans')
    tablename = 'uao_visimap_test08'
    # create uao table and insert 10 rows
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    assert Gpdiff.are_files_equal(out_file, ans_file)
    # get relid for newly created table
    relid = self.get_relid(file_name=tablename)
    # get the segment_id where we'll log in utility mode, then the
    # hostname and port for that segment
    utilitymodeinfo = self.get_utilitymode_conn_info(relid=relid)
    u_port = utilitymodeinfo[0]
    u_host = utilitymodeinfo[1]
    before_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
    before_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
    assert int(before_visimaprowcnt) == 0
    sql_cmd = "delete from uao_visimap_test08;"
    PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd, host=u_host, port=u_port, flags='-q -t')
    after_tablerowcnt = self.get_rowcnt_table_on_segment(tablename=tablename, host=u_host, port=u_port)
    after_visimaprowcnt = self.get_visimap_cnt_on_segment(relid=relid, host=u_host, port=u_port)
    assert int(after_tablerowcnt) == 0
Example 5: test_18_gp_persistent_relation_node_uaocs_table_eof_upd
def test_18_gp_persistent_relation_node_uaocs_table_eof_upd(self):
    tablename = 'uaocs_table_test14'
    tinctest.logger.info("-------------------------------")
    tinctest.logger.info('test_18 Verify the eof mark in pg_aoseg and gp_persistent_relation_node table for uaocs relation after update')
    tinctest.logger.info("-------------------------------\n")
    out_file = os.path.join(self.outpath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.out')
    sql_file = os.path.join(self.sqlpath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.sql')
    ans_file = os.path.join(self.anspath, 'create_tab_gp_persistent_relation_node_uaocs_table_upd_14.ans')
    # create uaocs table and insert 10 rows
    sql_out = PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    assert Gpdiff.are_files_equal(out_file, ans_file)
    # get relid for newly created table
    relid = self.get_relid(file_name=tablename)
    # get utility mode connection info
    utilitymodeinfo = self.get_utilitymode_conn_uaocs_info(relid=relid)
    u_port = utilitymodeinfo[0]
    u_host = utilitymodeinfo[1]
    assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)
    # delete 1 row
    sql_cmd3 = "delete from %s where i = (select min(i) from %s);" % (tablename, tablename)
    PSQL.run_sql_command_utility_mode(sql_cmd=sql_cmd3, host=u_host, port=u_port, flags='-q -t')
    self.vacuum_full(tablename=tablename)
    assert self.is_same_eof_uaocs_on_segment(relid=relid, host=u_host, port=u_port)
Example 6: test_xlogcleanup
def test_xlogcleanup(self):
    """
    Verify that an xlog seg created while basebackup is dumping
    out data does not get cleaned up.
    """
    shutil.rmtree('base', True)
    PSQL.run_sql_command('DROP table if exists foo')
    # Inject fault at post checkpoint create (basebackup)
    logger.info('Injecting base_backup_post_create_checkpoint fault ...')
    result = self.suspend_at(
        'base_backup_post_create_checkpoint')
    logger.info(result.stdout)
    self.assertEqual(result.rc, 0, result.stdout)
    # Now execute basebackup. It will be blocked due to the
    # injected fault.
    logger.info('Perform basebackup with xlog & recovery.conf ...')
    pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
    # Give basebackup a moment to reach the fault and trigger it.
    logger.info('Check if suspend fault is hit ...')
    triggered = self.wait_triggered(
        'base_backup_post_create_checkpoint')
    self.assertTrue(triggered, 'Fault was not triggered')
    # Perform operations that cause xlog seg generation.
    logger.info('Performing xlog seg generation ...')
    count = 0
    while count < 10:
        PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;')
        count = count + 1
    # Resume basebackup.
    result = self.resume('base_backup_post_create_checkpoint')
    logger.info(result.stdout)
    self.assertEqual(result.rc, 0, result.stdout)
    # Wait until basebackup ends.
    logger.info('Waiting for basebackup to end ...')
    sql = "SELECT count(*) FROM pg_stat_replication"
    with dbconn.connect(dbconn.DbURL(), utility=True) as conn:
        while True:
            curs = dbconn.execSQL(conn, sql)
            results = curs.fetchall()
            if int(results[0][0]) == 0:
                break
    # Verify that basebackup completed successfully: recovery.conf
    # must exist in the backup directory.
    self.assertTrue(os.path.exists(os.path.join('base', 'recovery.conf')))
    logger.info('Found recovery.conf in the backup directory.')
    logger.info('Pass')
Example 7: test_run_sql_command_catalog_update
def test_run_sql_command_catalog_update(self):
    sql_cmd = 'show gp_session_role;'
    out_file = os.path.join(os.path.dirname(inspect.getfile(self.__class__)), 'test_catalog_update.out')
    self.assertFalse(os.path.exists(out_file))
    try:
        PSQL.run_sql_command_catalog_update(sql_cmd=sql_cmd, out_file=out_file)
        self.assertTrue(os.path.exists(out_file))
        with open(out_file, 'r') as f:
            output = f.read()
            self.assertIsNotNone(re.search('utility', output))
    finally:
        os.remove(out_file)
        self.assertFalse(os.path.exists(out_file))
    sql_cmd = 'show allow_system_table_mods;'
    out_file = os.path.join(os.path.dirname(inspect.getfile(self.__class__)), 'test_catalog_update.out')
    self.assertFalse(os.path.exists(out_file))
    try:
        PSQL.run_sql_command_catalog_update(sql_cmd=sql_cmd, out_file=out_file)
        self.assertTrue(os.path.exists(out_file))
        with open(out_file, 'r') as f:
            output = f.read()
            self.assertIsNotNone(re.search('DML', output))
    finally:
        os.remove(out_file)
        self.assertFalse(os.path.exists(out_file))
Example 8: doQuery
def doQuery(self, sqlfile, default=''):
    sql_file = local_path(sqlfile)
    filename_prefix = sqlfile.split('.sql')[0]
    out_file = local_path(filename_prefix + '.out')
    ans_file = local_path(filename_prefix + '.ans')
    PSQL.run_sql_file(sql_file=sql_file, out_file=out_file)
    self.assertTrue(Gpdiff.are_files_equal(out_file, ans_file))
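This sql-file / out-file / ans-file pattern, validated with Gpdiff.are_files_equal, is the same harness used by Examples 2, 4, and 5 above: run a .sql file, capture its output to a .out file, and diff it against a known-good .ans file.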
Example 9: verify_config_file_with_gp_config
def verify_config_file_with_gp_config(self):
    """
    Compare gp_segment_configuration and pg_filespace_entry against the
    input file mirror_data_dir, to double-check that the cluster is
    configured as intended.
    """
    with open(self.mirror_config_file, 'r') as f:
        next(f)
        for line in f:
            line = line.strip()
            mirror_seg_info = line.split('=')[1]
            cols = mirror_seg_info.split(':')
            content_id = cols[0]
            address = cols[1]
            port = cols[2]
            mir_replication_port = cols[3]
            query_on_configuration = '''select * from gp_segment_configuration where content='%s' and address='%s' and port='%s'
                and replication_port='%s' ''' % (content_id, address, port, mir_replication_port)
            config_info = PSQL.run_sql_command(query_on_configuration, flags='-q -t', dbname='template1')
            config_info = config_info.strip()
            # as intended, the entry should exist in the cluster
            self.assertNotEqual(0, len(config_info))
            query_on_fselocation = '''select fselocation from gp_segment_configuration, pg_filespace_entry where dbid=fsedbid
                and preferred_role='m' and content='%s' ''' % content_id
            fs_locations = PSQL.run_sql_command(query_on_fselocation, flags='-q -t', dbname='template1')
            size = len(cols)
            for fs_index in range(5, size):
                fs_location = cols[fs_index]
                self.assertIn(os.path.dirname(fs_location), fs_locations)
Example 10: setUpClass
def setUpClass(self):
    super(MapreduceMPPTestCase, self).setUpClass()
    gppkg = Gppkg()
    gppkg.gppkg_install(product_version, 'plperl')
    setup_command = "create language plperl;"
    PSQL.run_sql_command(setup_command, dbname=os.environ.get('PGDATABASE'))
    # compile functions.c and build functions.so
    makeLog = local_path('testBuildSOLog.out')
    cmdMake = 'cd ' + local_path('c_functions') + ' && make clean && make'
    res = {'rc': 0, 'stdout': '', 'stderr': ''}
    run_shell_command(cmdMake, 'compile functions.c', res)
    with open(makeLog, 'w') as f:
        f.write(res['stdout'])
    if res['rc']:
        raise Exception('a problem occurred while creating the .so files')
    so_dir = local_path('c_functions')
    sharedObj = local_path('c_functions/functions.so')
    # For a multinode cluster, the shared object must be copied to all
    # primary segments.
    if gpdbconfig.is_multinode():
        res = {'rc': 0, 'stderr': '', 'stdout': ''}
        hosts = gpdbconfig.get_hosts(segments=True)
        scp_cmd = 'gpscp -h ' + ' -h '.join(map(str, hosts)) + ' ' + sharedObj + ' =:%s' % so_dir
        run_shell_command(scp_cmd, 'copy functions.so to segments', res)
        if res['rc']:
            raise Exception('Could not copy shared object to primary segment')
Example 11: test_scalar_consolidation_NEG_paramDiffType
def test_scalar_consolidation_NEG_paramDiffType(self):
    """
    Scalar consolidation, negative case: two parameters of different types.
    """
    self.runFunctionTest("scalar_consolidation", "NEG_paramDiffType")
    filename = local_path("c_functions/scalar_consolidation/NEG_paramDiffType_cleanup.sql")
    PSQL.run_sql_file(filename)
Example 12: test_streaming
def test_streaming(self):
    """
    Run sendtest and let the database emit WAL; sendtest should
    receive new WAL records.  Finally, kill the walsender process,
    otherwise the test does not finish.

    @tags sanity
    """
    PSQL.run_sql_command('DROP TABLE IF EXISTS foo')
    with WalClient("replication=true") as client:
        (sysid, tli, xpos) = client.identify_system()
        xpos_ptr = XLogRecPtr.from_string(xpos)
        client.start_replication(xpos_ptr)
        # Can't use PSQL here as it is blocked due to Sync Rep
        subprocess.Popen(['psql', '-c', 'CREATE TABLE foo(a int, b int)'],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        while True:
            msg = client.receive(1000)
            if isinstance(msg, WalMessageData):
                header = msg.header
                # sync replication needs a reply, otherwise the backend blocks
                client.reply(header.walEnd, header.walEnd, header.walEnd)
                # success, we got a 'w' message
                break
            elif isinstance(msg, WalMessageNoData):
                # could be a timeout
                client.reply(xpos_ptr, xpos_ptr, xpos_ptr)
            else:
                raise StandardError(msg.errmsg)
Example 13: run_test
def run_test(self):
    """
    Override of SQLTestCase.  Initialize the standby, run some sql,
    then activate it and check that the data was streamed correctly.
    """
    sql_file = self.sql_file
    ans_file = self.ans_file
    nsender = self.wait_for_walsender()
    self.assertEqual(nsender, 1, 'replication has not begun')
    # The setup script runs on the primary while the standby is running.
    # A .in file, if present, is substituted with runtime information.
    setup_file = sql_file.replace('.sql', '_setup.sql')
    if os.path.exists(setup_file + '.in'):
        self.preprocess_file(setup_file + '.in')
    self.assertTrue(PSQL.run_sql_file(setup_file, dbname=self.db_name))
    self.standby_dir = self.activatestdby.get_standby_dd()
    self.standby_port = self.activatestdby.get_standby_port()
    self.standby_host = self.gpinit_stdby.get_standbyhost()
    self.activatestdby.activate()
    datadir = os.path.abspath(self.standby_datadir)
    with walrepl.NewEnv(MASTER_DATA_DIRECTORY=self.standby_dir,
                        PGPORT=self.standby_port) as env:
        result = super(gpactivatestandby, self).run_test()
        sql = 'SHOW gp_dbid'
        result = PSQL.run_sql_command(sql, flags='-A -t')
        self.assertEqual(result.strip(), '1')
        self.assertEqual(self.get_gp_dbid(self.standby_dir), 1, 'gp_dbid should show 1')
        if 'cleanup_filespace' in self._metadata:
            mpp.gpdb.tests.storage.walrepl.lib.cleanupFilespaces(dbname=self.db_name)
    return result
Example 14: backend_terminate
def backend_terminate(self):
    """
    Get the backend process pid by querying pg_stat_activity, then
    execute pg_terminate_backend(pid).
    """
    MAX_TRY = 5
    counter = 0
    get_backend_pid = 'SELECT procpid FROM pg_stat_activity WHERE current_query like \'create temp table t as select%\';'
    result = PSQL.run_sql_command(get_backend_pid, flags='-q -t', dbname=self.db_name)
    tinctest.logger.info('result from getting backend procpid is %s' % result)
    procpid = result.strip()
    while not procpid and counter < MAX_TRY:
        result = PSQL.run_sql_command(get_backend_pid, flags='-q -t', dbname=self.db_name)
        procpid = result.strip()
        counter += 1
    if counter == MAX_TRY:
        raise Exception('unable to select out the backend process pid')
    kill_backend = 'SELECT pg_terminate_backend(%s);' % procpid
    result = PSQL.run_sql_command(kill_backend, flags='-q -t', dbname=self.db_name)
    tinctest.logger.info('result from pg_terminate_backend is %s' % result)
    # check if the process was terminated already
    result = PSQL.run_sql_command(get_backend_pid, flags='-q -t', dbname=self.db_name)
    procpid_after_terminate = result.strip()
    counter = 0
    while procpid_after_terminate == procpid and counter < MAX_TRY:
        result = PSQL.run_sql_command(get_backend_pid, flags='-q -t', dbname=self.db_name)
        procpid_after_terminate = result.strip()
        counter += 1
        time.sleep(1)
    if counter == MAX_TRY:
        raise Exception('pg_terminate_backend failed!')
Example 15: test_gprecoverseg_rebalance
def test_gprecoverseg_rebalance(self):
    self.gprec.wait_till_insync_transition()
    if self.failover('primary'):
        PSQL.run_sql_file(local_path('mirror_failover_trigger.sql'))
        self.gprec.incremental()
        if self.gprec.wait_till_insync_transition():
            self.assertTrue(self.gprec.rebalance())