本文整理汇总了Python中mpp.lib.config.GPDBConfig.get_countprimarysegments方法的典型用法代码示例。如果您正苦于以下问题:Python GPDBConfig.get_countprimarysegments方法的具体用法?Python GPDBConfig.get_countprimarysegments怎么用?Python GPDBConfig.get_countprimarysegments使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mpp.lib.config.GPDBConfig
的用法示例。
在下文中一共展示了GPDBConfig.get_countprimarysegments方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_host_port_mapping
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import get_countprimarysegments [as 别名]
def get_host_port_mapping(self, role):
    """
    Return a mapping of segment hostnames to the ports in use on that host.

    @param role: segment role, passed through to
                 GPDBConfig.get_hostandport_of_segment.
    @return: dict keyed by hostname; each value is a list of port numbers,
             e.g. {'vm9': ['22001', '22000'], 'vm10': ['42000', '42001'], ...}
    """
    config = GPDBConfig()
    no_of_segments = config.get_countprimarysegments()
    hosts_dict = {}
    for seg_no in range(no_of_segments):
        (host, port) = config.get_hostandport_of_segment(seg_no, role)
        # setdefault replaces the dict.has_key() idiom (removed in Python 3)
        # and collapses the if/else branch of the original.
        hosts_dict.setdefault(host, []).append(port)
    return hosts_dict
示例2: GPAddmirrorsTestCase
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import get_countprimarysegments [as 别名]
class GPAddmirrorsTestCase(MPPTestCase):
    """Test fixture for exercising gpaddmirrors against a running cluster."""

    def __init__(self, methodName):
        # Cluster topology, environment, and config-file locations used by
        # the tests in this class.
        self.config = GPDBConfig()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.seg_prefix = os.path.basename(self.mdd).split('-')[0]
        self.master_host = self.config.get_masterhost()
        self.gpinitconfig_template = local_path('configs/gpinitconfig_template')
        self.datadir_config_file = local_path('configs/datadir_config_file')
        self.mirror_config_file = local_path('configs/mirror_config_file')
        self.gpinitconfig_file = local_path('configs/gpinitconfig')
        self.host_file = local_path('configs/hosts')
        self.hosts = self.config.get_hosts(segments = True)
        self.port_base = '40000'
        self.master_port = os.environ.get('PGPORT', '5432')
        self.primary_data_dir = self.config.get_host_and_datadir_of_segment(dbid = 2)[1]
        # initially set the mirror data dir same to primary's
        self.mirror_data_dir = os.path.join(os.path.dirname(os.path.dirname(self.primary_data_dir)), 'mirror')
        self.gpinitsystem = True
        self.number_of_segments = self.config.get_countprimarysegments()
        # Explicit floor division: same result as Py2 int "/" but also
        # correct under Python 3.
        self.number_of_segments_per_host = self.number_of_segments // len(self.hosts)
        self.standby_enabled = False
        self.number_of_parallelism = 4
        self.fs_location = []
        super(GPAddmirrorsTestCase, self).__init__(methodName)

    def setUp(self):
        super(GPAddmirrorsTestCase, self).setUp()

    def _setup_gpaddmirrors(self, port_offset=1000):
        """
        Takes care of creating all the directories required for gpaddmirrors
        and generating input files for gpaddmirrors
        """
        # Generate gpaddmirrors config files
        try:
            self._generate_gpaddmirrors_input_files(port_offset)
        except Exception as e:  # "as e": Py2.6+/Py3-compatible (was "Exception, e")
            tinctest.logger.exception("Encountered exception during generation of input files: %s" % e)
            raise
示例3: wait_till_change_tracking_transition
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import get_countprimarysegments [as 别名]
def wait_till_change_tracking_transition(self, num_seg=None):
    """
    PURPOSE:
        Poll until the cluster reaches change-tracking state, i.e. until
        the expected number of segments transition to mode 'c'.
    @param num_seg: Expected number of segments down. If None, waits for
                    all primary segments to enter change tracking.
    @return: (True, number_of_nodes_in_change_tracking) on success.
    @raise Exception: if the cluster is not in change tracking after
                      ~800 seconds (80 polls at 10 second intervals).
    """
    gpcfg = GPDBConfig()
    if num_seg is None:
        num_seg = gpcfg.get_countprimarysegments()
    num_cl = gpcfg.count_of_nodes_in_mode('c')
    count = 0
    # count_of_nodes_in_mode appears to return a string; compare as int.
    while int(num_cl) < num_seg:
        tinctest.logger.info("waiting for DB to go into change tracking")
        sleep(10)
        num_cl = gpcfg.count_of_nodes_in_mode('c')
        count = count + 1
        if count > 80:
            raise Exception("Timed out: cluster not in change tracking")
    return (True, num_cl)
示例4: GPDBConfigRegressionTests
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import get_countprimarysegments [as 别名]
class GPDBConfigRegressionTests(unittest.TestCase):
    """Regression tests for the GPDBConfig cluster-configuration helper."""

    def __init__(self, methodName):
        self.gpconfig = GPDBConfig()
        super(GPDBConfigRegressionTests, self).__init__(methodName)

    def test_get_countprimarysegments(self):
        nprimary = self.gpconfig.get_countprimarysegments()
        self.assertTrue(nprimary > 0)

    def test_get_hostandport_of_segment(self):
        # Segment number -1 with role 'p' is the master; it must be this host.
        (host, port) = self.gpconfig.get_hostandport_of_segment(psegmentNumber = -1, pRole = 'p')
        myhost = socket.gethostname()
        self.assertEqual(host, myhost)

    def test_get_count_segments(self):
        seg_count = self.gpconfig.get_count_segments()
        # get_count_segments() returns a string (hence .strip()); the old
        # "seg_count.strip() > 0" compared str to int, which is always True
        # on Python 2 and a TypeError on Python 3. Compare numerically.
        self.assertTrue(int(seg_count.strip()) > 0)

    def test_seghostnames(self):
        hostlist = self.gpconfig.get_hosts()
        self.assertTrue(len(hostlist) > 0)

    def test_hostnames(self):
        hostlist = self.gpconfig.get_hosts(segments=False)
        self.assertTrue(len(hostlist) > 0)

    def test_get_masterhost(self):
        # Renamed from "tes_get_masterhost" so unittest discovery runs it.
        master_host = self.gpconfig.get_masterhost()
        myhost = socket.gethostname()
        self.assertEqual(master_host, myhost)

    def test_get_masterdata_directory(self):
        master_dd = self.gpconfig.get_masterdata_directory()
        my_mdd = os.getenv("MASTER_DATA_DIRECTORY")
        self.assertEqual(master_dd, my_mdd)
示例5: PgtwoPhaseClass
# 需要导入模块: from mpp.lib.config import GPDBConfig [as 别名]
# 或者: from mpp.lib.config.GPDBConfig import get_countprimarysegments [as 别名]
#.........这里部分代码省略.........
return False
def run_gprecover(self, crash_type, cluster_state='sync'):
    """
    Recover the cluster if the crash type / cluster state requires it.

    @param crash_type: failover_to_primary/failover_to_mirror (other values
                       skip recovery unless cluster_state demands it)
    @param cluster_state: sync/change_tracking/resync
    @raise Exception: if gprecoverseg or the resync transition fails.
    """
    if crash_type in ('failover_to_primary', 'failover_to_mirror') or cluster_state == 'change_tracking':
        rc = self.gprecover.incremental()
        if not rc:
            raise Exception('Gprecoverseg failed')  # typo fixed (was 'Gprecvoerseg')
        if not self.gprecover.wait_till_insync_transition():
            raise Exception('Segments not in sync')
        tinctest.logger.info('Cluster in sync state')
        if crash_type == 'failover_to_mirror':
            #rc = self.gprecover.rebalance()
            # -r has issues occasionally, may need another gprecoverseg, so using a local function
            rc = self.gprecover_rebalance()
            if not rc:
                raise Exception('Rebalance failed')
            if not self.gprecover.wait_till_insync_transition():
                raise Exception('Segments not in sync')
            tinctest.logger.info('Successfully Rebalanced the cluster')
    else:
        tinctest.logger.info('No need to run gprecoverseg. The cluster should be already in sync')
def switch_ckpt_faults_before_trigger(self, cluster_state, fault_type):
    """
    Drive the cluster into the requested state, then inject the
    switch/checkpoint fault that the test will later trigger.

    @param cluster_state : sync/change_tracking/resync
    @param fault_type : dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
    """
    if cluster_state in ('change_tracking', 'resync'):
        self.invoke_fault('filerep_consumer', 'fault')
        self.filereputil.wait_till_change_tracking_transition()
        tinctest.logger.info('Change_tracking transition complete')
    if cluster_state == 'resync':
        # Suspend resync so the cluster stays in the resync state while
        # the fault is armed.
        self.invoke_fault('filerep_resync', 'suspend', role='primary')
        rc = self.gprecover.incremental()
        if not rc:
            raise Exception('Gprecoverseg failed')  # typo fixed (was 'Gprecvoerseg')
        tinctest.logger.info('Cluster in resync state')
    self.inject_fault(fault_type)
def switch_ckpt_switch_xlog(self):
    """Run pg_switch_xlog() in utility mode on every primary segment."""
    switch_sql = 'select * from pg_switch_xlog();'
    primary_count = self.config.get_countprimarysegments()
    for seg_no in range(primary_count):
        seg_host, seg_port = self.config.get_hostandport_of_segment(psegmentNumber=seg_no)
        PSQL.run_sql_command_utility_mode(switch_sql, host=seg_host, port=seg_port)
def switch_checkpoint_loop(self, fault_type):
    """
    @description: Run switch_xlog and checkpoint based on the fault_type
    """
    # One xlog switch for dtm_xlog_distributed_commit; five rounds for
    # every other fault type — same behavior as the original if/else.
    rounds = 1 if fault_type == 'dtm_xlog_distributed_commit' else 5
    for _ in range(rounds):
        self.switch_ckpt_switch_xlog()
def switch_ckpt_crash_and_recover(self, crash_type, fault_type, test_dir, cluster_state='sync', checkpoint='noskip'):
    """
    @param crash_type : gpstop_i/gpstop_a/failover_to_mirror/failover_to_primary
    @param fault_type : dtm_broadcast_prepare/dtm_broadcast_commit_prepared/dtm_xlog_distributed_commit
    @param test_dir : dir of the trigger_sqls
    """
    trigger_status = self.check_trigger_sql_hang(test_dir, fault_type)
    tinctest.logger.info('trigger_status %s' % trigger_status)
    # Guard clause: nothing to do when the fault never triggered.
    if trigger_status != True:
        tinctest.logger.info('The fault_status is not triggered')
        return
    if cluster_state == 'resync':
        self.filereputil.inject_fault(f='filerep_resync', y='resume', r='primary')
        sleep(30) #Give a little time before crash.
    self.crash_and_recover(crash_type, fault_type, checkpoint, cluster_state)
def cleanup_dangling_processes(self):
    """
    @description: Since the test suspend transactions at different stages and does immediate shutdown,
    few processes will not be cleaned up and eventually will eat up on the system resources
    This methods takes care of killing them at the end of each test, if such processes exists
    """
    primary_count = self.config.get_countprimarysegments()
    for seg_no in range(primary_count):
        seg_host, seg_port = self.config.get_hostandport_of_segment(psegmentNumber=seg_no)
        grep_cmd = "ps -ef|grep %s|grep 'Distributed'" % seg_port
        check_cmd = Command('Check for dangling process', cmdStr = 'gpssh -h %s -e "%s" ' % (seg_host, grep_cmd))
        check_cmd.run()
        output = check_cmd.get_results()
        # More than 2 lines of output is treated as a live process match
        # (presumably gpssh adds its own banner lines — verify on target).
        if len(output.stdout.splitlines()) > 2:
            grep_and_kill_cmd = "ps -ef|grep %s|grep 'Distributed'|awk '{print \$2}'|xargs kill -9" % seg_port
            kill_cmd = Command('Kill dangling processes', cmdStr='gpssh -h %s -e "%s" ' % (seg_host, grep_and_kill_cmd))
            kill_cmd.run()
            tinctest.logger.info('Killing the dangling processes')