

Python LogWatcher.setwatch Method Code Examples

This article collects typical usage examples of the Python method cts.watcher.LogWatcher.setwatch. If you have been wondering what LogWatcher.setwatch does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of cts.watcher.LogWatcher, the class this method belongs to.


The following presents 10 code examples of the LogWatcher.setwatch method, sorted by popularity by default.
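Before the individual examples, here is a minimal sketch of the pattern they all share: build a LogWatcher from a log file, a list of regular expressions, a tag, and a timeout; call setwatch() before triggering the cluster action; then consume matches with look() or lookforall() and inspect unmatched. The file path, patterns, host names, and kind value below are illustrative placeholders only; in the real examples they come from the CTS environment (self.Env).

from cts.watcher import LogWatcher

# Placeholder values; the examples below pass self.Env["LogFileName"],
# self.Env["nodes"] and self.Env["LogWatcher"] instead.
patterns = [
    r"Current ping state: S_IDLE",
    r"Some other pattern to wait for",
]

watch = LogWatcher("/var/log/messages",   # log file to follow
                   patterns,              # regexes to match
                   "ExampleWatch",        # tag used when reporting
                   60,                    # timeout in seconds
                   hosts=["node1", "node2"],
                   kind=None)             # normally self.Env["LogWatcher"]

watch.setwatch()    # start watching *before* triggering the action

# ... trigger the cluster operation whose log output should be observed ...

watch.lookforall()  # block until every pattern matched or the timeout expires
if watch.unmatched:
    print("Warn: patterns not found: %s" % watch.unmatched)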

Example 1: partition_stable

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def partition_stable(self, nodes, timeout=None):
        watchpats = [ ]
        watchpats.append("Current ping state: S_IDLE")
        watchpats.append(self.templates["Pat:DC_IDLE"])
        self.debug("Waiting for cluster stability...") 

        if timeout is None:
            timeout = self.Env["DeadTime"]

        if len(nodes) < 3:
            self.debug("Cluster is inactive") 
            return 1

        idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterStable", timeout, hosts=nodes.split(), kind=self.Env["LogWatcher"])
        idle_watch.setwatch()

        for node in nodes.split():
            # have each node dump its current state
            self.rsh(node, self.templates["StatusCmd"] % node, 1)

        ret = idle_watch.look()
        while ret:
            self.debug(ret) 
            for node in nodes.split():
                if re.search(node, ret):
                    return 1
            ret = idle_watch.look()

        self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout)) 
        return None
Developer: SynetoNet, Project: pacemaker, Lines: 32, Source: CM_lha.py

Example 2: prepare_fencing_watcher

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def prepare_fencing_watcher(self, name):
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        upnode = None
        if self.HasQuorum(None):
            self.debug("Have quorum")
            return None

        if not self.templates["Pat:Fencing_start"]:
            print("No start pattern")
            return None

        if not self.templates["Pat:Fencing_ok"]:
            print("No ok pattern")
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)

        stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
        stonith.setwatch()
        return stonith
Developer: miz-take, Project: pacemaker, Lines: 28, Source: CTS.py

Example 3: setup_scenario

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def setup_scenario(self, cluster_manager):
        cluster_manager.log("Reusing cluster")

        # Disable STONITH by default. A dedicated ScenarioComponent
        # is in charge of enabling it if requested
        self.rsh_check(self.Env["nodes"][0], "pcs property set stonith-enabled=false")

        # Stop and remove galera if it exists
        # Note1: to avoid errors when stopping the resource while it is in
        # an unknown state, we first re-probe the resource state.
        # Note2: if you clean and delete before pacemaker has had a
        # chance to re-probe the state, it will consider the resource
        # stopped and will happily delete it from the CIB even if
        # galera is still running!
        # Note3: after a cleanup, pacemaker may log a warning
        # if it finds the resource still running. This does not
        # count as an error for the CTS test.
        target=self.Env["nodes"][0]
        rc = self.rsh(target, "pcs resource unmanage galera")
        if rc == 0:
            patterns = [r"crmd.*:\s*Initiating action.*: probe_complete probe_complete-%s on %s"%(n,n) \
                    for n in self.Env["nodes"]]
            watch=LogWatcher(self.Env["LogFileName"], patterns, None, self.Env["DeadTime"], kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
            watch.setwatch()
            self.rsh(target, "pcs resource cleanup galera")
            watch.lookforall()
            assert not watch.unmatched, watch.unmatched
            self.rsh(target, "pcs resource disable galera")
            self.rsh(target, "pcs resource manage galera")
            self.rsh(target, "pcs resource delete galera --wait")
Developer: dciabrin, Project: ra-tester, Lines: 32, Source: galerascenarios.py

Example 4: prepare_fencing_watcher

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def prepare_fencing_watcher(self, name):
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        upnode = None
        if self.HasQuorum(None):
            self.debug("Have quorum")
            return None

        if not self.templates["Pat:Fencing_start"]:
            print("No start pattern")
            return None

        if not self.templates["Pat:Fencing_ok"]:
            print("No ok pattern")
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
            elif self.Env["Stack"] == "corosync (cman)":
                # There is a delay between gaining quorum and CMAN starting fencing
                # This can mean that even nodes that are fully up get fenced
                # There is no use fighting it, just look for everyone so that CTS doesn't get confused
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)

        stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
        stonith.setwatch()
        return stonith
Developer: davidvossel, Project: pacemaker, Lines: 34, Source: CTS.py

Example 5: setup_scenario

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def setup_scenario(self, cluster_manager):
        # mysql setup
        self.init_and_setup_mysql_defaults()
        self.setup_galera_config()

        remote_authkey = "/etc/pacemaker/authkey"
        if not self.rsh.exists_on_all(remote_authkey, self.Env["nodes"]):
            self.log("Creating auth key for communication with pacemaker remote")
            with tempfile.NamedTemporaryFile() as tmp:
                tmp.write(os.urandom(4096))
                tmp.flush()
                self.copy_to_nodes([(tmp.name, remote_authkey)], True, "root:haclient", "440")

        # cluster_manager.prepare()

        # stop cluster if previously running, failure is not fatal
        for node in self.Env["nodes"]:
            self.rsh(node, "pcs cluster destroy")
            self.rsh(node, "systemctl stop pacemaker_remote")
            self.rsh(node, "systemctl enable pacemaker")

        # reconfigure cluster for 2-nodes + one remote arbitrator
        self.Env["arb"]=self.Env["nodes"][-1]
        self.Env["nodes"]=self.Env["nodes"][:-1]
        self.rsh_check(self.Env["nodes"][0], "pcs cluster setup --force --name ratester %s %s" % \
                       (self.Env["nodes"][0],self.Env["nodes"][1]))
        # note: setting up the cluster disables the pacemaker service; re-enable it
        self.rsh_check(self.Env["nodes"][0], "systemctl enable pacemaker")
        self.rsh_check(self.Env["nodes"][0], "pcs cluster start --all")

        # TODO: better way to wait until cluster is started
        time.sleep(8)

        # Disable STONITH by default. A dedicated ScenarioComponent
        # is in charge of enabling it if requested
        self.rsh_check(self.Env["nodes"][0], "pcs property set stonith-enabled=false")

        for node in self.Env["nodes"]:
            self.rsh_check(node, "pcs property set --node %s osprole=controller"%node)

        # cluster_manager.prepare()

        # pacemaker remote to host garbd
        res=self.rsh_check(self.Env["arb"], "systemctl disable pacemaker")
        res=self.rsh_check(self.Env["arb"], "systemctl enable pacemaker_remote")
        res=self.rsh_check(self.Env["arb"], "systemctl start pacemaker_remote")

        remote_ok_pat = self.ratemplates.build("Pat:RscRemoteOp", "start", "arb", "\S+", "ok")
        watch=LogWatcher(self.Env["LogFileName"], [remote_ok_pat], None, self.Env["DeadTime"], kind=self.Env["LogWatcher"], hosts=self.Env["nodes"])
        # watch = self.create_watch([remote_ok_pat], self.Env["DeadTime"])
        watch.setwatch()
        res=self.rsh_check(self.Env["nodes"][0], "pcs resource create arb ocf:pacemaker:remote server=%s reconnect_interval=60 op monitor interval=20"%self.Env["arb"])
        watch.lookforall()
        assert not watch.unmatched, watch.unmatched

        self.rsh_check(self.Env["nodes"][0], "pcs property set --node arb osprole=arbitrator")

        # there's no selinux context for garbd currently
        res=self.rsh_check(self.Env["arb"], "test -x /usr/sbin/setenforce && setenforce 0 || true")
Developer: dciabrin, Project: ra-tester, Lines: 61, Source: garbdscenarios.py

Example 6: startall

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def startall(self, nodelist=None, verbose=False, quick=False):

        '''Start the cluster manager on every node in the cluster.
        We can do it on a subset of the cluster if nodelist is not None.
        '''
        map = {}
        if not nodelist:
            nodelist = self.Env["nodes"]

        for node in nodelist:
            if self.ShouldBeStatus[node] == "down":
                self.ns.WaitForAllNodesToComeUp(nodelist, 300)

        if not quick:
            # This is used for "basic sanity checks", so only start one node ...
            if not self.StartaCM(node, verbose=verbose):
                return 0
            return 1

        # Approximation of SimulStartList for --boot 
        watchpats = [ ]
        watchpats.append(self.templates["Pat:DC_IDLE"])
        for node in nodelist:
            watchpats.append(self.templates["Pat:Local_started"] % node)
            watchpats.append(self.templates["Pat:InfraUp"] % node)
            watchpats.append(self.templates["Pat:PacemakerUp"] % node)

        #   Start all the nodes - at about the same time...
        watch = LogWatcher(self.Env["LogFileName"], watchpats, "fast-start", self.Env["DeadTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])
        watch.setwatch()

        if not self.StartaCM(nodelist[0], verbose=verbose):
            return 0
        for node in nodelist:
            self.StartaCMnoBlock(node, verbose=verbose)

        watch.lookforall()
        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log ("Warn: Startup pattern not found: %s" % (regex))

        if not self.cluster_stable():
            self.logger.log("Cluster did not stabilize")
            return 0

        return 1
Developer: aspiers, Project: pacemaker, Lines: 48, Source: CTS.py

Example 7: test_node_CM

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def test_node_CM(self, node):
        '''Report the status of the cluster manager on a given node'''

        watchpats = [ ]
        watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
        watchpats.append(self.templates["Pat:Slave_started"]%node)
        watchpats.append(self.templates["Pat:Master_started"]%node)
        idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, "ClusterIdle", hosts=[node], kind=self.Env["LogWatcher"])
        idle_watch.setwatch()

        out = self.rsh(node, self.templates["StatusCmd"]%node, 1)
        self.debug("Node %s status: '%s'" %(node, out))            

        if not out or string.find(out, 'ok') < 0:
            if self.ShouldBeStatus[node] == "up":
                self.log(
                    "Node status for %s is %s but we think it should be %s"
                    % (node, "down", self.ShouldBeStatus[node]))
            self.ShouldBeStatus[node] = "down"
            return 0

        if self.ShouldBeStatus[node] == "down":
            self.log(
                "Node status for %s is %s but we think it should be %s: %s"
                % (node, "up", self.ShouldBeStatus[node], out))

        self.ShouldBeStatus[node] = "up"

        # check the output first - because syslog-ng loses messages
        if string.find(out, 'S_NOT_DC') != -1:
            # Up and stable
            return 2
        if string.find(out, 'S_IDLE') != -1:
            # Up and stable
            return 2

        # fall back to syslog-ng and wait
        if not idle_watch.look():
            # just up
            self.debug("Warn: Node %s is unstable: %s" % (node, out))
            return 1

        # Up and stable
        return 2
Developer: SynetoNet, Project: pacemaker, Lines: 46, Source: CM_lha.py

Example 8: prepare_fencing_watcher

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def prepare_fencing_watcher(self, node):
        # If we don't have quorum now but get it as a result of starting this node,
        # then a bunch of nodes might get fenced
        upnode = None
        if self.HasQuorum(None):
            return None

        if not self.has_key("Pat:Fencing_start"):
            return None

        if not self.has_key("Pat:Fencing_ok"):
            return None

        stonith = None
        stonithPats = []
        for peer in self.Env["nodes"]:
            if peer != node and self.ShouldBeStatus[peer] != "up":
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
            elif self.Env["Stack"] == "corosync (cman)":
                # There is a delay between gaining quorum and CMAN starting fencing
                # This can mean that even nodes that are fully up get fenced
                # There is no use fighting it, just look for everyone so that CTS doesn't get confused
                stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
                stonithPats.append(self.templates["Pat:Fencing_start"] % peer)

            if peer != node and not upnode and self.ShouldBeStatus[peer] == "up":
                upnode = peer

        # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
        if not upnode:
            return None

        stonith = LogWatcher(self.Env["LogFileName"], stonithPats, "StartupFencing", 0, hosts=[upnode], kind=self.Env["LogWatcher"])
        stonith.setwatch()
        return stonith
Developer: kjperry, Project: pacemaker, Lines: 38, Source: CTS.py

Example 9: StartaCM

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
    def StartaCM(self, node, verbose=False):

        '''Start up the cluster manager on a given node'''
        if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
        else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
        ret = 1

        if node not in self.ShouldBeStatus:
            self.ShouldBeStatus[node] = "down"

        if self.ShouldBeStatus[node] != "down":
            return 1

        patterns = []
        # Technically we should always be able to notice ourselves starting
        patterns.append(self.templates["Pat:Local_started"] % node)
        if self.upcount() == 0:
            patterns.append(self.templates["Pat:Master_started"] % node)
        else:
            patterns.append(self.templates["Pat:Slave_started"] % node)

        watch = LogWatcher(
            self.Env["LogFileName"], patterns, "StartaCM", self.Env["StartTime"]+10, hosts=self.Env["nodes"], kind=self.Env["LogWatcher"])

        self.install_config(node)

        self.ShouldBeStatus[node] = "any"
        if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.logger.log ("%s was already started" % (node))
            return 1

        # Clear out the host cache so autojoin can be exercised
        if self.clear_cache:
            self.debug("Removing cache file on: "+node)
            self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")

        if not(self.Env["valgrind-tests"]):
            startCmd = self.templates["StartCmd"]
        else:
            if self.Env["valgrind-prefix"]:
                prefix = self.Env["valgrind-prefix"]
            else:
                prefix = "cts"

            startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self.templates["StartCmd"])

        stonith = self.prepare_fencing_watcher(node)

        watch.setwatch()

        if self.rsh(node, startCmd) != 0:
            self.logger.log ("Warn: Start command failed on node %s" % (node))
            self.fencing_cleanup(node, stonith)
            return None

        self.ShouldBeStatus[node] = "up"
        watch_result = watch.lookforall()

        if watch.unmatched:
            for regex in watch.unmatched:
                self.logger.log ("Warn: Startup pattern not found: %s" % (regex))

        if watch_result and self.cluster_stable(self.Env["DeadTime"]):
            #self.debug("Found match: "+ repr(watch_result))
            self.fencing_cleanup(node, stonith)
            return 1

        elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
            self.fencing_cleanup(node, stonith)
            return 1

        self.logger.log ("Warn: Start failed for node %s" % (node))
        return None
Developer: davidvossel, Project: pacemaker, Lines: 76, Source: CTS.py

Example 10: SetUp

# Required import: from cts.watcher import LogWatcher [as alias]
# Or: from cts.watcher.LogWatcher import setwatch [as alias]
class Scenario:
    (
'''The basic idea of a scenario is that of an ordered list of
ScenarioComponent objects.  Each ScenarioComponent is SetUp() in turn,
and then after the tests have been run, they are torn down using TearDown()
(in reverse order).

A Scenario is applicable to a particular cluster manager iff each
ScenarioComponent is applicable.

A partially set up scenario is torn down if it fails during setup.
''')

    def __init__(self, ClusterManager, Components, Audits, Tests):

        "Initialize the Scenario from the list of ScenarioComponents"

        self.ClusterManager = ClusterManager
        self.Components = Components
        self.Audits  = Audits
        self.Tests = Tests

        self.BadNews = None
        self.TestSets = []
        self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
        self.Sets = []

        #self.ns=CTS.NodeStatus(self.Env)

        for comp in Components:
            if not issubclass(comp.__class__, ScenarioComponent):
                raise ValueError("Init value must be subclass of ScenarioComponent")

        for audit in Audits:
            if not issubclass(audit.__class__, ClusterAudit):
                raise ValueError("Init value must be subclass of ClusterAudit")

        for test in Tests:
            if not issubclass(test.__class__, CTSTest):
                raise ValueError("Init value must be a subclass of CTSTest")

    def IsApplicable(self):
        (
'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
'''
        )

        for comp in self.Components:
            if not comp.IsApplicable():
                return None
        return 1

    def SetUp(self):
        '''Set up the Scenario. Return TRUE on success.'''

        self.ClusterManager.prepare()
        self.audit() # Also detects remote/local log config
        self.ClusterManager.StatsMark(0)
        self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"])

        self.audit()
        if self.ClusterManager.Env["valgrind-tests"]:
            self.ClusterManager.install_helper("cts.supp")

        self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"],
                                  self.ClusterManager.templates.get_patterns(
                                      self.ClusterManager.Env["Name"], "BadNews"), "BadNews", 0,
                                  kind=self.ClusterManager.Env["LogWatcher"],
                                  hosts=self.ClusterManager.Env["nodes"])
        self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit

        j = 0
        while j < len(self.Components):
            if not self.Components[j].SetUp(self.ClusterManager):
                # OOPS!  We failed.  Tear partial setups down.
                self.audit()
                self.ClusterManager.log("Tearing down partial setup")
                self.TearDown(j)
                return None
            j = j + 1

        self.audit()
        return 1

    def TearDown(self, max=None):

        '''Tear Down the Scenario - in reverse order.'''

        if max is None:
            max = len(self.Components)-1
        j = max
        while j >= 0:
            self.Components[j].TearDown(self.ClusterManager)
            j = j - 1

        self.audit()
        self.ClusterManager.StatsExtract()

    def incr(self, name):
        '''Increment (or initialize) the value associated with the given name'''
#......... remainder of the code omitted .........
Developer: beekhof, Project: pacemaker, Lines: 103, Source: CTSscenarios.py


Note: The cts.watcher.LogWatcher.setwatch examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License for distribution and use, and do not reproduce without permission.