

Python misc.new_node Function Code Examples

This article collects typical usage examples of the Python function tools.misc.new_node. If you have been wondering what exactly new_node does, how to call it, or where to find usage examples, the curated code samples below may help.


The following presents 15 code examples of the new_node function, sorted by popularity by default.
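Before working through the examples, here is a minimal usage sketch of new_node, pieced together purely from the call patterns shown below. The actual signature lives in cassandra-dtest's tools/misc.py and may accept additional parameters; the helper name add_and_start_node is hypothetical.

    from tools.misc import new_node

    def add_and_start_node(cluster):
        # bootstrap=True ensures the new node is not a seed (see Examples 2, 5 and 6);
        # token and remote_debug_port are also accepted (see Examples 11, 13 and 14).
        node = new_node(cluster, bootstrap=True)
        # new_node only registers the node with the cluster; it must be started explicitly.
        node.start(wait_for_binary_proto=True, wait_other_notice=True)
        return node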

Example 1: bootstrap_test

    def bootstrap_test(self):
        """ Test repaired data remains in sync after a bootstrap """
        cluster = self.cluster
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False, 'commitlog_sync_period_in_ms': 500})
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 2}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")

        # insert some data
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        for i in range(1000):
            session.execute(stmt, (i, i))

        node1.repair(options=['ks'])

        for i in range(1000):
            v = i + 1000
            session.execute(stmt, (v, v))

        # everything should be in sync
        for node in [node1, node2, node3]:
            result = node.repair(options=['ks', '--validate'])
            self.assertIn("Repaired data is in sync", result.stdout)

        node4 = new_node(self.cluster)
        node4.start(wait_for_binary_proto=True)

        self.assertEqual(len(self.cluster.nodelist()), 4)
        # everything should still be in sync
        for node in self.cluster.nodelist():
            result = node.repair(options=['ks', '--validate'])
            self.assertIn("Repaired data is in sync", result.stdout)
Author: snazy, Project: cassandra-dtest, Lines: 35, Source: incremental_repair_test.py

Example 2: decommissioned_wiped_node_can_gossip_to_single_seed_test

    def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
        """
        @jira_ticket CASSANDRA-8072
        @jira_ticket CASSANDRA-8422
        Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single
        seed node.
        """
        cluster = self.cluster
        cluster.populate(1)
        cluster.start(wait_for_binary_proto=True)

        # Add a new node, bootstrap=True ensures that it is not a seed
        node2 = new_node(cluster, bootstrap=True)
        node2.start(wait_for_binary_proto=True, wait_other_notice=True)

        # Decommission the new node and kill it
        debug("Decommissioning & stopping node2")
        node2.decommission()
        node2.stop(wait_other_notice=False)

        # Wipe its data
        for data_dir in node2.data_directories():
            debug("Deleting {}".format(data_dir))
            shutil.rmtree(data_dir)

        commitlog_dir = os.path.join(node2.get_path(), 'commitlogs')
        debug("Deleting {}".format(commitlog_dir))
        shutil.rmtree(commitlog_dir)

        # Now start it, it should be allowed to join
        mark = node2.mark_log()
        debug("Restarting wiped node2")
        node2.start(wait_other_notice=False)
        node2.watch_log_for("JOINING:", from_mark=mark)
Author: jeffjirsa, Project: cassandra-dtest, Lines: 34, Source: bootstrap_test.py

Example 3: test_bootstrap_with_reset_bootstrap_state

    def test_bootstrap_with_reset_bootstrap_state(self):
        """Test bootstrap with resetting bootstrap progress"""
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(2).start(wait_other_notice=True)

        node1 = cluster.nodes['node1']
        node1.stress(['write', 'n=100K', '-schema', 'replication(factor=2)'])
        node1.flush()

        # kill node1 in the middle of streaming to let it fail
        t = InterruptBootstrap(node1)
        t.start()

        # start bootstrapping node3 and wait for streaming
        node3 = new_node(cluster)
        try:
            node3.start()
        except NodeError:
            pass  # node doesn't start as expected
        t.join()
        node1.start()

        # restart node3 bootstrap with resetting bootstrap progress
        node3.stop(signal_event=signal.SIGKILL)
        mark = node3.mark_log()
        node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
        # check if we reset bootstrap state
        node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
        # wait for node3 ready to query
        node3.wait_for_binary_interface(from_mark=mark)

        # check if 2nd bootstrap succeeded
        assert_bootstrap_state(self, node3, 'COMPLETED')
Author: beobal, Project: cassandra-dtest, Lines: 34, Source: bootstrap_test.py

Example 4: test_manual_bootstrap

    def test_manual_bootstrap(self):
        """
            Test adding a new node and bootstrapping it manually, with auto_bootstrap disabled.
            This test also verifies that all data is intact after the addition of the new node.
            @jira_ticket CASSANDRA-9022
        """
        cluster = self.cluster
        cluster.populate(2).start(wait_other_notice=True)
        (node1, node2) = cluster.nodelist()

        node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=2)',
                      '-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)'])

        session = self.patient_exclusive_cql_connection(node2)
        stress_table = 'keyspace1.standard1'

        original_rows = list(session.execute("SELECT * FROM %s" % stress_table))

        # Add a new node
        node3 = new_node(cluster, bootstrap=False)
        node3.start(wait_for_binary_proto=True)
        node3.repair()
        node1.cleanup()

        current_rows = list(session.execute("SELECT * FROM %s" % stress_table))
        assert original_rows == current_rows
Author: beobal, Project: cassandra-dtest, Lines: 26, Source: bootstrap_test.py

Example 5: test_decommissioned_wiped_node_can_join

    def test_decommissioned_wiped_node_can_join(self):
        """
        @jira_ticket CASSANDRA-9765
        Test that if we decommission a node and then wipe its data, it can join the cluster.
        """
        cluster = self.cluster
        cluster.populate(3)
        cluster.start(wait_for_binary_proto=True)

        stress_table = 'keyspace1.standard1'

        # write some data
        node1 = cluster.nodelist()[0]
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

        session = self.patient_cql_connection(node1)
        original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))

        # Add a new node, bootstrap=True ensures that it is not a seed
        node4 = new_node(cluster, bootstrap=True)
        node4.start(wait_for_binary_proto=True, wait_other_notice=True)

        session = self.patient_cql_connection(node4)
        assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,)))

        # Decommission the new node and wipe its data
        node4.decommission()
        node4.stop()
        self._cleanup(node4)
        # Now start it, it should be allowed to join
        mark = node4.mark_log()
        node4.start(wait_other_notice=True)
        node4.watch_log_for("JOINING:", from_mark=mark)
Author: beobal, Project: cassandra-dtest, Lines: 33, Source: bootstrap_test.py

Example 6: _wiped_node_cannot_join_test

    def _wiped_node_cannot_join_test(self, gently):
        """
        @jira_ticket CASSANDRA-9765
        Test that if we stop a node and wipe its data then the node cannot join
        when it is not a seed. Test both a graceful shutdown and a forced shutdown,
        via the gently parameter.
        """
        cluster = self.cluster
        cluster.populate(3)
        cluster.start(wait_for_binary_proto=True)

        stress_table = 'keyspace1.standard1'

        # write some data
        node1 = cluster.nodelist()[0]
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

        session = self.patient_cql_connection(node1)
        original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))

        # Add a new node, bootstrap=True ensures that it is not a seed
        node4 = new_node(cluster, bootstrap=True)
        node4.start(wait_for_binary_proto=True)

        session = self.patient_cql_connection(node4)
        assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,)))

        # Stop the new node and wipe its data
        node4.stop(gently=gently)
        self._cleanup(node4)
        # Now start it, it should not be allowed to join.
        mark = node4.mark_log()
        node4.start(no_wait=True, wait_other_notice=False)
        node4.watch_log_for("A node with address {} already exists, cancelling join".format(node4.address_for_current_version_slashy()), from_mark=mark)
Author: beobal, Project: cassandra-dtest, Lines: 34, Source: bootstrap_test.py

Example 7: _do_upgrade

    def _do_upgrade(self, login_keyspace=True):
        cluster = self.cluster
        node1 = cluster.nodelist()[0]

        node1.flush()
        time.sleep(.5)
        node1.stop(wait_other_notice=True)

        node1.set_install_dir(install_dir=self.fixture_dtest_setup.default_install_dir)
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)

        if self.fixture_dtest_setup.bootstrap:
            cluster.set_install_dir(install_dir=self.fixture_dtest_setup.default_install_dir)
            # Add a new node, bootstrap=True ensures that it is not a seed
            node2 = new_node(cluster, bootstrap=True)
            node2.start(wait_for_binary_proto=True, jvm_args=self.fixture_dtest_setup.jvm_args)

            temp_files = self.glob_data_dirs(os.path.join('*', "tmp", "*.dat"))
            logger.debug("temp files: " + str(temp_files))
            assert 0 == len(temp_files), "Temporary files were not cleaned up."

        cursor = self.patient_cql_connection(node1)
        if login_keyspace:
            cursor.execute('USE ks')
        return cursor
Author: vinaykumarchella, Project: cassandra-dtest, Lines: 25, Source: storage_engine_upgrade_test.py

Example 8: test_cleanup

    def test_cleanup(self):
        """
        @jira_ticket CASSANDRA-11179
        Make sure we remove processed files during cleanup
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'concurrent_compactors': 4})
        cluster.populate(1)
        cluster.start(wait_for_binary_proto=True)
        node1, = cluster.nodelist()
        for x in range(0, 5):
            node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
            node1.flush()
        node2 = new_node(cluster)
        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
        event = threading.Event()
        failed = threading.Event()
        jobs = 1
        thread = threading.Thread(target=self._monitor_datadir, args=(node1, event, len(node1.get_sstables("keyspace1", "standard1")), jobs, failed))
        thread.setDaemon(True)
        thread.start()
        node1.nodetool("cleanup -j {} keyspace1 standard1".format(jobs))
        event.set()
        thread.join()
        assert not failed.is_set()
Author: beobal, Project: cassandra-dtest, Lines: 25, Source: bootstrap_test.py

Example 9: _disk_balance_after_boundary_change_test

    def _disk_balance_after_boundary_change_test(self, lcs):
        """
            @jira_ticket CASSANDRA-13948

            - Creates a 1-node cluster with 5 disks and inserts data with compaction disabled
            - Bootstraps node2 to trigger disk boundary changes on node1
            - Enables compaction on node1 and checks that disks are balanced
            - Decommissions node1 to trigger disk boundary changes on node2
            - Enables compaction on node2 and checks that disks are balanced
        """

        cluster = self.cluster
        if self.dtest_config.use_vnodes:
            cluster.set_configuration_options(values={'num_tokens': 1024})
        num_disks = 5
        cluster.set_datadir_count(num_disks)
        cluster.set_configuration_options(values={'concurrent_compactors': num_disks})

        logger.debug("Starting node1 with {} data dirs and concurrent_compactors".format(num_disks))
        cluster.populate(1).start(wait_for_binary_proto=True)
        [node1] = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        # reduce system_distributed RF to 1 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
        session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")

        num_flushes = 10
        keys_per_flush = 10000
        keys_to_write = num_flushes * keys_per_flush

        compaction_opts = "LeveledCompactionStrategy,sstable_size_in_mb=1" if lcs else "SizeTieredCompactionStrategy"
        logger.debug("Writing {} keys in {} flushes (compaction_opts={})".format(keys_to_write, num_flushes, compaction_opts))
        total_keys = num_flushes * keys_per_flush
        current_keys = 0
        while current_keys < total_keys:
            start_key = current_keys + 1
            end_key = current_keys + keys_per_flush
            logger.debug("Writing keys {}..{} and flushing".format(start_key, end_key))
            node1.stress(['write', 'n={}'.format(keys_per_flush), "no-warmup", "cl=ALL", "-pop",
                          "seq={}..{}".format(start_key, end_key), "-rate", "threads=1", "-schema", "replication(factor=1)",
                          "compaction(strategy={},enabled=false)".format(compaction_opts)])
            node1.nodetool('flush keyspace1 standard1')
            current_keys = end_key

        # Add a new node, so disk boundaries will change
        logger.debug("Bootstrap node2 and flush")
        node2 = new_node(cluster, bootstrap=True)
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds=10"], set_migration_task=False)
        node2.flush()

        self._assert_balanced_after_boundary_change(node1, total_keys, lcs)

        logger.debug("Decommissioning node1")
        node1.decommission()
        node1.stop()

        self._assert_balanced_after_boundary_change(node2, total_keys, lcs)
Author: beobal, Project: cassandra-dtest, Lines: 58, Source: disk_balance_test.py

Example 10: test_simultaneous_bootstrap

    def test_simultaneous_bootstrap(self):
        """
        Attempt to bootstrap two nodes at once, asserting that the second bootstrapping node fails and does not interfere.

        Start a one node cluster and run a stress write workload.
        Start up a second node, and wait for the first node to detect it has joined the cluster.
        While the second node is bootstrapping, start a third node. This should fail.

        @jira_ticket CASSANDRA-7069
        @jira_ticket CASSANDRA-9484
        """

        bootstrap_error = "Other bootstrapping/leaving/moving nodes detected," \
                          " cannot bootstrap while cassandra.consistent.rangemovement is true"

        cluster = self.cluster
        cluster.populate(1)
        cluster.start(wait_for_binary_proto=True)

        node1, = cluster.nodelist()

        node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)',
                      '-rate', 'threads=10'])

        node2 = new_node(cluster)
        node2.start(wait_other_notice=True)

        node3 = new_node(cluster, remote_debug_port='2003')
        try:
            node3.start(wait_other_notice=False, verbose=False)
        except NodeError:
            pass  # node doesn't start as expected

        time.sleep(.5)
        node2.watch_log_for("Starting listening for CQL clients")

        node3.watch_log_for(bootstrap_error)

        session = self.patient_exclusive_cql_connection(node2)

        # Repeat the select count(*) query, to help catch
        # bugs like 9484, where count(*) fails at higher
        # data loads.
        for _ in range(5):
            assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
Author: beobal, Project: cassandra-dtest, Lines: 45, Source: bootstrap_test.py

Example 11: _bootstrap_test_with_replica_down

    def _bootstrap_test_with_replica_down(self, consistent_range_movement, rf=2):
        """
        Test to check consistent bootstrap will not succeed when there are insufficient replicas
        @jira_ticket CASSANDRA-11848
        """
        cluster = self.cluster

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node3_token = None
        # Make token assignment deterministic
        if not self.dtest_config.use_vnodes:
            cluster.set_configuration_options(values={'num_tokens': 1})
            tokens = cluster.balanced_tokens(3)
            logger.debug("non-vnode tokens: %r" % (tokens,))
            node1.set_configuration_options(values={'initial_token': tokens[0]})
            node2.set_configuration_options(values={'initial_token': tokens[2]})
            node3_token = tokens[1]  # Add node 3 between node1 and node2

        cluster.start()

        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor={})'.format(rf)])

        # raise the system_auth keyspace replication factor to 2 (default is 1) to avoid
        # the "Unable to find sufficient sources for streaming" warning
        if cluster.cassandra_version() >= '2.2.0':
            session = self.patient_cql_connection(node1)
            session.execute("""
                ALTER KEYSPACE system_auth
                    WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};
            """)

        # Stop node2, so node3 will not be able to perform consistent range movement
        node2.stop(wait_other_notice=True)

        successful_bootstrap_expected = not consistent_range_movement

        node3 = new_node(cluster, token=node3_token)
        node3.start(wait_for_binary_proto=successful_bootstrap_expected, wait_other_notice=successful_bootstrap_expected,
                    jvm_args=["-Dcassandra.consistent.rangemovement={}".format(consistent_range_movement)])

        if successful_bootstrap_expected:
            # with rf=1 and cassandra.consistent.rangemovement=false, missing sources are ignored
            if not consistent_range_movement and rf == 1:
                node3.watch_log_for("Unable to find sufficient sources for streaming range")
            assert node3.is_running()
            assert_bootstrap_state(self, node3, 'COMPLETED')
        else:
            if consistent_range_movement:
                if cluster.version() < '4.0':
                    node3.watch_log_for("A node required to move the data consistently is down")
                else:
                    node3.watch_log_for("Necessary replicas for strict consistency were removed by source filters")
            else:
                node3.watch_log_for("Unable to find sufficient sources for streaming range")
            assert_not_running(node3)
Author: vinaykumarchella, Project: cassandra-dtest, Lines: 57, Source: bootstrap_test.py

Example 12: bootstrap_with_compatibility_flag_on

    def bootstrap_with_compatibility_flag_on(cluster, token):
        node2 = new_node(cluster)
        node2.set_configuration_options(values={'initial_token': token})
        # The cassandra.force_3_0_protocol_version parameter is needed to allow schema
        # changes during bootstrapping when upgrading from 3.0.14+ to later 3.0.x or 3.x releases.
        # See @jira_ticket CASSANDRA-13004 for detailed context on the `cassandra.force_3_0_protocol_version` flag.
        node2.start(jvm_args=["-Dcassandra.force_3_0_protocol_version=true"],
                    wait_for_binary_proto=True)
        return node2
Author: vinaykumarchella, Project: cassandra-dtest, Lines: 9, Source: bootstrap_test.py

Example 13: _bootstrap_new_node

    def _bootstrap_new_node(self):
        # Check we can bootstrap a new node on the upgraded cluster:
        debug("Adding a node to the cluster")
        nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
        nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
        self._write_values()
        self._increment_counters()
        self._check_values()
        self._check_counters()
Author: snazy, Project: cassandra-dtest, Lines: 9, Source: upgrade_through_versions_test.py

Example 14: test_decommission

    def test_decommission(self):
        """Test decommissioning a node correctly streams out all the data"""
        node4 = new_node(self.cluster, bootstrap=True, token='00040')
        patch_start(node4)
        node4.start(wait_for_binary_proto=True, wait_other_notice=True)
        main_session = self.patient_cql_connection(self.node1)
        nodes = [self.node1, self.node2, self.node3, node4]

        for i in range(0, 40, 2):
            print("Inserting " + str(i))
            self.insert_row(i, i, i, main_session)

        # Make sure at least a little data is repaired
        repair_nodes(nodes)

        # Ensure that there is at least some transient data around; if it's missing after bootstrap,
        # we know we failed to get it because the transient replica lost the range entirely
        nodes[1].stop(wait_other_notice=True)

        for i in range(1, 40, 2):
            print("Inserting " + str(i))
            self.insert_row(i, i, i, main_session)

        nodes[1].start(wait_for_binary_proto=True, wait_other_notice=True)
        sessions = [self.exclusive_cql_connection(node) for node in [self.node1, self.node2, self.node3, node4]]

        expected = [gen_expected(range(0, 11), range(31, 40)),
                    gen_expected(range(0, 21, 2)),
                    gen_expected(range(1, 11, 2), range(11, 31)),
                    gen_expected(range(11, 20, 2), range(21, 40))]

        self.check_expected(sessions, expected)

        # node4 has transient data we want to see streamed out on decommission
        nodes[3].nodetool('decommission')

        nodes = nodes[:-1]
        sessions = sessions[:-1]

        expected = [gen_expected(range(0, 11), range(11, 21, 2), range(21, 40)),
                    gen_expected(range(0, 21, 2), range(21, 30, 2), range(31, 40)),
                    gen_expected(range(1, 11, 2), range(11, 31), range(31, 40, 2))]

        cleanup_nodes(nodes)

        self.check_replication(sessions, gte=2, lte=3)
        self.check_expected(sessions, expected)

        repair_nodes(nodes)

        # There should be no transient data anywhere
        expected = [gen_expected(range(0, 11), range(21, 40)),
                    gen_expected(range(0, 21), range(31, 40)),
                    gen_expected(range(11, 31))]

        self.check_expected(sessions, expected, nodes, cleanup=True)
        self.check_replication(sessions, exactly=2)
Author: vinaykumarchella, Project: cassandra-dtest, Lines: 57, Source: transient_replication_ring_test.py

Example 15: bootstrap_on_write_survey_and_join

        def bootstrap_on_write_survey_and_join(cluster, token):
            node2 = new_node(cluster)
            node2.set_configuration_options(values={'initial_token': token})
            node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True)

            assert len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.'))
            assert_bootstrap_state(self, node2, 'IN_PROGRESS')

            node2.nodetool("join")
            assert len(node2.grep_log('Leaving write survey mode and joining ring at operator request'))
            return node2
Author: beobal, Project: cassandra-dtest, Lines: 11, Source: bootstrap_test.py


Note: the tools.misc.new_node examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects; copyright remains with the original authors. Refer to each project's License before redistributing or using the code, and do not reproduce without permission.