

Python ConsistencyLevel.ALL Attribute: Code Examples

This article collects typical usage examples of the ConsistencyLevel.ALL attribute from the Python cassandra module. If you are wondering what ConsistencyLevel.ALL does and how to use it, the curated examples below should help; from here you can also explore other members of cassandra.ConsistencyLevel.


Fifteen code examples of the ConsistencyLevel.ALL attribute are shown below, ordered by popularity.
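Most of the examples below attach ConsistencyLevel.ALL to individual SimpleStatement objects. For orientation, here is a minimal standalone sketch (not taken from the collected examples; the contact point and query are placeholders) that instead applies ALL as the default for a whole session through an execution profile of the DataStax Python driver:

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT

# Every request made through this session defaults to CL ALL,
# i.e. all replicas must acknowledge each read or write.
profile = ExecutionProfile(consistency_level=ConsistencyLevel.ALL)
cluster = Cluster(['127.0.0.1'], execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()

session.execute("SELECT release_version FROM system.local")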

Example 1: _simple_repair

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def _simple_repair(self, order_preserving_partitioner=False, sequential=True):
        """
        * Configure a three node cluster to not use hinted handoff, and to use batch commitlog
        * Launch the cluster
        * Create a keyspace at RF 3 and table
        * Insert one thousand rows at CL ALL
        * Flush on node3 and shut it down
        * Insert one row at CL TWO
        * Restart node3
        * Insert one thousand more rows at CL ALL
        * Flush all nodes
        * Check node3 only has 2000 keys
        * Check node1 and node2 have 2001 keys
        * Perform the repair type specified by the parent test
        * Assert the appropriate messages are logged
        * Assert node3 now has all data

        @jira_ticket CASSANDRA-4373
        """
        if order_preserving_partitioner:
            self.cluster.set_partitioner('org.apache.cassandra.dht.ByteOrderedPartitioner')

        self._populate_cluster()
        self._repair_and_verify(sequential) 
Developer: apache | Project: cassandra-dtest | Lines of code: 26 | Source file: repair_test.py

Example 2: test_with_less_results_than_page_size

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_with_less_results_than_page_size(self):
        session = self.prepare()
        create_ks(session, 'test_paging_size', 2)
        session.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")

        data = """
            |id| value          |
            +--+----------------+
            |1 |testing         |
            |2 |and more testing|
            |3 |and more testing|
            |4 |and more testing|
            |5 |and more testing|
            """
        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': str})

        future = session.execute_async(
            SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
        )
        pf = PageFetcher(future)
        pf.request_all()

        assert not pf.has_more_pages
        assert len(expected_data) == len(pf.all_data()) 
Developer: apache | Project: cassandra-dtest | Lines of code: 26 | Source file: paging_test.py
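The PageFetcher used in these paging tests is a cassandra-dtest helper. For comparison, here is a hedged sketch of the same kind of query driven through the driver's own transparent paging (the contact point is a placeholder, and the keyspace and table are assumed to exist as created in the example above):

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster(['127.0.0.1'])               # placeholder contact point
session = cluster.connect('test_paging_size')  # keyspace from the example above

stmt = SimpleStatement("SELECT * FROM paging_test",
                       fetch_size=100,
                       consistency_level=ConsistencyLevel.ALL)
rows = session.execute(stmt)

# The driver fetches further pages transparently while iterating;
# has_more_pages reports whether another page is still outstanding.
print(rows.has_more_pages)
for row in rows:
    print(row.id, row.value)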

Example 3: test_paging_a_single_wide_row

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_paging_a_single_wide_row(self):
        session = self.prepare()
        create_ks(session, 'test_paging_size', 2)
        session.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")

        def random_txt(text):
            return str(uuid.uuid4())

        data = """
              | id | value                  |
              +----+------------------------+
        *10000| 1  | [replaced with random] |
            """
        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': random_txt})

        future = session.execute_async(
            SimpleStatement("select * from paging_test where id = 1", fetch_size=3000, consistency_level=CL.ALL)
        )

        pf = PageFetcher(future).request_all()

        assert pf.pagecount() == 4
        assert pf.num_results_all() == [3000, 3000, 3000, 1000]
        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="value") 
Developer: apache | Project: cassandra-dtest | Lines of code: 26 | Source file: paging_test.py

Example 4: test_paging_across_multi_wide_rows

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_paging_across_multi_wide_rows(self):
        session = self.prepare()
        create_ks(session, 'test_paging_size', 2)
        session.execute("CREATE TABLE paging_test ( id int, value text, PRIMARY KEY (id, value) )")

        def random_txt(text):
            return str(uuid.uuid4())

        data = """
              | id | value                  |
              +----+------------------------+
         *5000| 1  | [replaced with random] |
         *5000| 2  | [replaced with random] |
            """
        expected_data = create_rows(data, session, 'paging_test', cl=CL.ALL, format_funcs={'id': int, 'value': random_txt})

        future = session.execute_async(
            SimpleStatement("select * from paging_test where id in (1,2)", fetch_size=3000, consistency_level=CL.ALL)
        )

        pf = PageFetcher(future).request_all()

        assert pf.pagecount() == 4
        assert pf.num_results_all() == [3000, 3000, 3000, 1000]
        assert_lists_equal_ignoring_order(expected_data, pf.all_data(), sort_key="value") 
Developer: apache | Project: cassandra-dtest | Lines of code: 27 | Source file: paging_test.py

Example 5: test_multiple_row_deletions

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_multiple_row_deletions(self):
        """
        Test multiple row deletions.
        This test should be finished when CASSANDRA-6237 is done.
        """
        self.session = self.prepare()
        expected_data = self.setup_data()

        # Delete a bunch of rows
        rows = expected_data[100:105]
        expected_data = expected_data[0:100] + expected_data[105:]
        in_condition = ','.join("'{}'".format(r['mytext']) for r in rows)

        self.session.execute(SimpleStatement(
            ("delete from paging_test where "
             "id = {} and mytext in ({})".format(3, in_condition)),
            consistency_level=CL.ALL)
        )
        self.check_all_paging_results(expected_data, 8,
                                      [25, 25, 25, 25, 25, 25, 25, 20]) 
Developer: apache | Project: cassandra-dtest | Lines of code: 22 | Source file: paging_test.py

Example 6: _required_nodes

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def _required_nodes(self, cl, rf_factors, dc):
        """
        Return the number of nodes required by this consistency level
        in the current data center, specified by the dc parameter,
        given a list of replication factors, one per dc.
        """
        return {
            ConsistencyLevel.ANY: 1,
            ConsistencyLevel.ONE: 1,
            ConsistencyLevel.TWO: 2,
            ConsistencyLevel.THREE: 3,
            ConsistencyLevel.QUORUM: sum(rf_factors) // 2 + 1,
            ConsistencyLevel.ALL: sum(rf_factors),
            ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.EACH_QUORUM: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.SERIAL: sum(rf_factors) // 2 + 1,
            ConsistencyLevel.LOCAL_SERIAL: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.LOCAL_ONE: 1,
        }[cl] 
Developer: apache | Project: cassandra-dtest | Lines of code: 21 | Source file: consistency_test.py
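As a quick check of the arithmetic in the mapping above (a standalone sketch, not part of the dtest suite): with two data centers each at replication factor 3, QUORUM needs a majority of all six replicas, while LOCAL_QUORUM needs only a majority of the three replicas in the local data center.

from cassandra import ConsistencyLevel

rf_factors = [3, 3]  # replication factor per data center
dc = 0               # index of the local data center

# The same formulas as in _required_nodes, reduced to two entries.
required = {
    ConsistencyLevel.QUORUM: sum(rf_factors) // 2 + 1,       # 6 // 2 + 1 == 4
    ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] // 2 + 1,  # 3 // 2 + 1 == 2
}
print(required[ConsistencyLevel.QUORUM], required[ConsistencyLevel.LOCAL_QUORUM])  # -> 4 2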

Example 7: test_with_no_results

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_with_no_results(self):
        """
        No errors when a page is requested and query has no results.
        """
        cursor = self.prepare()
        cursor.execute("CREATE TABLE paging_test ( id int PRIMARY KEY, value text )")

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))

            # run a query that has no results and make sure it's exhausted
            future = cursor.execute_async(
                SimpleStatement("select * from paging_test", fetch_size=100, consistency_level=CL.ALL)
            )

            pf = PageFetcher(future)
            pf.request_all()
            assert [] == pf.all_data()
            assert not pf.has_more_pages 
Developer: apache | Project: cassandra-dtest | Lines of code: 21 | Source file: paging_test.py

Example 8: test_multiple_partition_deletions

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_multiple_partition_deletions(self):
        """Test multiple partition deletions """
        cursor = self.prepare()
        self.setup_schema(cursor)

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
            cursor.execute("TRUNCATE paging_test")
            expected_data = self.setup_data(cursor)

            # Keep only the partition '1'
            cursor.execute(
                SimpleStatement("delete from paging_test where id in (2,3,4,5)",
                                consistency_level=CL.ALL)
            )
            expected_data = [row for row in expected_data if row['id'] == 1]
            self.check_all_paging_results(cursor, expected_data, 2, [25, 15],
                                          timeout=10) 
Developer: apache | Project: cassandra-dtest | Lines of code: 20 | Source file: paging_test.py

Example 9: test_ttl_deletions

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_ttl_deletions(self):
        """Test ttl deletions. Paging over a query that has only tombstones """
        cursor = self.prepare()
        self.setup_schema(cursor)

        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory):
            logger.debug("Querying %s node" % ("upgraded" if is_upgraded else "old",))
            cursor.execute("TRUNCATE paging_test")
            data = self.setup_data(cursor)

            # Set a TTL on every row
            for row in data:
                s = ("insert into paging_test (id, mytext, col1, col2, col3) "
                     "values ({}, '{}', {}, {}, {}) using ttl 3;").format(
                         row['id'], row['mytext'], row['col1'],
                         row['col2'], row['col3'])
                cursor.execute(SimpleStatement(s, consistency_level=CL.ALL))
            time.sleep(5)
            self.check_all_paging_results(cursor, [], 0, []) 
Developer: apache | Project: cassandra-dtest | Lines of code: 21 | Source file: paging_test.py

Example 10: _init_session

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def _init_session(self):
        if not self._session:
            s = self._session = self.cluster.connect()
            s.default_consistency_level = ConsistencyLevel.ALL
            s.default_serial_consistency_level = ConsistencyLevel.SERIAL
            s.default_timeout = 120 
Developer: Cobliteam | Project: cassandra-migrate | Lines of code: 8 | Source file: migrator.py
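A hedged usage note on the snippet above: once default_consistency_level is set to ConsistencyLevel.ALL on the session, plain string statements executed through that session inherit ALL without being wrapped in a SimpleStatement. The sketch below assumes a local node and an existing table ks.tbl; on newer driver versions this legacy session attribute emits a deprecation warning in favor of execution profiles.

from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])  # placeholder contact point
session = cluster.connect()
session.default_consistency_level = ConsistencyLevel.ALL  # applies when no per-statement CL is given

# This INSERT runs at CL ALL without an explicit consistency_level.
session.execute("INSERT INTO ks.tbl (k, v) VALUES (%s, %s)", (1, 2))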

Example 11: test_concurrent_decommission_not_allowed

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_concurrent_decommission_not_allowed(self):
        """
        Test concurrent decommission is not allowed
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        mark = node2.mark_log()

        def decommission():
            node2.nodetool('decommission')

        # Launch the first decommission in an external thread
        t = Thread(target=decommission)
        t.start()

        # Make sure first decommission is initialized before second decommission
        node2.watch_log_for('DECOMMISSIONING', filename='debug.log')

        # Launch a second decommission, should fail
        with pytest.raises(ToolError):
            node2.nodetool('decommission')

        # Check data is correctly forwarded to node1 after node2 is decommissioned
        t.join()
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        session = self.patient_cql_connection(node1)
        session.execute('USE ks')
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE) 
Developer: apache | Project: cassandra-dtest | Lines of code: 39 | Source file: topology_test.py

Example 12: test_simple

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_simple(self):
        """
        Test the SimpleStrategy on a 3 node cluster
        """
        self.cluster.populate(3).start(wait_for_binary_proto=True, wait_other_notice=True)
        node1 = self.cluster.nodelist()[0]
        session = self.patient_exclusive_cql_connection(node1, consistency_level=ConsistencyLevel.ALL)
        session.max_trace_wait = 120

        replication_factor = 3
        create_ks(session, 'test', replication_factor)
        session.execute('CREATE TABLE test.test (id int PRIMARY KEY, value text)', trace=False)

        for key, token in list(murmur3_hashes.items()):
            logger.debug('murmur3 hash key={key},token={token}'.format(key=key, token=token))
            query = SimpleStatement("INSERT INTO test (id, value) VALUES ({}, 'asdf')".format(key), consistency_level=ConsistencyLevel.ALL)
            future = session.execute_async(query, trace=True)
            future.result()
            block_on_trace(session)

            trace = future.get_query_trace(max_wait=120)
            self.pprint_trace(trace)

            stats = self.get_replicas_from_trace(trace)
            replicas_should_be = set(self.get_replicas_for_token(
                token, replication_factor))
            logger.debug('\nreplicas should be: %s' % replicas_should_be)
            logger.debug('replicas were: %s' % stats['replicas'])

            # Make sure the correct nodes are replicas:
            assert stats['replicas'] == replicas_should_be
            # Make sure that each replica node was contacted and
            # acknowledged the write:
            assert stats['nodes_sent_write'] == stats['nodes_responded_write'] 
Developer: apache | Project: cassandra-dtest | Lines of code: 36 | Source file: replication_test.py

Example 13: test_force_with_none_down

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_force_with_none_down(self):
        """
        if we force an incremental repair, but all the involved nodes are up, 
        we should run normally and promote sstables afterwards
        """
        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                     'num_tokens': 1,
                                                                                     'commitlog_sync_period_in_ms': 500})
        self.init_default_config()
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        # run with force flag
        node1.repair(options=['ks', '--force'])

        # ... and verify everything was still promoted
        self.assertAllRepairedSSTables(node1, 'ks')
        self.assertAllRepairedSSTables(node2, 'ks')
        self.assertAllRepairedSSTables(node3, 'ks') 
Developer: apache | Project: cassandra-dtest | Lines of code: 29 | Source file: incremental_repair_test.py

Example 14: test_hosts

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_hosts(self):
        """
        running an incremental repair with hosts specified should incrementally repair
        the given nodes, but should not promote the sstables to repaired
        """
        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                     'num_tokens': 1,
                                                                                     'commitlog_sync_period_in_ms': 500})
        self.init_default_config()
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        # run with the -hosts option
        node1.repair(options=['ks', '-hosts', ','.join([node1.address(), node2.address()])])

        # ... and verify nothing was promoted to repaired
        self.assertNoRepairedSSTables(node1, 'ks')
        self.assertNoRepairedSSTables(node2, 'ks') 
Developer: apache | Project: cassandra-dtest | Lines of code: 28 | Source file: incremental_repair_test.py

Example 15: test_subrange

# Required import: from cassandra import ConsistencyLevel [as alias]
# Or: from cassandra.ConsistencyLevel import ALL [as alias]
def test_subrange(self):
        """
        running an incremental repair restricted to a token sub-range should only
        repair the sstables covering that range, leaving a mix of repaired and unrepaired sstables
        """
        self.fixture_dtest_setup.setup_overrides.cluster_options = ImmutableMapping({'hinted_handoff_enabled': 'false',
                                                                                     'num_tokens': 1,
                                                                                     'commitlog_sync_period_in_ms': 500,
                                                                                     'partitioner': 'org.apache.cassandra.dht.Murmur3Partitioner'})
        self.init_default_config()
        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        session = self.patient_exclusive_cql_connection(node3)
        session.execute("CREATE KEYSPACE ks WITH REPLICATION={'class':'SimpleStrategy', 'replication_factor': 3}")
        session.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
        stmt = SimpleStatement("INSERT INTO ks.tbl (k,v) VALUES (%s, %s)")
        stmt.consistency_level = ConsistencyLevel.ALL
        for i in range(10):
            session.execute(stmt, (i, i))

        for node in self.cluster.nodelist():
            node.flush()
            self.assertNoRepairedSSTables(node, 'ks')

        # only repair the partition k=0
        token = Murmur3Token.from_key(bytes([0, 0, 0, 0]))
        # run with start and end token options (-st/-et)
        node1.repair(options=['ks', '-st', str(token.value - 1), '-et', str(token.value)])

        # verify we have a mix of repaired and unrepaired sstables
        self.assertRepairedAndUnrepaired(node1, 'ks')
        self.assertRepairedAndUnrepaired(node2, 'ks')
        self.assertRepairedAndUnrepaired(node3, 'ks') 
Developer: apache | Project: cassandra-dtest | Lines of code: 37 | Source file: incremental_repair_test.py


Note: The cassandra.ConsistencyLevel.ALL examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; please follow each project's license when redistributing or using the code. Do not republish this article without permission.