本文整理汇总了Python中cassandra.cluster.Cluster类的典型用法代码示例。如果您正苦于以下问题:Python Cluster类的具体用法?Python Cluster怎么用?Python Cluster使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Cluster类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_unavailable
def test_unavailable(self):
    """
    Trigger and ensure unavailables are counted.

    Write a key, value pair. Kill a node while waiting for the cluster to register the death.
    Attempt an insert/read at cl.ALL and receive a Unavailable Exception.
    """
    # metrics_enabled is required so cluster.metrics.stats is populated below.
    cluster = Cluster(metrics_enabled=True)
    session = cluster.connect()
    # Test write
    session.execute("INSERT INTO test3rf.test (k, v) VALUES (1, 1)")
    # Assert read
    query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
    results = session.execute(query, {'k': 1})
    self.assertEqual(1, results[0].v)
    # Force kill ccm node
    get_node(1).stop(wait=True, gently=True)
    try:
        # Test write: CL.ALL cannot be satisfied with a node down, so this must
        # raise Unavailable and bump the unavailables metric to 1.
        query = SimpleStatement("INSERT INTO test3rf.test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
        self.assertRaises(Unavailable, session.execute, query)
        self.assertEqual(1, cluster.metrics.stats.unavailables)
        # Test read: same expectation for a read at CL.ALL; metric rises to 2.
        query = SimpleStatement("SELECT v FROM test3rf.test WHERE k=%(k)s", consistency_level=ConsistencyLevel.ALL)
        self.assertRaises(Unavailable, session.execute, query, {'k': 1})
        self.assertEqual(2, cluster.metrics.stats.unavailables)
    finally:
        # Always restart the node so later tests see a healthy cluster.
        get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
示例2: handle_noargs
def handle_noargs(self, **options):
cluster = Cluster()
session = cluster.connect()
# Checking if keysapce exists
query = "SELECT * FROM system.schema_keyspaces WHERE keyspace_name='%s';" % KEYSPACE_NAME
result = session.execute(query)
if len(result) != 0:
msg = 'Looks like you already have a %s keyspace.\nDo you want to delete it and recreate it? All current data will be deleted! (y/n): ' % KEYSPACE_NAME
resp = raw_input(msg)
if not resp or resp[0] != 'y':
print "Ok, then we're done here."
return
query = "DROP KEYSPACE %s" % KEYSPACE_NAME
session.execute(query)
# Creating keysapce
query = "CREATE KEYSPACE tess WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1};"
session.execute(query)
# Creating tables
query = "USE tess;"
session.execute(query)
query = "CREATE TABLE emotiv_eeg_record (test_id int, time double, AF3 double, F7 double, F3 double, FC5 double, T7 double, P7 double, O1 double, O2 double, P8 double, T8 double, FC6 double, F4 double, F8 double, AF4 double, PRIMARY KEY (test_id, time));"
session.execute(query)
cluster.shutdown()
print 'All done!'
示例3: setup
def setup(hosts):
    """Connect to *hosts* and rebuild the test keyspace and table from scratch."""
    cluster = Cluster(hosts)
    # Keep the connection pool minimal for this single-purpose setup session.
    cluster.set_core_connections_per_host(HostDistance.LOCAL, 1)
    session = cluster.connect()

    existing = {row[0] for row in session.execute("SELECT keyspace_name FROM system.schema_keyspaces")}
    if KEYSPACE in existing:
        log.debug("dropping existing keyspace...")
        session.execute("DROP KEYSPACE " + KEYSPACE)

    log.debug("Creating keyspace...")
    ks_ddl = """
        CREATE KEYSPACE %s
        WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
        """ % KEYSPACE
    session.execute(ks_ddl)

    log.debug("Setting keyspace...")
    session.set_keyspace(KEYSPACE)

    log.debug("Creating table...")
    table_ddl = """
        CREATE TABLE %s (
            thekey text,
            col1 text,
            col2 text,
            PRIMARY KEY (thekey, col1)
        )
        """ % TABLE
    session.execute(table_ddl)
示例4: test_refresh_schema_type
def test_refresh_schema_type(self):
    """
    Verify that refresh_user_type_metadata refreshes exactly one UDT's
    metadata in place: the keyspace and keyspace-map objects are reused,
    while the single type's metadata object is rebuilt with equal content.
    """
    if get_server_versions()[0] < (2, 1, 0):
        raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1')
    if PROTOCOL_VERSION < 3:
        raise unittest.SkipTest('UDTs are not specified in change events for protocol v2')
        # We may want to refresh types on keyspace change events in that case(?)
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    keyspace_name = 'test1rf'
    # Use the test method's own name so the UDT is unique per test.
    type_name = self._testMethodName
    session.execute('CREATE TYPE IF NOT EXISTS %s.%s (one int, two text)' % (keyspace_name, type_name))
    # Snapshot the metadata objects before the refresh so identity can be compared.
    original_meta = cluster.metadata.keyspaces
    original_test1rf_meta = original_meta[keyspace_name]
    original_type_meta = original_test1rf_meta.user_types[type_name]
    # only refresh one type
    cluster.refresh_user_type_metadata('test1rf', type_name)
    current_meta = cluster.metadata.keyspaces
    current_test1rf_meta = current_meta[keyspace_name]
    current_type_meta = current_test1rf_meta.user_types[type_name]
    # The containers are the same objects (not rebuilt) ...
    self.assertIs(original_meta, current_meta)
    self.assertIs(original_test1rf_meta, current_test1rf_meta)
    # ... but the refreshed type's metadata is a new, content-equal object.
    self.assertIsNot(original_type_meta, current_type_meta)
    self.assertEqual(original_type_meta.as_cql_query(), current_type_meta.as_cql_query())
    session.shutdown()
示例5: ConnectionTimeoutTest
class ConnectionTimeoutTest(unittest.TestCase):
    """Exercises connection-id acquisition when max_in_flight is artificially small."""

    def setUp(self):
        # Remember the driver default so tearDown can restore it, then
        # squeeze max_in_flight down to force contention for request ids.
        self.defaultInFlight = Connection.max_in_flight
        Connection.max_in_flight = 2
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']))
        self.session = self.cluster.connect()

    def tearDown(self):
        Connection.max_in_flight = self.defaultInFlight
        self.cluster.shutdown()

    def test_in_flight_timeout(self):
        """
        Test to ensure that connection id fetching will block when max_id is reached/
        In previous versions of the driver this test will cause a
        NoHostAvailable exception to be thrown, when the max_id is restricted
        @since 3.3
        @jira_ticket PYTHON-514
        @expected_result When many requests are run on a single node connection acquisition should block
        until connection is available or the request times out.
        @test_category connection timeout
        """
        query = '''SELECT * FROM system.local'''
        # Fire off far more requests than the two allowed in-flight slots,
        # then wait on each one; acquisition should block, not fail.
        pending = [self.session.execute_async(query) for _ in range(100)]
        for request in pending:
            request.result()
示例6: DuplicateRpcTest
class DuplicateRpcTest(unittest.TestCase):
    """Checks host filtering when two peers report the same rpc_address."""

    load_balancing_policy = WhiteListRoundRobinPolicy(['127.0.0.1'])

    def setUp(self):
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy)
        self.session = self.cluster.connect()
        # Deliberately corrupt system.peers so 127.0.0.2 claims 127.0.0.1's rpc address.
        self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.1' WHERE peer='127.0.0.2'")

    def tearDown(self):
        # Undo the system.peers corruption before shutting down.
        self.session.execute("UPDATE system.peers SET rpc_address = '127.0.0.2' WHERE peer='127.0.0.2'")
        self.cluster.shutdown()

    def test_duplicate(self):
        """
        Test duplicate RPC addresses.
        Modifies the system.peers table to make hosts have the same rpc address. Ensures such hosts are filtered out and a message is logged
        @since 3.4
        @jira_ticket PYTHON-366
        @expected_result only one hosts' metadata will be populated
        @test_category metadata
        """
        mock_handler = MockLoggingHandler()
        logger = logging.getLogger(cassandra.cluster.__name__)
        logger.addHandler(mock_handler)
        try:
            # A fresh Cluster must notice the duplicate during its initial
            # metadata population and warn exactly once about it.
            test_cluster = self.cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=self.load_balancing_policy)
            test_cluster.connect()
            warnings = mock_handler.messages.get("warning")
            self.assertEqual(len(warnings), 1)
            self.assertTrue('multiple' in warnings[0])
        finally:
            logger.removeHandler(mock_handler)
示例7: test_connect_to_already_shutdown_cluster
def test_connect_to_already_shutdown_cluster(self):
    """
    Ensure you cannot connect to a cluster that's been shutdown
    """
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    cluster.shutdown()
    # Connecting after shutdown must fail rather than silently reconnect.
    with self.assertRaises(Exception):
        cluster.connect()
示例8: test_for_schema_disagreement_attribute
def test_for_schema_disagreement_attribute(self):
    """
    Tests to ensure that schema disagreement is properly surfaced on the response future.
    Creates and destroys keyspaces/tables with various schema agreement timeouts set.
    First part runs cql create/drop cmds with schema agreement set in such away were it will be impossible for agreement to occur during timeout.
    It then validates that the correct value is set on the result.
    Second part ensures that when schema agreement occurs, that the result set reflects that appropriately
    @since 3.1.0
    @jira_ticket PYTHON-458
    @expected_result is_schema_agreed is set appropriately on response thefuture
    @test_category schema
    """
    # This should yield a schema disagreement: 0.001s is far too short for
    # agreement to be reached.
    cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001)
    session = cluster.connect(wait_for_all_pools=True)
    try:
        rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)")
        self.check_and_wait_for_agreement(session, rs, False)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, False)
    finally:
        # FIX: the original leaked this Cluster (never shut down).
        cluster.shutdown()

    # These should have schema agreement: 100s is ample time.
    cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100)
    session = cluster.connect()
    try:
        rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute("CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)")
        self.check_and_wait_for_agreement(session, rs, True)
        rs = session.execute("DROP KEYSPACE test_schema_disagreement")
        self.check_and_wait_for_agreement(session, rs, True)
    finally:
        # FIX: the original leaked this Cluster too.
        cluster.shutdown()
示例9: _test_downgrading_cl
def _test_downgrading_cl(self, keyspace, rf, accepted):
    """
    Shared driver for DowngradingConsistencyRetryPolicy tests.

    Creates *keyspace* with replication factor *rf*, stops node 2, and
    verifies that exactly the consistency levels in *accepted* succeed for
    reads/writes while the rest fail. Node 2 is restarted afterwards.
    """
    cluster = Cluster(
        load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()),
        default_retry_policy=DowngradingConsistencyRetryPolicy(),
        protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    create_schema(session, keyspace, replication_factor=rf)
    self._insert(session, keyspace, 1)
    self._query(session, keyspace, 1)
    # Token-aware routing should have sent the single query to node 2 only.
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 1)
    self.coordinator_stats.assert_query_count_equals(self, 3, 0)
    try:
        force_stop(2)
        wait_for_down(cluster, 2)
        self._assert_writes_succeed(session, keyspace, accepted)
        # ANY is write-only, so exclude it from the read expectations.
        self._assert_reads_succeed(session, keyspace,
                                   accepted - set([ConsistencyLevel.ANY]))
        self._assert_writes_fail(session, keyspace,
                                 SINGLE_DC_CONSISTENCY_LEVELS - accepted)
        self._assert_reads_fail(session, keyspace,
                                SINGLE_DC_CONSISTENCY_LEVELS - accepted)
    finally:
        start(2)
        wait_for_up(cluster, 2)
        # FIX: the original leaked this Cluster (never shut down).
        cluster.shutdown()
示例10: test_can_insert_tuples_all_primitive_datatypes
def test_can_insert_tuples_all_primitive_datatypes(self):
    """
    Ensure tuple subtypes are appropriately handled.
    """
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest("The tuple type was introduced in Cassandra 2.1")
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    # FIX: wrap in try/finally so the Cluster is shut down even when an
    # assertion or execute call fails mid-test (original leaked it then).
    try:
        s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple
        s.execute("CREATE TABLE tuple_primitive ("
                  "k int PRIMARY KEY, "
                  "v frozen<tuple<%s>>)" % ','.join(PRIMITIVE_DATATYPES))
        values = []
        type_count = len(PRIMITIVE_DATATYPES)
        for i, data_type in enumerate(PRIMITIVE_DATATYPES):
            # create tuples to be written and ensure they match with the expected response
            # responses have trailing None values for every element that has not been written
            values.append(get_sample(data_type))
            expected = tuple(values + [None] * (type_count - len(values)))
            s.execute("INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)", (i, tuple(values)))
            result = s.execute("SELECT v FROM tuple_primitive WHERE k=%s", (i,))[0]
            self.assertEqual(result.v, expected)
    finally:
        c.shutdown()
示例11: hello_world
def hello_world():
    """
    Translate one line of French text to English, run tone analysis on the
    result, persist everything to Cassandra, and return the tone analysis.
    """
    cluster = Cluster(['172.17.0.2'])
    session = cluster.connect()
    session.execute("CREATE KEYSPACE IF NOT EXISTS results WITH replication = {'class':'SimpleStrategy', 'replication_factor' : 3};")
    session.execute("USE results;")
    session.execute("create table IF NOT EXISTS result_table(translate_source text,translate_result text,analyze_result text,result_id text PRIMARY KEY);")
    # SECURITY NOTE(review): service credentials are hard-coded here; they
    # should be moved to configuration/environment variables and rotated.
    language_translation = LanguageTranslation(
        username='11f6222c-67c8-4b0d-b374-3ab7875a9033',
        password='TxAX4nscSzRM')
    # FIX: use a context manager; the original leaked the file handle.
    with open('translate_file.txt', "r") as f:
        line = f.readline()
    translation = language_translation.translate(
        text=line,
        source='fr',
        target='en')
    translated_text = json.dumps(translation, indent=2, ensure_ascii=False)
    tone_analyzer = ToneAnalyzerV3(
        username='a098ee33-fc5e-46a8-aced-1c163b186ad4',
        password='nNe5JRZwVGAE',
        version='2016-05-19 ')
    analyze = json.dumps(tone_analyzer.tone(text=translated_text), indent=2)
    # FIX: result_id is declared as text above, so bind a string — the
    # original bound the int 1 to a text column.
    session.execute("INSERT INTO result_table (translate_source,translate_result,analyze_result,result_id) VALUES(%s,%s,%s,%s)", (line, translated_text, analyze, '1'))
    session.execute("SELECT * FROM result_table;")
    # FIX: shut the Cluster down before returning (original leaked it).
    cluster.shutdown()
    return analyze
示例12: main
def main(ip):
    """Connect to the node at *ip*, recreate KEYSPACE, and ensure TABLE exists."""
    cluster = Cluster([ip])
    session = cluster.connect()

    keyspaces = {row[0] for row in session.execute("select keyspace_name from system_schema.columns")}
    if KEYSPACE in keyspaces:
        log.info("dropping existing keyspace...")
        session.execute("DROP KEYSPACE " + KEYSPACE)

    log.info("creating keyspace...")
    keyspace_ddl = """
        CREATE KEYSPACE %s
        WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '1' }
        """ % KEYSPACE
    session.execute(keyspace_ddl)

    log.info("setting keyspace...")
    session.set_keyspace(KEYSPACE)

    tables = {row[0] for row in session.execute("select table_name from system_schema.columns;")}
    if TABLE not in tables:
        log.info("creating table...")
        table_ddl = """
            CREATE TABLE %s (
                thekey text,
                col1 text,
                col2 text,
                PRIMARY KEY (thekey, col1)
            )
            """ % TABLE
        session.execute(table_ddl)
示例13: read_check
def read_check(count):
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect()
gen = 0
counter = 0
random.seed(0);
while counter <= count:
data, gen = mkdata(gen)
try:
query = "SELECT data FROM sha.test WHERE data='%s';" % (data)
rows = session.execute(query)
count = 0
for row in rows:
count +=1
if count != 1:
return False
except Exception, e:
print e
keep_going = False
continue
counter += 1
示例14: writer
def writer():
    """thread writer function

    Generates a deterministic data sequence (seed 0) and inserts each value
    into sha.test, counting successes in the global 'counter'. Runs until a
    query fails, which clears the global 'keep_going' flag.

    Lock choreography: the global 'lock' is held while a row is written and
    is briefly released at the bottom of each iteration so another thread can
    interleave. NOTE(review): the function exits the loop while still holding
    the lock (both on normal termination and on the exception path) — callers
    appear to rely on that hand-off; confirm against the reader thread.
    """
    from cassandra.cluster import Cluster
    cluster = Cluster()
    session = cluster.connect()
    create_database(session);
    # Shared state coordinated with the companion reader/monitor thread.
    global keep_going
    global counter
    global lock
    counter = 0
    gen = 0;
    keep_going = True
    random.seed(0);
    lock.acquire()
    while keep_going:
        data, gen = mkdata(gen)
        try:
            query = "INSERT INTO sha.test (data) VALUES('%s');" % (data)
            session.execute(query)
        except Exception, e:
            print e
            # A failed insert ends the run; 'continue' re-checks the loop
            # condition, which is now False.
            keep_going = False
            continue
        counter += 1
        # let a change to the other thread to acquire the lock
        lock.release()
        lock.acquire()
示例15: run_query
def run_query(self, query, user):
    """
    Execute *query* against the configured Cassandra cluster and return
    (json_data, error): a JSON document of columns/rows on success, or
    (None, message) if the user cancels via KeyboardInterrupt.

    NOTE(review): other exceptions still propagate to the caller, matching
    the original behavior — presumably handled by the framework upstream.
    """
    connection = None
    try:
        # Build the common connection arguments once; only auth differs.
        cluster_kwargs = {
            'port': self.configuration.get('port', ''),
            'protocol_version': self.configuration.get('protocol', 3),
        }
        if self.configuration.get('username', '') and self.configuration.get('password', ''):
            cluster_kwargs['auth_provider'] = PlainTextAuthProvider(
                username='{}'.format(self.configuration.get('username', '')),
                password='{}'.format(self.configuration.get('password', '')))
        connection = Cluster([self.configuration.get('host', '')], **cluster_kwargs)
        session = connection.connect()
        session.set_keyspace(self.configuration['keyspace'])
        logger.debug("Cassandra running query: %s", query)
        result = session.execute(query)

        column_names = result.column_names
        columns = self.fetch_columns(map(lambda c: (c, 'string'), column_names))
        rows = [dict(zip(column_names, row)) for row in result]
        data = {'columns': columns, 'rows': rows}
        json_data = json.dumps(data, cls=CassandraJSONEncoder)
        error = None
    except KeyboardInterrupt:
        error = "Query cancelled by user."
        json_data = None
    finally:
        # FIX: the original never shut the Cluster down, leaking its
        # connections and background threads on every query.
        if connection is not None:
            connection.shutdown()
    return json_data, error