This article collects typical usage examples of the Python class helpers.cluster.ClickHouseCluster. If you are wondering what ClickHouseCluster is for or how it is used, the curated class examples below may help.
The following 14 code examples of the ClickHouseCluster class are shown, ordered roughly by popularity.
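Most of the examples share the same skeleton: create a ClickHouseCluster bound to the test file, register instances with add_instance(), start the cluster inside a module-scoped pytest fixture, and shut it down in a finally block. Note that several of the excerpts are written for Python 2 (print statements, dict.iteritems). The sketch below is not taken from any single example; the instance name, the with_zookeeper flag, and the trivial test query are placeholders, shown only to illustrate the recurring pattern.

import pytest
from helpers.cluster import ClickHouseCluster

# The cluster is tied to this test file; instances must be registered before start().
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', with_zookeeper=True)  # placeholder instance name

@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()      # brings up the containers for all registered instances
        yield cluster
    finally:
        cluster.shutdown()   # always tear the environment down

def test_simple_query(started_cluster):
    # query() runs SQL on the instance and returns the result as text
    assert node.query("SELECT 1") == "1\n"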
Example 1: setup_module
def setup_module(module):
    global DICTIONARIES
    global cluster
    global node

    dict_configs_path = os.path.join(SCRIPT_DIR, 'configs/dictionaries')
    for f in os.listdir(dict_configs_path):
        os.remove(os.path.join(dict_configs_path, f))

    for layout in LAYOUTS:
        for source in SOURCES:
            if source.compatible_with_layout(layout):
                structure = DictionaryStructure(layout, FIELDS[layout.layout_type])
                dict_name = source.name + "_" + layout.name
                dict_path = os.path.join(dict_configs_path, dict_name + '.xml')
                dictionary = Dictionary(dict_name, structure, source, dict_path, "table_" + dict_name)
                dictionary.generate_config()
                DICTIONARIES.append(dictionary)
            else:
                print "Source", source.name, "incompatible with layout", layout.name

    main_configs = []
    for fname in os.listdir(dict_configs_path):
        main_configs.append(os.path.join(dict_configs_path, fname))
    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    node = cluster.add_instance('node', main_configs=main_configs, with_mysql=True, with_mongo=True)
    cluster.add_instance('clickhouse1')
Example 2: setup_module
def setup_module(module):
    global cluster
    global instance
    global test_table

    structure = generate_structure()
    dictionary_files = generate_dictionaries(os.path.join(SCRIPT_DIR, 'configs/dictionaries'), structure)

    cluster = ClickHouseCluster(__file__, base_configs_dir=os.path.join(SCRIPT_DIR, 'configs'))
    instance = cluster.add_instance('instance', main_configs=dictionary_files)
    test_table = DictionaryTestTable(os.path.join(SCRIPT_DIR, 'configs/dictionaries/source.tsv'))
Example 3: started_cluster
def started_cluster():
    global cluster
    try:
        # cluster name -> shard name -> list of replica names
        clusters_schema = {
            "0" : {
                "0" : ["0", "1"],
                "1" : ["0"]
            },
            "1" : {
                "0" : ["0", "1"],
                "1" : ["0"]
            }
        }

        cluster = ClickHouseCluster(__file__)

        for cluster_name, shards in clusters_schema.iteritems():
            for shard_name, replicas in shards.iteritems():
                for replica_name in replicas:
                    name = "s{}_{}_{}".format(cluster_name, shard_name, replica_name)
                    cluster.add_instance(name,
                        config_dir="configs",
                        macroses={"cluster": cluster_name, "shard": shard_name, "replica": replica_name},
                        with_zookeeper=True)

        cluster.start()
        yield cluster

    finally:
        pass
        cluster.shutdown()
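The fixture above only builds and starts the topology (two test clusters, each with one two-replica shard and one single-replica shard); the tests that use it receive the running cluster as an argument. A minimal, hypothetical consumer might look like this (the test name and the trivial query are illustrative, not from the original test file):

def test_instances_are_reachable(started_cluster):
    # Instance names follow the s<cluster>_<shard>_<replica> scheme used in the fixture.
    for name, instance in started_cluster.instances.items():
        assert instance.query("SELECT 1") == "1\n"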
Example 4: test_different_user
def test_different_user():
    # The server must refuse to start when the effective user does not own /var/lib/clickhouse.
    current_user_id = os.getuid()

    if current_user_id != 0:
        return

    other_user_id = pwd.getpwnam('nobody').pw_uid

    cluster = ClickHouseCluster(__file__)
    node = cluster.add_instance('node')
    cluster.start()

    docker_api = docker.from_env().api
    container = node.get_docker_handle()
    container.stop()
    container.start()
    container.exec_run('chown {} /var/lib/clickhouse'.format(other_user_id), privileged=True)
    container.exec_run(CLICKHOUSE_START_COMMAND)

    cluster.shutdown() # cleanup

    with open(os.path.join(node.path, 'logs/clickhouse-server.err.log')) as log:
        expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\."
        last_message = log.readlines()[-1].strip()
        if re.search(expected_message, last_message) is None:
            pytest.fail('Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, last_message))
Example 5: started_cluster
def started_cluster():
    global cluster
    global instance
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance('ch1', config_dir="configs")
        cluster.start()

        instance = cluster.instances['ch1']
        instance.query('CREATE DATABASE dictionaries ENGINE = Dictionary')
        instance.query('CREATE TABLE dictionary_source (id UInt64, value UInt8) ENGINE = Memory')
        #print instance.query('SELECT * FROM system.dictionaries FORMAT Vertical')
        print "Started ", instance.ip_address

        yield cluster

    finally:
        pass
        cluster.shutdown()
Example 6: _fill_nodes
def _fill_nodes(nodes, shard, connections_count):
    for node in nodes:
        node.query(
            '''
            CREATE DATABASE test;

            CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}')
            PARTITION BY date
            ORDER BY id
            SETTINGS
                replicated_max_parallel_fetches_for_host={connections},
                index_granularity=8192;
            '''.format(shard=shard, replica=node.name, connections=connections_count))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', config_dir="configs", main_configs=['configs/remote_servers.xml', 'configs/log_conf.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_small_cluster():
    try:
        cluster.start()

        _fill_nodes([node1, node2], 1, 1)

        yield cluster

    finally:
        cluster.shutdown()
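_fill_nodes creates a replicated test_table on every node, and start_small_cluster hands the running two-node cluster to the tests. As an illustration of how such a fixture is typically consumed, here is a hypothetical test; the test name, the inserted row, and the retry loop are my own, and it assumes time is imported at module level:

def test_replication_between_nodes(start_small_cluster):
    # Write through node1 and wait for node2 to fetch the part.
    node1.query("INSERT INTO test_table VALUES (toDate('2017-06-16'), 111, 0)")

    for _ in range(10):  # replication is asynchronous, so poll briefly
        if node2.query("SELECT count() FROM test_table").strip() == '1':
            break
        time.sleep(0.5)

    assert node2.query("SELECT count() FROM test_table").strip() == '1'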
Example 7: ClickHouseCluster
import time
import pytest
import requests
from tempfile import NamedTemporaryFile
from helpers.hdfs_api import HDFSApi
import os
from helpers.cluster import ClickHouseCluster
import subprocess
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_hdfs=True, config_dir="configs", main_configs=['configs/log_conf.xml'])
@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    except Exception as ex:
        print(ex)
        raise ex
    finally:
        cluster.shutdown()
def test_read_write_storage(started_cluster):
Example 8: ClickHouseCluster
import pytest
import time
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import helpers
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
cluster = ClickHouseCluster(__file__)
# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with the Distributed table.
# Thus we have a shard with a local replica and a shard with only remote replicas.
node_1_1 = instance_with_dist_table = cluster.add_instance(
    'node_1_1', with_zookeeper=True, main_configs=['configs/remote_servers.xml'])
node_1_2 = cluster.add_instance('node_1_2', with_zookeeper=True)
node_2_1 = cluster.add_instance('node_2_1', with_zookeeper=True)
node_2_2 = cluster.add_instance('node_2_2', with_zookeeper=True)
@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for shard in (1, 2):
            for replica in (1, 2):
                node = cluster.instances['node_{}_{}'.format(shard, replica)]
                node.query('''
                    CREATE TABLE replicated (d Date, x UInt32) ENGINE =
Example 9: ClickHouseCluster
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', config_dir="configs")
@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster

    finally:
        cluster.shutdown()
def test_read_only_constraint(started_cluster):
    # Change a setting for the session with SET.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"

    expected_error = "Setting force_index_by_date should not be changed"
    assert expected_error in instance.query_and_get_error("SET force_index_by_date=1")

    # Change a setting for the query with SETTINGS.
    assert instance.query("SELECT value FROM system.settings WHERE name='force_index_by_date'") ==\
           "0\n"
Example 10: test_chroot_with_same_root
def test_chroot_with_same_root():
    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    def create_zk_root(zk):
        zk.ensure_path('/root_a')
        print(zk.get_children('/'))
    cluster_1.add_zookeeper_startup_command(create_zk_root)

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)

            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                for j in range(2): # Second insert to test deduplication
                    node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            time.sleep(1)

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()
Example 11: test_identity
def test_identity():
    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_with_password.xml')
    cluster_2 = ClickHouseCluster(__file__)

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)

    try:
        cluster_1.start()

        node1.query('''
        CREATE TABLE simple (date Date, id UInt32)
        ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
        '''.format(replica=node1.name))

        with pytest.raises(Exception):
            cluster_2.start(destroy_dirs=False)
            node2.query('''
            CREATE TABLE simple (date Date, id UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '1', date, id, 8192);
            ''')

    finally:
        cluster_1.shutdown()
        cluster_2.shutdown()
Example 12: ClickHouseCluster
from contextlib import contextmanager
import pytest
## sudo -H pip install PyMySQL
import pymysql.cursors
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_mysql = True)
create_table_sql_template = """
    CREATE TABLE `clickhouse`.`{}` (
        `id` int(11) NOT NULL,
        `name` varchar(50) NOT NULL,
        `age` int NOT NULL default 0,
        `money` int NOT NULL default 0,
        PRIMARY KEY (`id`)) ENGINE=InnoDB;
    """
@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        conn = get_mysql_conn()
        ## create mysql db and table
        create_mysql_db(conn, 'clickhouse')

        yield cluster
Example 13: ClickHouseCluster
import time
import pytest
import string
import random
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/zstd_compression_by_default.xml'])
node2 = cluster.add_instance('node2', main_configs=['configs/lz4hc_compression_by_default.xml'])
node3 = cluster.add_instance('node3', main_configs=['configs/custom_compression_by_default.xml'])
node4 = cluster.add_instance('node4', user_configs=['configs/enable_uncompressed_cache.xml'])
node5 = cluster.add_instance('node5', main_configs=['configs/zstd_compression_by_default.xml'], user_configs=['configs/enable_uncompressed_cache.xml'])
@pytest.fixture(scope="module")
def start_cluster():
    try:
        cluster.start()

        yield cluster
    finally:
        cluster.shutdown()
def test_preconfigured_default_codec(start_cluster):
    for node in [node1, node2]:
        node.query("""
        CREATE TABLE compression_codec_multiple_with_key (
            somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)),
            id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC),
Example 14: test_chroot_with_same_root
def test_chroot_with_same_root():
    cluster_1 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')
    cluster_2 = ClickHouseCluster(__file__, zookeeper_config_path='configs/zookeeper_config_root_a.xml')

    node1 = cluster_1.add_instance('node1', config_dir='configs', with_zookeeper=True)
    node2 = cluster_2.add_instance('node2', config_dir='configs', with_zookeeper=True)
    nodes = [node1, node2]

    cluster_1.add_zookeeper_startup_command('create /root_a ""')
    cluster_1.add_zookeeper_startup_command('ls / ')

    try:
        cluster_1.start()

        try:
            cluster_2.start(destroy_dirs=False)

            for i, node in enumerate(nodes):
                node.query('''
                CREATE TABLE simple (date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/simple', '{replica}', date, id, 8192);
                '''.format(replica=node.name))
                node.query("INSERT INTO simple VALUES ({0}, {0})".format(i))

            assert node1.query('select count() from simple').strip() == '2'
            assert node2.query('select count() from simple').strip() == '2'

        finally:
            cluster_2.shutdown()

    finally:
        cluster_1.shutdown()