This article collects typical usage examples of Python's confluent_kafka.Producer. If you have been wondering what confluent_kafka.Producer is for and how to use it, the curated code examples below may help. You can also explore the confluent_kafka module, in which this class is defined, in more depth.
The following presents 15 code examples of confluent_kafka.Producer, sorted by popularity by default.
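Before the examples, here is a minimal, self-contained sketch of the typical confluent_kafka.Producer workflow that all of them build on. The broker address and topic name are placeholders, not values from any example below:

from confluent_kafka import Producer

# Sketch: 'localhost:9092' and 'my-topic' are placeholder values.
p = Producer({'bootstrap.servers': 'localhost:9092'})

def delivery_report(err, msg):
    # Called once per message to report delivery success or failure.
    if err is not None:
        print('Delivery failed: {}'.format(err))
    else:
        print('Delivered to {} [{}]'.format(msg.topic(), msg.partition()))

p.produce('my-topic', b'hello', callback=delivery_report)
p.poll(0)   # serve delivery callbacks
p.flush()   # wait for outstanding messages before exiting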
Example 1: _initialize_members
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def _initialize_members(self, topic, server, port, zk_server, zk_port, partitions):
    # get logger instance
    self._logger = logging.getLogger("SPOT.INGEST.KafkaProducer")

    # kafka requirements
    self._server = server
    self._port = port
    self._zk_server = zk_server
    self._zk_port = zk_port
    self._topic = topic
    self._num_of_partitions = partitions
    self._partitions = []
    self._partitioner = None
    self._kafka_brokers = '{0}:{1}'.format(self._server, self._port)

    # create topic with partitions
    self._create_topic()

    self._kafka_conf = self._producer_config(self._kafka_brokers)

    self._p = Producer(**self._kafka_conf)
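The `_producer_config` helper called above is not part of this snippet. A plausible reconstruction is sketched below; everything except `bootstrap.servers` is an assumption, not the original implementation:

def _producer_config(self, kafka_brokers):
    # Hypothetical sketch of the missing helper: returns a config dict
    # suitable for unpacking into Producer(**conf).
    return {
        'bootstrap.servers': kafka_brokers,
        'queue.buffering.max.messages': 100000,
    }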
Example 2: create_producer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def create_producer(self, retry_limit, buffering_max):
    console_out_many(["Creating producer with:",
                      f" bootstrap.servers={self.broker_manager.get_bootstrap_servers()}",
                      f" acks={self.acks_mode}",
                      f" retries={retry_limit}",
                      f" buffering={buffering_max}"], self.get_actor())

    self.producer = Producer({'bootstrap.servers': self.broker_manager.get_bootstrap_servers(),
                              'message.send.max.retries': retry_limit,
                              'queue.buffering.max.ms': buffering_max,
                              # 'queue.buffering.max.ms': 100,
                              # 'batch.num.messages': 1000,
                              # 'stats_cb': my_stats_callback,
                              # 'statistics.interval.ms': 100,
                              'metadata.max.age.ms': 60000,
                              'default.topic.config': {'request.required.acks': self.acks_mode}})
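Note that `default.topic.config` is deprecated in librdkafka 1.0+ and newer confluent-kafka-python releases; topic-level properties can be set at the top level instead, with `acks` as the modern alias for `request.required.acks`. An equivalent configuration would be a sketch like this (not part of the original example):

self.producer = Producer({
    'bootstrap.servers': self.broker_manager.get_bootstrap_servers(),
    'message.send.max.retries': retry_limit,
    'queue.buffering.max.ms': buffering_max,
    'metadata.max.age.ms': 60000,
    'acks': self.acks_mode,  # replaces default.topic.config/request.required.acks
})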
Example 3: create_idempotent_producer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def create_idempotent_producer(self, retry_limit, buffering_max):
    console_out_many(["Creating idempotent producer with:",
                      f" bootstrap.servers={self.broker_manager.get_bootstrap_servers()}",
                      f" acks={self.acks_mode}",
                      f" retries={retry_limit}",
                      " metadata.max.age.ms: 60000",
                      f" buffering={buffering_max}"], self.get_actor())

    self.producer = Producer({'bootstrap.servers': self.broker_manager.get_bootstrap_servers(),
                              'message.send.max.retries': retry_limit,
                              'enable.idempotence': True,
                              'queue.buffering.max.ms': buffering_max,
                              # 'batch.num.messages': 1000,
                              # 'stats_cb': my_stats_callback,
                              # 'statistics.interval.ms': 100,
                              'metadata.max.age.ms': 60000,
                              'default.topic.config': {'request.required.acks': self.acks_mode}
                              })
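When `enable.idempotence` is set, librdkafka enforces `acks=all`, `retries > 0`, and `max.in.flight.requests.per.connection <= 5`, and reports a configuration error if explicit settings conflict with those bounds. A minimal idempotent-producer config (the broker address is a placeholder) is therefore just:

from confluent_kafka import Producer

# Sketch: 'localhost:9092' is a placeholder broker address.
p = Producer({
    'bootstrap.servers': 'localhost:9092',
    'enable.idempotence': True,  # implies acks=all and bounded in-flight requests
})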
Example 4: prepareProducer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def prepareProducer(self, groupID="pythonproducers"):
    options = {
        'bootstrap.servers': self.kafka_brokers,
        'group.id': groupID
    }
    # We need this check, as a local Kafka broker does not expect the SSL protocol.
    if self.kafka_env != 'LOCAL':
        options['security.protocol'] = 'SASL_SSL'
        options['sasl.mechanisms'] = 'PLAIN'
        options['sasl.username'] = 'token'
        options['sasl.password'] = self.kafka_apikey
    if self.kafka_env == 'OCP':
        options['ssl.ca.location'] = os.environ['PEM_CERT']
    print("[KafkaProducer] - This is the configuration for the producer:")
    print('[KafkaProducer] - {}'.format(options))
    self.producer = Producer(options)
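Note that `group.id` is a consumer property; recent librdkafka versions ignore it on a producer and may log a warning. For reference, the same SASL_SSL settings as a standalone config dict look like the sketch below, where every value is a placeholder:

# Sketch: all values below are placeholders.
options = {
    'bootstrap.servers': 'broker-1:9093',
    'security.protocol': 'SASL_SSL',
    'sasl.mechanisms': 'PLAIN',
    'sasl.username': 'token',
    'sasl.password': '<api-key>',
}
producer = Producer(options)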
Example 5: test_ksql_create_stream_w_properties
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def test_ksql_create_stream_w_properties(self):
    """ Test GET requests """
    topic = self.exist_topic
    stream_name = self.test_prefix + "test_ksql_create_stream"
    stream_name = "test_ksql_create_stream"
    ksql_string = "CREATE STREAM {} (ORDER_ID INT, TOTAL_AMOUNT DOUBLE, CUSTOMER_NAME VARCHAR) \
                   WITH (kafka_topic='{}', value_format='JSON');".format(stream_name, topic)
    streamProperties = {"ksql.streams.auto.offset.reset": "earliest"}
    if 'TEST_KSQL_CREATE_STREAM' not in utils.get_all_streams(self.api_client):
        r = self.api_client.ksql(ksql_string, stream_properties=streamProperties)
        self.assertEqual(r[0]['commandStatus']['status'], 'SUCCESS')

    producer = Producer({'bootstrap.servers': self.bootstrap_servers})
    producer.produce(self.exist_topic, '''{"order_id":3,"total_amount":43,"customer_name":"Palo Alto"}''')
    producer.flush()
    print()

    chunks = self.api_client.query("select * from {}".format(stream_name),
                                   stream_properties=streamProperties,
                                   idle_timeout=10)
    # Drain the streamed chunks; the last chunk holds the produced row.
    for chunk in chunks:
        pass

    assert json.loads(chunk)['row']['columns'][-1] == 'Palo Alto'
Example 6: test_conf_none
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def test_conf_none():
    """ Issue #133
    Test that None can be passed for NULL by setting bootstrap.servers
    to None. If None were converted to a string, a broker would
    show up in statistics. Verify that it doesn't. """

    def stats_cb_check_no_brokers(stats_json_str):
        """ Make sure no brokers are reported in stats """
        global seen_stats_cb_check_no_brokers
        stats = json.loads(stats_json_str)
        assert len(stats['brokers']) == 0, "expected no brokers in stats: %s" % stats_json_str
        seen_stats_cb_check_no_brokers = True

    conf = {'bootstrap.servers': None,  # overwrites previous value
            'statistics.interval.ms': 10,
            'stats_cb': stats_cb_check_no_brokers}

    p = confluent_kafka.Producer(conf)
    p.poll(timeout=1)

    global seen_stats_cb_check_no_brokers
    assert seen_stats_cb_check_no_brokers
Example 7: test_error_cb
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def test_error_cb():
    """ Test the error callback. """
    global seen_all_brokers_down

    # Configure an invalid broker and make sure the ALL_BROKERS_DOWN
    # error is seen in the error callback.
    p = Producer({'bootstrap.servers': '127.0.0.1:1', 'socket.timeout.ms': 10,
                  'error_cb': error_cb})

    t_end = time.time() + 5
    while not seen_all_brokers_down and time.time() < t_end:
        p.poll(1)

    assert seen_all_brokers_down
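The `error_cb` function and `seen_all_brokers_down` flag are defined elsewhere in the test module. A plausible sketch of them (a reconstruction, not the verbatim original) is:

from confluent_kafka import KafkaError

seen_all_brokers_down = False

def error_cb(err):
    # Global errors such as _ALL_BROKERS_DOWN are delivered to this
    # callback instead of being raised as exceptions.
    global seen_all_brokers_down
    if err.code() == KafkaError._ALL_BROKERS_DOWN:
        seen_all_brokers_down = True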
Example 8: test_fatal
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def test_fatal():
    """ Test fatal exceptions """
    # Construct a fatal KafkaError by hand to exercise its flags.
    p = Producer({'error_cb': error_cb})

    with pytest.raises(KafkaException) as exc:
        raise KafkaException(KafkaError(KafkaError.MEMBER_ID_REQUIRED,
                                        fatal=True))
    err = exc.value.args[0]
    assert isinstance(err, KafkaError)
    assert err.fatal()
    assert not err.retriable()
    assert not err.txn_requires_abort()

    p.poll(0)  # Need some p use to avoid flake8 unused warning
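In practice, fatal errors surface through the delivery or error callbacks rather than being constructed by hand. A hedged sketch of checking the same flags in a delivery callback:

from confluent_kafka import KafkaException

def on_delivery(err, msg):
    # Sketch: inspect the KafkaError flags on delivery failure.
    if err is not None:
        if err.fatal():
            # The producer instance is unusable after a fatal error.
            raise KafkaException(err)
        elif err.retriable():
            pass  # librdkafka has already retried within its configured limits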
Example 9: _get_kafka_producer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def _get_kafka_producer(self):
    try:
        if self.kafka_endpoint.startswith('@'):
            try:
                _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
                                                       self.kafka_endpoint[1:])
                log.debug('found-kafka-service', endpoint=_k_endpoint)
            except Exception as e:
                log.exception('no-kafka-service-in-consul', e=e)
                self.kproducer = None
                self.kclient = None
                return
        else:
            _k_endpoint = self.kafka_endpoint

        self.kproducer = _kafkaProducer(
            {'bootstrap.servers': _k_endpoint}
        )
    except Exception as e:
        log.exception('failed-get-kafka-producer', e=e)
        return
Example 10: Kafka
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def Kafka(node, servers='', topic='', json=False, wrap=False):
    '''Connect to kafka server and send data

    Args:
        node (Node): input stream node
        servers (str): kafka bootstrap servers
        topic (str): kafka topic to produce to
        json (bool): dump input data as json before sending
        wrap (bool): wrap result in a list
    '''
    p = Producer({'bootstrap.servers': servers})

    def _send(data, producer=p, topic=topic, json=json, wrap=wrap):
        # Trigger any available delivery report callbacks from previous produce() calls
        producer.poll(0)

        if wrap:
            data = [data]

        if json:
            data = JSON.dumps(data)

        producer.produce(topic, data.encode('utf-8'))
        return data

    ret = Node(foo=_send, name='Kafka', inputs=1, graphvizshape=_OUTPUT_GRAPHVIZSHAPE)
    node >> ret
    return ret
Example 11: create_kafka_consumer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def create_kafka_consumer(brokers_ips: List[str], extra_params: Dict):
    # brokers_ips is a list of host:port strings joined into one bootstrap list
    # (the original annotated it as str, which would join individual characters).
    config = extra_params or dict()
    config.update({'bootstrap.servers': ",".join(brokers_ips)})
    # Note: despite its name, this function returns a confluent_kafka Producer.
    return confluent_kafka.Producer(config)
Example 12: __init__
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def __init__(self, config):  # pragma: no cover
    """
    Streaming client implementation based on Kafka.

    Configuration keys:
      KAFKA_ADDRESS
      KAFKA_CONSUMER_GROUP
      KAFKA_TOPIC
      TIMEOUT
      EVENT_HUB_KAFKA_CONNECTION_STRING
    """
    self.logger = Logger()
    self.topic = config.get("KAFKA_TOPIC")
    if not self.topic:
        raise ValueError("KAFKA_TOPIC is not set in the config object.")
    if not config.get("KAFKA_ADDRESS"):
        raise ValueError("KAFKA_ADDRESS is not set in the config object.")

    if config.get("TIMEOUT"):
        try:
            self.timeout = int(config.get("TIMEOUT"))
        except ValueError:
            self.timeout = None
    else:
        self.timeout = None

    kafka_config = self.create_kafka_config(config)
    self.admin = admin.AdminClient(kafka_config)

    if config.get("KAFKA_CONSUMER_GROUP") is None:
        self.logger.info('Creating Producer')
        self.producer = Producer(kafka_config)
        self.run = False
    else:
        self.logger.info('Creating Consumer')
        self.consumer = Consumer(kafka_config)
        self.run = True
        signal.signal(signal.SIGTERM, self.exit_gracefully)
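The `create_kafka_config` helper used above is not shown. A plausible sketch follows; every key other than `bootstrap.servers` is an assumption about what the original might set:

def create_kafka_config(self, config):
    # Hypothetical reconstruction of the helper used above.
    kafka_config = {'bootstrap.servers': config.get("KAFKA_ADDRESS")}
    if config.get("KAFKA_CONSUMER_GROUP") is not None:
        kafka_config['group.id'] = config.get("KAFKA_CONSUMER_GROUP")
    return kafka_config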
Example 13: main
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def main():
    """Main entry point for the script"""
    parser = _get_parser()
    args = parser.parse_args()
    sources = _get_sources(_get_items(args.items), args.limit)
    timestamp = pd.Timestamp(args.start)
    freq = pd.Timedelta(args.freq)
    logging.basicConfig(level=_VERBOSITY.get(args.verbosity, logging.DEBUG))
    if args.broker_list is None:
        def _produce(timestamp, name, price):
            print('{},{},{}'.format(timestamp, name, price))

        LOGGER.debug('Running in console mode')
        _run(sources, timestamp, freq, args.real_time, args.real_time_multiplier, _produce)
    else:
        if args.topic is None:
            raise ValueError('Must specify --topic when using Kafka')
        from confluent_kafka import Producer
        producer = Producer({'bootstrap.servers': args.broker_list})

        def _produce(timestamp, name, price):
            data = '{},{},{}'.format(timestamp, name, price)
            produced = False
            while not produced:
                try:
                    producer.produce(args.topic, value=data.encode('utf-8'), key=name)
                    producer.poll(0)
                    produced = True
                except BufferError:
                    # Local queue is full; serve delivery callbacks and retry.
                    producer.poll(10)

        LOGGER.debug('Producing to %s on %s', args.topic, args.broker_list)
        _run(sources, timestamp, freq, args.real_time, args.real_time_multiplier, _produce)
        producer.flush()
Example 14: producer
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def producer(self):
    # TODO: Must set all config values applicable to a producer
    return kafka.Producer({'bootstrap.servers': self.config.BOOTSTRAP_SERVERS})
Example 15: init
# Required import: import confluent_kafka [as alias]
# Or: from confluent_kafka import Producer [as alias]
def init():
    global log
    global kafka_producer

    if not log:
        log = create_logger(Config().get("logging"))

    if kafka_producer:
        raise Exception("XOSKafkaProducer already initialized")
    else:
        log.info(
            "Connecting to Kafka with bootstrap servers: %s"
            % Config.get("kafka_bootstrap_servers")
        )
        try:
            producer_config = {
                "bootstrap.servers": ",".join(Config.get("kafka_bootstrap_servers"))
            }
            kafka_producer = confluent_kafka.Producer(**producer_config)
            log.info("Connected to Kafka: %s" % kafka_producer)
        except confluent_kafka.KafkaError as e:
            log.exception("Kafka Error: %s" % e)