

Python BinLogStreamReader.close Method Code Examples

This article collects typical usage examples of the Python method pymysqlreplication.BinLogStreamReader.close. If you are unsure what BinLogStreamReader.close does or how to use it in practice, the curated code examples below should help. You can also explore further usage examples of the class this method belongs to, pymysqlreplication.BinLogStreamReader.


The sections below present 15 code examples of BinLogStreamReader.close, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
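Before the examples, here is a minimal sketch of the open/iterate/close life-cycle that all of them share. The connection values are placeholders, not taken from any of the projects below:

from pymysqlreplication import BinLogStreamReader

# Placeholder settings; point these at your own MySQL server.
MYSQL_SETTINGS = {"host": "127.0.0.1", "port": 3306, "user": "root", "passwd": ""}

stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                            server_id=1)  # server_id must be unique among replicas
try:
    for binlogevent in stream:
        binlogevent.dump()
finally:
    stream.close()  # always release the replication connection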

Example 1: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    # connect to rethinkdb; repl() registers this connection as the default for run()
    rethinkdb.connect("localhost", 28015, "mysql").repl()
    try:
        rethinkdb.db_drop("mysql").run()
    except Exception:  # the database may not exist yet
        pass

    tables = ["dept_emp", "dept_manager", "titles",
              "salaries", "employees", "departments"]
    for table in tables:
        rethinkdb.db("mysql").table_create(table).run()

    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        blocking=True,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
    )

    # process Feed
    for binlogevent in stream:
        if not isinstance(binlogevent, WriteRowsEvent):
            continue

        for row in binlogevent.rows:
            if not binlogevent.schema == "employees":
                continue

            vals = {str(k): str(v) for k, v in row["values"].items()}
            rethinkdb.table(binlogevent.table).insert(vals).run()

    stream.close()
Author: Affirm, Project: python-mysql-replication, Lines: 36, Source: rethinkdb_sync.py

Example 2: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    rclient = redis.from_url(redis_url)
    cache = rcache.Rcache(cache_url, server_id)
 
    log_file = rclient.get("log_file")
    log_pos = rclient.get("log_pos")
    log_pos = int(log_pos) if log_pos else None
 
    only_events = _trans_events(events)
    only_events.append(RotateEvent)
 
    stream = BinLogStreamReader(
        connection_settings=mysql_settings,
        server_id=server_id,
        blocking=blocking,
        only_events=only_events,
        only_tables=tables,
        only_schemas=schemas,
        resume_stream=True,  # for resuming
        freeze_schema=False, # do not freeze: slower, but ALTER TABLE events stay supported
        log_file=log_file,
        log_pos=log_pos)
    row_count = 0
 
    for binlogevent in stream:
        if int(time.time()) - binlogevent.timestamp > binlog_max_latency:
            logger.warning("latency[{}] too large".format(
                int(time.time()) - binlogevent.timestamp))
        logger.debug("catch {}".format(binlogevent.__class__.__name__))
        if isinstance(binlogevent, RotateEvent):  #listen log_file changed event
            rclient.set("log_file", binlogevent.next_binlog)
            rclient.set("log_pos", binlogevent.position)
            logger.info("log_file:{}, log_position:{}".format(
                binlogevent.next_binlog, binlogevent.position))
        else:
            row_count += 1
            table = "%s.%s" % (binlogevent.schema, binlogevent.table)
            vals_lst = _get_row_values(binlogevent)
            if not binlogevent.primary_key:
                # fall back to a manually configured key for tables without one
                binlogevent.primary_key = tables_without_primary_key.get(table)
            try:
                cache.save(table, binlogevent.primary_key, vals_lst)
                logger.debug("save {} {} rows to cache".format(
                    table, len(vals_lst)))
            except rcache.SaveIgnore as err:
                logger.warning(str(err))
            except rcache.FullError as err:
                logger.info("cache OOM occurred: {}. trigger dump command".format(
                    str(err)))
                _trigger_dumping()
                cache.save(table, binlogevent.primary_key, vals_lst)
            if cache_max_rows and cache.size > cache_max_rows:
                logger.info("cache size:{} >= {}, trigger dumping".format(
                    cache.size, cache_max_rows))
                _trigger_dumping()
            rclient.set("log_pos", binlogevent.packet.log_pos)
        if row_count % 1000 == 0:
            logger.info("save {} changed rows".format(row_count))
 
    stream.close()
Author: dlf412, Project: mysql-cdc-redis, Lines: 62, Source: cdc.py
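The checkpoint pattern in Example 2 — persist log_file on each RotateEvent and log_pos after each row, then pass both back on startup with resume_stream=True — can be isolated into two small helpers. A sketch, assuming a redis.Redis client and the same key names as above:

# Hedged sketch: rclient is assumed to be a redis.Redis instance.
def save_checkpoint(rclient, log_file, log_pos):
    rclient.mset({"log_file": log_file, "log_pos": log_pos})

def load_checkpoint(rclient):
    log_file = rclient.get("log_file")
    log_pos = rclient.get("log_pos")
    if log_file is not None:
        log_file = log_file.decode()  # redis-py returns bytes under Python 3
    return log_file, int(log_pos) if log_pos else None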

Example 3: proc_binlog

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
	def proc_binlog(self):
		stream = BinLogStreamReader(
			connection_settings = self.config['mysql'],
			server_id = self.config['slave']['server_id'],
			log_file = self.log_file,
			log_pos = self.log_pos,
			only_schemas = self.config['slave']['schemas'],
			blocking = True,
			resume_stream = bool(self.log_file and self.log_pos),
			only_events=[WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent]
		)
		for binlogevent in stream:
			#binlogevent.dump()
			self.log_file = stream.log_file
			self.log_pos  = stream.log_pos
			for row in binlogevent.rows:
				pk = binlogevent.primary_key
				table = binlogevent.table
				schema = binlogevent.schema
				if isinstance(binlogevent, WriteRowsEvent):
					yield self.es.index_op(self._format(row['values']), doc_type=table, index=schema, id=row['values'][pk])
				elif isinstance(binlogevent, UpdateRowsEvent):
					yield self.es.update_op(self._format(row['after_values']), doc_type=table, index=schema, id=row['after_values'][pk])
				elif isinstance(binlogevent, DeleteRowsEvent):
					yield self.es.delete_op(doc_type=table, index=schema, id=row['values'][pk])
				else:
					continue

		stream.close()
Author: xhook7, Project: py-mysql-es, Lines: 31, Source: sync.py

Example 4: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        for row in binlogevent.rows:
            event = {}
            event["schema"] = binlogevent.schema
            event["table"] = binlogevent.table

            if isinstance(binlogevent, DeleteRowsEvent):
                event["action"] = "delete"
                event.update(row["values"])
            elif isinstance(binlogevent, UpdateRowsEvent):
                event["action"] = "update"
                event.update(row["after_values"])
            elif isinstance(binlogevent, WriteRowsEvent):
                event["action"] = "insert"
                event.update(row["values"])
            print(json.dumps(event))
            sys.stdout.flush()


    stream.close()
Author: Affirm, Project: python-mysql-replication, Lines: 27, Source: mysql_to_logstash.py

Example 5: Listener

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
class Listener(object):
    def __init__(self, connection_settings, server_id, blocking=True, resume_stream=True):
        self._stream = BinLogStreamReader(
            connection_settings=connection_settings,
            server_id=server_id,
            only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
            blocking=blocking,
            resume_stream=resume_stream
        )

    def __del__(self):
        self._stream.close()

    def accept(self, callback):
        for log in self._stream:
            for row in log.rows:
                fields = {}
                method = ''
                if isinstance(log, DeleteRowsEvent):
                    fields = row["values"]
                    method = 'DELETE'
                elif isinstance(log, UpdateRowsEvent):
                    fields = row["after_values"]
                    method = 'UPDATE'
                elif isinstance(log, WriteRowsEvent):
                    method = 'INSERT'
                    fields = row["values"]

                logger.debug(
                    "captured MySQL %r event, values: %r",
                    method, json.dumps(fields)
                )
                callback(log.schema, log.table, method, fields)
Author: JianfuLi, Project: hamal, Lines: 35, Source: listener.py
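Closing the stream in __del__, as Example 5 does, ties cleanup to garbage collection, whose timing Python does not guarantee; an explicit close or a context manager is more deterministic. A hypothetical way to drive the class (MYSQL_SETTINGS and on_change are illustrative names, not part of the original project):

# Hypothetical usage of the Listener above.
def on_change(schema, table, method, fields):
    print(schema, table, method, fields)

listener = Listener(connection_settings=MYSQL_SETTINGS, server_id=101)
listener.accept(on_change)  # blocks while events arrive; __del__ closes the stream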

Example 6: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    utils.drop_privileges()
    if BinLogStreamReader is None:
        utils.err("error: Python module `pymysqlreplication' is missing")
        return 1
    settings = zabbix_bridge_conf.get_settings()

    # Set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=settings['mysql'],
                                server_id=settings['slaveid'],
                                only_events=[WriteRowsEvent],
                                resume_stream=True,
                                blocking=True)

    db_filename = settings['sqlitedb']
    dbcache = sqlite3.connect(':memory:')
    cachecur = dbcache.cursor()
    cachecur.execute("ATTACH DATABASE '%s' as 'dbfile'" % (db_filename,))
    cachecur.execute('CREATE TABLE zabbix_cache AS SELECT * FROM dbfile.zabbix_cache')
    cachecur.execute('CREATE UNIQUE INDEX uniq_zid on zabbix_cache (id)')

    # tcollector.zabbix_bridge namespace for internal Zabbix bridge metrics.
    log_pos = 0
    key_lookup_miss = 0
    sample_last_ts = int(time.time())
    last_key_lookup_miss = 0

    for binlogevent in stream:
        if binlogevent.schema == settings['mysql']['db']:
            table = binlogevent.table
            log_pos = binlogevent.packet.log_pos
            if table == 'history' or table == 'history_uint':
                for row in binlogevent.rows:
                    r = row['values']
                    itemid = r['itemid']
                    cachecur.execute('SELECT id, key, host, proxy FROM zabbix_cache WHERE id=?', (itemid,))
                    row = cachecur.fetchone()
                    if (row is not None):
                        print("zbx.%s %d %s host=%s proxy=%s" % (row[1], r['clock'], r['value'], row[2], row[3]))
                        if ((int(time.time()) - sample_last_ts) > settings['internal_metric_interval']): # Sample internal metrics @ 10s intervals
                            sample_last_ts = int(time.time())
                            print("tcollector.zabbix_bridge.log_pos %d %s" % (sample_last_ts, log_pos))
                            print("tcollector.zabbix_bridge.key_lookup_miss %d %s" % (sample_last_ts, key_lookup_miss))
                            print("tcollector.zabbix_bridge.timestamp_drift %d %s" % (sample_last_ts, (sample_last_ts - r['clock'])))
                            if ((key_lookup_miss - last_key_lookup_miss) > settings['dbrefresh']):
                                print("tcollector.zabbix_bridge.key_lookup_miss_reload %d %s" % (sample_last_ts, (key_lookup_miss - last_key_lookup_miss)))
                                cachecur.execute('DROP TABLE zabbix_cache')
                                cachecur.execute('CREATE TABLE zabbix_cache AS SELECT * FROM dbfile.zabbix_cache')
                                cachecur.execute('CREATE UNIQUE INDEX uniq_zid on zabbix_cache (id)')
                                last_key_lookup_miss = key_lookup_miss
                    else:
                        # TODO: Consider https://wiki.python.org/moin/PythonDecoratorLibrary#Retry
                        utils.err("error: Key lookup miss for %s" % (itemid))
                        key_lookup_miss += 1
                sys.stdout.flush()

    dbcache.close()
    stream.close()
Author: OpenTSDB, Project: tcollector, Lines: 61, Source: zabbix_bridge.py

Example 7: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
  global repLogFile
  global repLogPosition
  global repLogConfig

  graphiteConfig = readGraphiteConfig()

  try:
    print("Start")
    sock = socket.socket()
    sock.connect((CARBON_SERVER, CARBON_PORT))
    print('Carbon socket opened.')
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        server_id=2,  # server id needs to be unique
        only_events=[WriteRowsEvent, DeleteRowsEvent, UpdateRowsEvent],
        blocking=True,
        log_file=repLogFile,
        log_pos=repLogPosition,
        resume_stream=repLogPosition is not None)
    print("Binlog stream opened")

    for binlogevent in stream:
      #put replication log file and position in variables so we can save them later
      repLogFile = stream.log_file
      repLogPosition = stream.log_pos
      #also check for changes in graphite configuration and read again if needed
      if binlogevent.schema == "weather" and binlogevent.table == "graphite":
        graphiteConfig = readGraphiteConfig()
      #this is the data we are interested in
      if binlogevent.schema == "weather" and binlogevent.table == "data":

        for row in binlogevent.rows:
          #we only care about inserts
          if isinstance(binlogevent, WriteRowsEvent):
            vals = row["values"]
            #check if the sensor is one that we have configuration for
            if vals["sensorid"] in graphiteConfig:
              conf = graphiteConfig[vals["sensorid"]]
              value = float(vals["value"])
              # do a conversion if needed
              if conf["formula"]:
                value = eval(conf["formula"], {"__builtins__": {}}, {"value": value, "round": round})
              # construct the message and send it to carbon
              message = '%s %f %d\n' % (conf["graphitepath"], value, round((vals["time"] - _EPOCH).total_seconds()))
              sock.sendall(message.encode())
              print(vals["sensorid"], vals["time"], value)
              print(message)

  except KeyboardInterrupt:
    #close open connections
    stream.close()
    sock.close()
    #save replication log position
    repLogConfig.set('replicationlog','file',repLogFile)
    repLogConfig.set('replicationlog','position',str(repLogPosition))
    with open('replogposition.ini', 'w') as f:
      repLogConfig.write(f)
Author: mika-koivusaari, Project: mysql_to_graphite, Lines: 60, Source: mysql_to_graphite.py

Example 8: mysql_stream

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def mysql_stream(conf, mongo, queue_out):
    logger = logging.getLogger(__name__)

    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    mysql_settings = {
        "host": conf['host'],
        "port": conf.getint('port'),
        "user": conf['user'],
        "passwd": conf['password']
    }

    last_log = mongo.get_log_pos()
    if last_log['log_file'] == 'NA':
        log_file = None
        log_pos = None
        resume_stream = False
    else:
        log_file = last_log['log_file']
        log_pos = int(last_log['log_pos'])
        resume_stream = True

    stream = BinLogStreamReader(connection_settings=mysql_settings,
                                server_id=conf.getint('slaveid'),
                                only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                                blocking=True,
                                resume_stream=resume_stream,
                                log_file=log_file,
                                log_pos=log_pos,
                                only_schemas=conf['databases'].split(','))

    for binlogevent in stream:
        binlogevent.dump()
        schema = "%s" % binlogevent.schema
        table = "%s" % binlogevent.table

        for row in binlogevent.rows:
            if isinstance(binlogevent, DeleteRowsEvent):
                vals = row["values"]
                event_type = 'delete'
            elif isinstance(binlogevent, UpdateRowsEvent):
                vals = dict()
                vals["before"] = row["before_values"]
                vals["after"] = row["after_values"]
                event_type = 'update'
            elif isinstance(binlogevent, WriteRowsEvent):
                vals = row["values"]
                event_type = 'insert'

            seqnum = mongo.write_to_queue(event_type, vals, schema, table)
            mongo.write_log_pos(stream.log_file, stream.log_pos)
            queue_out.put({'seqnum': seqnum})
            logger.debug(row)
            logger.debug(stream.log_pos)
            logger.debug(stream.log_file)

    stream.close()
Author: njordr, Project: mymongo, Lines: 60, Source: mysql.py

Example 9: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True)

    for binlogevent in stream:
        binlogevent.dump()

    stream.close()
Author: atopos0627, Project: Handle_jsonData_inMysql, Lines: 14, Source: Handle_jsonData_inMysql.py

Example 10: consume_events

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def consume_events():
    stream = BinLogStreamReader(connection_settings=database,
                                server_id=3,
                                resume_stream=False,
                                blocking=True,
                                only_events = [UpdateRowsEvent],
                                only_tables = ['test'] )
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    i = 0.0
    for binlogevent in stream:
        i += 1.0
        if i % 1000 == 0:
            print("%d events per second (%d total)" % (i / (time.perf_counter() - start), i))
    stream.close()
Author: 0xcc, Project: python-mysql-replication, Lines: 16, Source: benchmark.py

Example 11: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                log_file="mysql-bin.000002",
                                blocking=True,
                                only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent])

    for binlogevent in stream:
        binlogevent.dump()

    stream.close()
Author: 3manuek, Project: python-mysql-replication, Lines: 16, Source: dump_events_only.py

Example 12: binlog_process

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def binlog_process(args):
    file = None
    stream = None
    sql_list = []
    try:
        file = open(args.out_file, "w+")
        stream = BinLogStreamReader(connection_settings=connection_settings, log_file=args.log_file, log_pos=args.start_pos,
                                    resume_stream=True, only_schemas=args.databases, only_tables=args.tables, server_id=args.server_id)

        for binlogevent in stream:
            if args.log_file != stream.log_file:
                break

            if args.end_pos is not None and binlogevent.packet.log_pos > args.end_pos:
                break

            if args.start_datetime is not None and \
                    datetime.datetime.fromtimestamp(binlogevent.timestamp) < args.start_datetime:
                continue

            if args.end_datetime is not None and \
                    datetime.datetime.fromtimestamp(binlogevent.timestamp) > args.end_datetime:
                break

            if isinstance(binlogevent, WriteRowsEvent):
                for row in binlogevent.rows:
                    if args.flashback:
                        sql_list.append(delete_to_sql(row, binlogevent) + "\n")
                    else:
                        sql_list.append(insert_to_sql(row, binlogevent) + "\n")
            elif isinstance(binlogevent, DeleteRowsEvent):
                for row in binlogevent.rows:
                    if args.flashback:
                        sql_list.append(insert_to_sql(row, binlogevent) + "\n")
                    else:
                        sql_list.append(delete_to_sql(row, binlogevent) + "\n")
            elif isinstance(binlogevent, UpdateRowsEvent):
                for row in binlogevent.rows:
                    sql_list.append(update_to_sql(row, binlogevent, args.flashback) + "\n")
        file.writelines(sql_list)
    finally:
        if stream is not None:
            stream.close()
        if file is not None:
            file.close()
Author: ycg, Project: Python-Tools, Lines: 48, Source: binlog.py
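Because BinLogStreamReader exposes a close() method, the try/finally in Example 12 can also be expressed with contextlib.closing, which guarantees close() runs even if the loop raises. A minimal sketch, assuming a pymysql-style connection_settings dict defined elsewhere:

from contextlib import closing
from pymysqlreplication import BinLogStreamReader

# connection_settings is assumed to be defined elsewhere, as in Example 12.
with closing(BinLogStreamReader(connection_settings=connection_settings,
                                server_id=100)) as stream:
    for binlogevent in stream:
        binlogevent.dump()
# close() has already been called here, even if the loop raised.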

Example 13: _binlog_loader

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
    def _binlog_loader(self):
        """
        read row from binlog
        """
        if self.is_binlog_sync:
            resume_stream = True
            logging.info("Resume from binlog_file: {file}  binlog_pos: {pos}".format(file=self.log_file,
                                                                                     pos=self.log_pos))
        else:
            resume_stream = False

        stream = BinLogStreamReader(connection_settings=self.binlog_conf,
                                    server_id=self.config['mysql']['server_id'],
                                    only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
                                    only_tables=[self.config['mysql']['table']],
                                    resume_stream=resume_stream,
                                    blocking=True,
                                    log_file=self.log_file,
                                    log_pos=self.log_pos)
        for binlogevent in stream:
            self.log_file = stream.log_file
            self.log_pos = stream.log_pos
            for row in binlogevent.rows:
                if isinstance(binlogevent, DeleteRowsEvent):
                    rv = {
                        'action': 'delete',
                        'doc': row['values']
                    }
                elif isinstance(binlogevent, UpdateRowsEvent):
                    rv = {
                        'action': 'update',
                        'doc': row['after_values']
                    }
                elif isinstance(binlogevent, WriteRowsEvent):
                    rv = {
                        'action': 'index',
                        'doc': row['values']
                    }
                else:
                    logging.error('unknown action type in binlog')
                    raise TypeError('unknown action type in binlog')
                yield rv
                # print(rv)
        stream.close()
        raise IOError('mysql connection closed')
Author: FashtimeDotCom, Project: py-mysql-elasticsearch-sync, Lines: 47, Source: __init__.py
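Since _binlog_loader closes the stream and raises IOError when the blocking read ends, callers typically wrap it in a retry loop so syncing resumes from the saved log_file/log_pos. A sketch under assumed names (syncer and apply_to_es are illustrative, not part of the original project):

import time

# Hypothetical consumer of the generator above.
while True:
    try:
        for action in syncer._binlog_loader():
            apply_to_es(action)  # hypothetical sink, e.g. an Elasticsearch bulk call
    except IOError:
        time.sleep(5)  # back off, then resume from the persisted position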

Example 14: doRep

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def doRep(logPosConObj,MYSQL_SETTINGS):
	key = MYSQL_SETTINGS["host"]+":"+str(MYSQL_SETTINGS["port"])
	try:
		stream = BinLogStreamReader(
			connection_settings=MYSQL_SETTINGS,server_id=100,
			only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent, RotateEvent,QueryEvent],blocking=True,
			log_file=logPosConObj["log_file"],log_pos=logPosConObj["log_pos"])
		for binlogevent in stream:
			#prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)
		
			if isinstance(binlogevent, RotateEvent):
				#pprint (vars(binlogevent.packet))
				logPosConObj["log_file"]=binlogevent.packet.event.next_binlog
				logPosConObj["log_pos"]=binlogevent.packet.log_pos
				#logPosObject.setData(logPosConObj)
				continue
			if isinstance(binlogevent, QueryEvent):
				#pprint (vars(binlogevent.packet))
				sendMsg(key,binlogevent.query,binlogevent.timestamp)
				#logPosObject.setData(logPosConObj)
				continue
			for row in binlogevent.rows:
				#dbtable = binlogevent.schema+"_"+binlogevent.table
				if isinstance(binlogevent, DeleteRowsEvent):
					#print 'DeleteRowsEvent'
					sendMsg(key,row.get("values",object),binlogevent.timestamp)
					#func(row.get("values",object))
				elif isinstance(binlogevent, UpdateRowsEvent):
					#print 'UpdateRowsEvent'
					#print row
					sendMsg(key,row,binlogevent.timestamp)
					#func(row.get("after_values",object))
				elif isinstance(binlogevent, WriteRowsEvent):
					#print 'WriteRowsEvent'
					#print row
					sendMsg(key,row.get("values",object),binlogevent.timestamp)
					#func(row.get("values",object))
				#logPosConObj["log_pos"]=binlogevent.packet.log_pos
				#logPosObject.setData(logPosConObj)
		
		stream.close()
	except BaseException as e:
		print(e)
		return
Author: jc3wish, Project: mysqlDev_debug_by_MonitorBinlog, Lines: 46, Source: mysql_dev_debug_monitor.py

Example 15: main

# Required import: from pymysqlreplication import BinLogStreamReader [as alias]
# Or: from pymysqlreplication.BinLogStreamReader import close [as alias]
def main():
    # server_id is your slave identifier, it should be unique.
    # set blocking to True if you want to block and wait for the next event at
    # the end of the stream
    stream = BinLogStreamReader(connection_settings=MYSQL_SETTINGS,
                                server_id=3,
                                blocking=True)

    for binlogevent in stream:
        #print binlogevent
        #if isinstance(binlogevent, QueryEvent):
        #    print binlogevent.query
        if isinstance(binlogevent, WriteRowsEvent):
            for rows in binlogevent.rows:
                print(rows)
                #print binlogevent.query
        #binlogevent.dump()

    stream.close()
Author: 3manuek, Project: python-mysql-replication, Lines: 21, Source: test.py


Note: The pymysqlreplication.BinLogStreamReader.close examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Do not reproduce without permission.