本文整理汇总了Python中pydio.job.localdb.LocalDbHandler.get_local_changes_as_stream方法的典型用法代码示例。如果您正苦于以下问题:Python LocalDbHandler.get_local_changes_as_stream方法的具体用法?Python LocalDbHandler.get_local_changes_as_stream怎么用?Python LocalDbHandler.get_local_changes_as_stream使用的例子?那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pydio.job.localdb.LocalDbHandler的用法示例。
在下文中一共展示了LocalDbHandler.get_local_changes_as_stream方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ContinuousDiffMerger
# 需要导入模块: from pydio.job.localdb import LocalDbHandler [as 别名]
# 或者: from pydio.job.localdb.LocalDbHandler import get_local_changes_as_stream [as 别名]
#.........这里部分代码省略.........
very_first = True
self.remote_target_seq = self.load_remote_changes_in_store(self.remote_seq, self.current_store)
self.current_store.sync()
else:
self.remote_target_seq = 1
self.ping_remote()
except RequestException as ce:
logging.exception(ce)
if not connection_helper.is_connected_to_internet(self.sdk.proxies):
error = _('No Internet connection detected! Waiting for %s seconds to retry') % self.offline_timer
else:
error = _('Connection to server failed, server is probably down. Waiting %s seconds to retry') % self.offline_timer
self.marked_for_snapshot_pathes = []
logging.error(error)
self.logger.log_state(error, "wait")
self.sleep_offline()
continue
except Exception as e:
error = 'Error while connecting to remote server (%s), waiting for %i seconds before retempting ' % (e.message, self.offline_timer)
logging.exception(e)
self.logger.log_state(_('Error while connecting to remote server (%s)') % e.message, "error")
self.marked_for_snapshot_pathes = []
self.sleep_offline()
continue
self.online_status = True
if not self.job_config.server_configs:
self.job_config.server_configs = self.sdk.load_server_configs()
self.sdk.set_server_configs(self.job_config.server_configs)
if self.job_config.direction != 'down' or (self.job_config.direction == 'down' and self.job_config.solve != 'remote'):
logging.info(
'Loading local changes with sequence {0:s} for job id {1:s}'.format(str(self.local_seq),
str(self.job_config.id)))
self.local_target_seq = self.db_handler.get_local_changes_as_stream(self.local_seq, self.current_store.flatten_and_store)
self.current_store.sync()
else:
self.local_target_seq = 1
if not connection_helper.internet_ok:
connection_helper.is_connected_to_internet(self.sdk.proxies)
changes_length = len(self.current_store)
if not changes_length:
self.processing = False
logging.info('No changes detected in ' + self.job_config.id)
self.update_min_seqs_from_store()
self.exit_loop_clean(self.logger)
very_first = False
#logging.info("CheckSync of " + self.job_config.id)
#self.db_handler.list_non_idle_nodes()
if not self.watcher.isAlive() and not self.interrupt:
logging.info("File watcher died, restarting...")
self.watcher.stop()
self.watcher = LocalWatcher(self.job_config.directory,
self.configs_path,
event_handler=self.event_handler)
self.start_watcher()
continue
self.global_progress['status_indexing'] = 1
logging.info('Reducing changes for ' + self.job_config.id)
self.logger.log_state(_('Merging changes between remote and local, please wait...'), 'sync')
# We are updating the status to IDLE here for the nodes which has status as NEW
# The reason is when we create a new sync on the existing folder, some of the files might
# already be synchronized and we ignore those files while we Dedup changes and those files
# remain untouched later.
示例2: ContinuousDiffMerger
# 需要导入模块: from pydio.job.localdb import LocalDbHandler [as 别名]
# 或者: from pydio.job.localdb.LocalDbHandler import get_local_changes_as_stream [as 别名]
#.........这里部分代码省略.........
from pydio.job.change_stores import SqliteChangeStore
self.current_store = SqliteChangeStore(self.data_base + '/changes.sqlite', self.job_config.filters['includes'], self.job_config.filters['excludes'])
self.current_store.open()
try:
if self.job_config.direction != 'up':
logging.info('Loading remote changes with sequence ' + str(self.remote_seq))
if self.remote_seq == 0:
logger.log_state('Gathering data from remote workspace, this can take a while...', 'sync')
very_first = True
self.remote_target_seq = self.load_remote_changes_in_store(self.remote_seq, self.current_store)
self.current_store.sync()
else:
self.remote_target_seq = 1
self.ping_remote()
except ConnectionError as ce:
error = 'No connection detected, waiting %s seconds to retry' % self.offline_timer
logging.error(error)
logger.log_state(error, "wait")
self.sleep_offline()
continue
except Exception as e:
error = 'Error while connecting to remote server (%s), waiting for %i seconds before retempting ' % (e.message, self.offline_timer)
logging.error(error)
logger.log_state('Error while connecting to remote server (%s)' % e.message, "error")
self.sleep_offline()
continue
self.online_status = True
if not self.job_config.server_configs:
self.job_config.server_configs = self.sdk.load_server_configs()
self.sdk.set_server_configs(self.job_config.server_configs)
if self.job_config.direction != 'down':
logging.info('Loading local changes with sequence ' + str(self.local_seq))
self.local_target_seq = self.db_handler.get_local_changes_as_stream(self.local_seq, self.current_store.flatten_and_store)
self.current_store.sync()
else:
self.local_target_seq = 1
logging.info('Reducing changes')
self.current_store.delete_copies()
self.update_min_seqs_from_store()
self.current_store.dedup_changes()
self.update_min_seqs_from_store()
self.current_store.detect_unnecessary_changes(local_sdk=self.system, remote_sdk=self.sdk)
self.update_min_seqs_from_store()
#self.current_store.filter_out_echoes_events()
#self.update_min_seqs_from_store()
self.current_store.clear_operations_buffer()
self.current_store.prune_folders_moves()
self.update_min_seqs_from_store()
store_conflicts = self.current_store.clean_and_detect_conflicts(self.db_handler)
if store_conflicts:
logging.info('Conflicts detected, cannot continue!')
logger.log_state('Conflicts detected, cannot continue!', 'error')
self.current_store.close()
self.sleep_offline()
continue
changes_length = len(self.current_store)
if changes_length:
import change_processor
self.global_progress['queue_length'] = changes_length
logging.info('Processing %i changes' % changes_length)
logger.log_state('Processing %i changes' % changes_length, "start")