This page collects typical usage examples of the Python class pydio.job.localdb.LocalDbHandler. If you are unsure what LocalDbHandler is for or how to use it, the curated examples below may help.
Eight code examples of the LocalDbHandler class are shown below, sorted by popularity by default.
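Before diving in, here is a minimal sketch of how the handler tends to be constructed and queried, using only the constructor arguments and method names that appear in the examples below; the two paths are hypothetical placeholders (in the examples they come from JobsLoader).

    from pydio.job.localdb import LocalDbHandler

    # Hypothetical paths; in the real examples they are derived from JobsLoader.
    job_data_path = "/home/user/.pydio/my-job"    # where the job's databases live
    local_directory = "/home/user/Pydio"          # the locally synchronised folder

    db_handler = LocalDbHandler(job_data_path, local_directory)
    print(db_handler.get_node_status("/docs/report.txt"))   # status of a single file
    print(db_handler.get_directory_node_status("/docs"))    # aggregated folder status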
Example 1: get
def get(self, job_id='', relative_path=''):
    """
    Retrieves the stat info for a given file, or lists the active job details.
    :param job_id: (optional) job id of the file/folder
    :param relative_path: (optional) path of the file/folder, relative
        to the corresponding repository (job_id)
    :returns: a JSON response
    """
    if request.path == '/stat':
        jobs = JobsLoader.Instance().get_jobs()
        json_jobs = {}
        for job in jobs:
            if jobs[job].active:
                json_jobs.update({jobs[job].id: [jobs[job].directory, jobs[job].server, jobs[job].label, jobs[job].workspace]})
        return json_jobs
    else:
        directory_path = JobsLoader.Instance().get_job(job_id).directory
        base_path = JobsLoader.Instance().build_job_data_path(job_id)
        path = os.path.join(directory_path, relative_path)
        # Get the status of the file (idle/busy/...) via a join of the
        # ajxp_index and ajxp_node_status tables
        db_handler = LocalDbHandler(base_path, directory_path)
        if Path(str(path.encode("utf-8"))).is_dir():
            node_status = db_handler.get_directory_node_status("/" + relative_path)
        else:
            node_status = db_handler.get_node_status("/" + relative_path)
        return {"node_status": node_status}
Example 2: post
def post(self):
    json_conflict = request.get_json()
    job_id = json_conflict['job_id']
    try:
        job_config = JobsLoader.Instance().get_job(job_id)
    except Exception:
        return "Can't find any job config with this ID.", 404
    dbHandler = LocalDbHandler(JobsLoader.Instance().build_job_data_path(job_id))
    dbHandler.update_node_status(json_conflict['node_path'], json_conflict['status'])
    if not dbHandler.count_conflicts() and job_config.active:
        t = PydioScheduler.Instance().get_thread(job_id)
        if t:
            t.start_now()
    return json_conflict
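Example 2 shows the conflict-resolution flow: update_node_status records the user's decision for a node, and once count_conflicts() returns zero the scheduler thread is resumed. A condensed, hypothetical sketch of just the LocalDbHandler part (the status string is an assumption, not a documented value):

    from pydio.job.localdb import LocalDbHandler

    db_handler = LocalDbHandler("/home/user/.pydio/my-job")  # data path only, as in example 2
    # Mark a conflicted node as resolved; "SOLVED:KEEPLOCAL" is a made-up status value.
    db_handler.update_node_status("/docs/report.txt", "SOLVED:KEEPLOCAL")
    if not db_handler.count_conflicts():
        print("No conflicts left; the sync job can be restarted.")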
Example 3: __init__
def __init__(self, job_config, job_data_path, pub_socket=False):
    threading.Thread.__init__(self)
    self.data_base = job_data_path
    self.job_config = job_config
    self.progress = 0
    self.basepath = job_config.directory
    self.ws_id = job_config.workspace
    self.sdk = PydioSdk(
        job_config.server,
        ws_id=self.ws_id,
        remote_folder=job_config.remote_folder,
        user_id=job_config.user_id
    )
    self.system = SystemSdk(job_config.directory)
    self.remote_seq = 0
    self.local_seq = 0
    self.local_target_seq = 0
    self.remote_target_seq = 0
    self.local_seqs = []
    self.remote_seqs = []
    self.db_handler = LocalDbHandler(self.data_base, job_config.directory)
    self.interrupt = False
    self.online_timer = 10
    self.offline_timer = 60
    self.online_status = True
    self.job_status_running = True
    self.direction = job_config.direction
    if pub_socket:
        self.pub_socket = pub_socket
        self.info('Job Started', toUser='START', channel='status')
    if os.path.exists(self.data_base + "/sequences"):
        sequences = pickle.load(open(self.data_base + "/sequences", "rb"))
        self.remote_seq = sequences['remote']
        self.local_seq = sequences['local']
    if job_config.direction != 'down':
        self.watcher = LocalWatcher(job_config.directory,
                                    job_config.filters['includes'],
                                    job_config.filters['excludes'],
                                    job_data_path)
    dispatcher.connect(self.handle_progress_event, signal=PROGRESS_SIGNAL, sender=dispatcher.Any)
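The "sequences" file loaded above is a pickled dict with 'local' and 'remote' keys. The writing side is not part of this example, but a counterpart sketch, assuming the same file name and layout as the loading code, would be:

    import pickle

    def save_sequences(data_base, local_seq, remote_seq):
        # Persist the last processed sequence numbers so that a restarted
        # job can resume from where the previous run left off.
        with open(data_base + "/sequences", "wb") as f:
            pickle.dump({'local': local_seq, 'remote': remote_seq}, f)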
Example 4: __init__
def __init__(self, job_config, job_data_path):
    """
    Initialize thread internals
    :param job_config: JobConfig instance
    :param job_data_path: Filesystem path where the job data are stored
    :return:
    """
    threading.Thread.__init__(self)
    self.last_run = 0
    self.configs_path = job_data_path
    self.job_config = job_config
    sqlite_files = [file for file in os.listdir(self.configs_path) if file.endswith(".sqlite")]
    for sqlite_file in sqlite_files:
        try:
            exists_and_correct = check_sqlite_file(os.path.join(self.configs_path, sqlite_file))
            if exists_and_correct:
                logging.info("Structure and Integrity of SQLite file %s is intact" % str(
                    os.path.join(self.configs_path, sqlite_file)))
        except DBCorruptedException as e:
            logging.debug("SQLite file %s is corrupted (Reason: %s), Deleting file and Reinitialising sync"
                          % (str(os.path.join(self.configs_path, sqlite_file)), e.message))
            os.unlink(os.path.join(self.configs_path, sqlite_file))
            self.update_sequences_file(0, 0)
    self.init_global_progress()
    self.basepath = job_config.directory
    self.ws_id = job_config.workspace
    self.sdk = PydioSdk(
        job_config.server,
        ws_id=self.ws_id,
        remote_folder=job_config.remote_folder,
        user_id=job_config.user_id,
        device_id=ConfigManager.Instance().get_device_id(),
        skip_ssl_verify=job_config.trust_ssl,
        proxies=ConfigManager.Instance().get_defined_proxies(),
        timeout=job_config.timeout
    )
    self.system = SystemSdk(job_config.directory)
    self.remote_seq = 0
    self.local_seq = 0
    self.local_target_seq = 0
    self.remote_target_seq = 0
    self.local_seqs = []
    self.remote_seqs = []
    self.db_handler = LocalDbHandler(self.configs_path, job_config.directory)
    self.interrupt = False
    self.event_timer = 2
    self.online_timer = job_config.online_timer
    self.offline_timer = 60
    self.online_status = True
    self.job_status_running = True
    self.direction = job_config.direction
    self.event_logger = EventLogger(self.configs_path)
    self.processing_signals = {}
    self.current_tasks = []
    self.event_handler = None
    self.watcher = None
    self.watcher_first_run = True
    # TODO: TO BE LOADED FROM CONFIG
    self.storage_watcher = job_config.label.startswith('LSYNC')
    self.marked_for_snapshot_pathes = []
    self.processing = False  # indicates whether changes are being processed
    dispatcher.send(signal=PUBLISH_SIGNAL, sender=self, channel='status', message='START')
    if job_config.direction != 'down' or (self.job_config.direction == 'down' and self.job_config.solve != 'remote'):
        self.event_handler = SqlEventHandler(includes=job_config.filters['includes'],
                                             excludes=job_config.filters['excludes'],
                                             basepath=job_config.directory,
                                             job_data_path=self.configs_path)
        self.watcher = LocalWatcher(job_config.directory,
                                    self.configs_path,
                                    event_handler=self.event_handler)
        self.db_handler.check_lock_on_event_handler(self.event_handler)
    if os.path.exists(os.path.join(self.configs_path, "sequences")):
        try:
            with open(os.path.join(self.configs_path, "sequences"), "rb") as f:
                sequences = pickle.load(f)
            self.remote_seq = sequences['remote']
            self.local_seq = sequences['local']
            if self.event_handler:
                self.event_handler.last_seq_id = self.local_seq
        except Exception as e:
            logging.exception(e)
            # Wrong content, remove sequences file.
            os.unlink(os.path.join(self.configs_path, "sequences"))
    dispatcher.connect(self.handle_transfer_rate_event, signal=TRANSFER_RATE_SIGNAL, sender=self.sdk)
    dispatcher.connect(self.handle_transfer_callback_event, signal=TRANSFER_CALLBACK_SIGNAL, sender=self.sdk)
    if self.job_config.frequency == 'manual':
        self.job_status_running = False
    self.logger = EventLogger(self.configs_path)
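check_sqlite_file itself is not shown on this page. Assuming it relies on SQLite's built-in self-check, a minimal stand-in could look like this (the exception class and the exact behaviour are assumptions):

    import sqlite3

    class DBCorruptedException(Exception):
        pass

    def check_sqlite_file(path):
        # Run SQLite's built-in integrity check; any result other than "ok"
        # means the database structure is damaged.
        try:
            conn = sqlite3.connect(path)
            result = conn.execute("PRAGMA integrity_check").fetchone()[0]
            conn.close()
        except sqlite3.DatabaseError as e:
            raise DBCorruptedException(str(e))
        if result != "ok":
            raise DBCorruptedException(result)
        return True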
Example 5: ContinuousDiffMerger
class ContinuousDiffMerger(threading.Thread):
    """Main thread grabbing changes from both sides, computing the necessary changes to apply, and applying them"""

    @pydio_profile
    def __init__(self, job_config, job_data_path):
        """
        Initialize thread internals
        :param job_config: JobConfig instance
        :param job_data_path: Filesystem path where the job data are stored
        :return:
        """
        threading.Thread.__init__(self)
        self.last_run = 0
        self.configs_path = job_data_path
        self.job_config = job_config
        sqlite_files = [file for file in os.listdir(self.configs_path) if file.endswith(".sqlite")]
        for sqlite_file in sqlite_files:
            try:
                exists_and_correct = check_sqlite_file(os.path.join(self.configs_path, sqlite_file))
                if exists_and_correct:
                    logging.info("Structure and Integrity of SQLite file %s is intact" % str(
                        os.path.join(self.configs_path, sqlite_file)))
            except DBCorruptedException as e:
                logging.debug("SQLite file %s is corrupted (Reason: %s), Deleting file and Reinitialising sync"
                              % (str(os.path.join(self.configs_path, sqlite_file)), e.message))
                os.unlink(os.path.join(self.configs_path, sqlite_file))
                self.update_sequences_file(0, 0)
        self.init_global_progress()
        self.basepath = job_config.directory
        self.ws_id = job_config.workspace
        self.sdk = PydioSdk(
            job_config.server,
            ws_id=self.ws_id,
            remote_folder=job_config.remote_folder,
            user_id=job_config.user_id,
            device_id=ConfigManager.Instance().get_device_id(),
            skip_ssl_verify=job_config.trust_ssl,
            proxies=ConfigManager.Instance().get_defined_proxies(),
            timeout=job_config.timeout
        )
        self.system = SystemSdk(job_config.directory)
        self.remote_seq = 0
        self.local_seq = 0
        self.local_target_seq = 0
        self.remote_target_seq = 0
        self.local_seqs = []
        self.remote_seqs = []
        self.db_handler = LocalDbHandler(self.configs_path, job_config.directory)
        self.interrupt = False
        self.event_timer = 2
        self.online_timer = job_config.online_timer
        self.offline_timer = 60
        self.online_status = True
        self.job_status_running = True
        self.direction = job_config.direction
        self.event_logger = EventLogger(self.configs_path)
        self.processing_signals = {}
        self.current_tasks = []
        self.event_handler = None
        self.watcher = None
        self.watcher_first_run = True
        # TODO: TO BE LOADED FROM CONFIG
        self.storage_watcher = job_config.label.startswith('LSYNC')
        self.marked_for_snapshot_pathes = []
        self.processing = False  # indicates whether changes are being processed
        dispatcher.send(signal=PUBLISH_SIGNAL, sender=self, channel='status', message='START')
        if job_config.direction != 'down' or (self.job_config.direction == 'down' and self.job_config.solve != 'remote'):
            self.event_handler = SqlEventHandler(includes=job_config.filters['includes'],
                                                 excludes=job_config.filters['excludes'],
                                                 basepath=job_config.directory,
                                                 job_data_path=self.configs_path)
            self.watcher = LocalWatcher(job_config.directory,
                                        self.configs_path,
                                        event_handler=self.event_handler)
            self.db_handler.check_lock_on_event_handler(self.event_handler)
        if os.path.exists(os.path.join(self.configs_path, "sequences")):
            try:
                with open(os.path.join(self.configs_path, "sequences"), "rb") as f:
                    sequences = pickle.load(f)
                self.remote_seq = sequences['remote']
                self.local_seq = sequences['local']
                if self.event_handler:
                    self.event_handler.last_seq_id = self.local_seq
            except Exception as e:
                logging.exception(e)
                # Wrong content, remove sequences file.
                os.unlink(os.path.join(self.configs_path, "sequences"))
        dispatcher.connect(self.handle_transfer_rate_event, signal=TRANSFER_RATE_SIGNAL, sender=self.sdk)
        dispatcher.connect(self.handle_transfer_callback_event, signal=TRANSFER_CALLBACK_SIGNAL, sender=self.sdk)
        if self.job_config.frequency == 'manual':
            self.job_status_running = False
    # ... (rest of the class omitted) ...
Example 6: ContinuousDiffMerger
class ContinuousDiffMerger(threading.Thread):
    """Main thread grabbing changes from both sides, computing the necessary changes to apply, and applying them"""

    def __init__(self, job_config, job_data_path, pub_socket=False):
        threading.Thread.__init__(self)
        self.data_base = job_data_path
        self.job_config = job_config
        self.progress = 0
        self.basepath = job_config.directory
        self.ws_id = job_config.workspace
        self.sdk = PydioSdk(
            job_config.server,
            ws_id=self.ws_id,
            remote_folder=job_config.remote_folder,
            user_id=job_config.user_id
        )
        self.system = SystemSdk(job_config.directory)
        self.remote_seq = 0
        self.local_seq = 0
        self.local_target_seq = 0
        self.remote_target_seq = 0
        self.local_seqs = []
        self.remote_seqs = []
        self.db_handler = LocalDbHandler(self.data_base, job_config.directory)
        self.interrupt = False
        self.online_timer = 10
        self.offline_timer = 60
        self.online_status = True
        self.job_status_running = True
        self.direction = job_config.direction
        if pub_socket:
            self.pub_socket = pub_socket
            self.info('Job Started', toUser='START', channel='status')
        if os.path.exists(self.data_base + "/sequences"):
            sequences = pickle.load(open(self.data_base + "/sequences", "rb"))
            self.remote_seq = sequences['remote']
            self.local_seq = sequences['local']
        if job_config.direction != 'down':
            self.watcher = LocalWatcher(job_config.directory,
                                        job_config.filters['includes'],
                                        job_config.filters['excludes'],
                                        job_data_path)
        dispatcher.connect(self.handle_progress_event, signal=PROGRESS_SIGNAL, sender=dispatcher.Any)

    def handle_progress_event(self, sender, progress):
        self.info('Job progress is %s' % progress)

    def is_running(self):
        return self.job_status_running

    def pause(self):
        self.job_status_running = False
        self.info('Job Paused', toUser='PAUSE', channel='status')

    def resume(self):
        self.job_status_running = True
        self.info('Job Started', toUser='START', channel='status')

    def stop(self):
        if hasattr(self, 'watcher'):
            self.watcher.stop()
        self.interrupt = True

    def run(self):
        if hasattr(self, 'watcher'):
            self.watcher.start()
        while not self.interrupt:
            try:
                if not self.job_status_running:
                    time.sleep(self.online_timer)
                    continue
                if not self.system.check_basepath():
                    logging.info('Cannot find local folder! Did you disconnect a volume? Waiting %s seconds before retry' % self.offline_timer)
                    time.sleep(self.offline_timer)
                    continue
                # Load local and/or remote changes, depending on the direction
                local_changes = dict(data=dict(), path_to_seqs=dict())
                remote_changes = dict(data=dict(), path_to_seqs=dict())
                try:
                    if self.job_config.direction != 'up':
                        logging.info('Loading remote changes with sequence ' + str(self.remote_seq))
                        self.remote_target_seq = self.get_remote_changes(self.remote_seq, remote_changes)
                    else:
                        self.remote_target_seq = 1
                        self.ping_remote()
                except ConnectionError as ce:
                    logging.info('No connection detected, waiting %s seconds to retry' % self.offline_timer)
                    self.online_status = False
                    time.sleep(self.offline_timer)
                    continue
                except Exception as e:
                    logging.info('Error while connecting to remote server (%s), waiting %i seconds before re-attempting' % (e.message, self.offline_timer))
                    self.online_status = False
                    # ... (rest of the method omitted) ...
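The run() loop above is a poll-with-backoff pattern: sleep online_timer between normal cycles, sleep offline_timer after a failure, and funnel every hazard into "sleep and continue". Stripped of the Pydio specifics, the skeleton looks like this (the job object and its attributes are hypothetical):

    import time
    import logging

    def sync_loop(job, online_timer=10, offline_timer=60):
        # Generic skeleton of the loop in example 6; 'job' is a hypothetical
        # object exposing interrupted / running flags and a do_one_cycle() method.
        while not job.interrupted:
            if not job.running:
                time.sleep(online_timer)   # paused: check again later
                continue
            try:
                job.do_one_cycle()
            except Exception as e:
                logging.info('Cycle failed (%s), retrying in %i seconds', e, offline_timer)
                time.sleep(offline_timer)  # back off after a failure
                continue
            time.sleep(online_timer)       # normal polling interval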
Example 7: __init__
def __init__(self, job_config, job_data_path):
    """
    Initialize thread internals
    :param job_config: JobConfig instance
    :param job_data_path: Filesystem path where the job data are stored
    :return:
    """
    threading.Thread.__init__(self)
    self.last_run = 0
    self.data_base = job_data_path
    self.job_config = job_config
    self.init_global_progress()
    self.basepath = job_config.directory
    self.ws_id = job_config.workspace
    self.sdk = PydioSdk(
        job_config.server,
        ws_id=self.ws_id,
        remote_folder=job_config.remote_folder,
        user_id=job_config.user_id,
        device_id=ConfigManager.Instance().get_device_id(),
        skip_ssl_verify=job_config.trust_ssl
    )
    self.system = SystemSdk(job_config.directory)
    self.remote_seq = 0
    self.local_seq = 0
    self.local_target_seq = 0
    self.remote_target_seq = 0
    self.local_seqs = []
    self.remote_seqs = []
    self.db_handler = LocalDbHandler(self.data_base, job_config.directory)
    self.interrupt = False
    self.event_timer = 2
    self.online_timer = 10
    self.offline_timer = 60
    self.online_status = True
    self.job_status_running = True
    self.direction = job_config.direction
    self.event_logger = EventLogger(self.data_base)
    self.processing_signals = {}
    self.current_tasks = []
    self.event_handler = None
    self.watcher = None
    self.watcher_first_run = True
    dispatcher.send(signal=PUBLISH_SIGNAL, sender=self, channel='status', message='START')
    if job_config.direction != 'down':
        self.event_handler = SqlEventHandler(includes=job_config.filters['includes'],
                                             excludes=job_config.filters['excludes'],
                                             basepath=job_config.directory,
                                             job_data_path=job_data_path)
        self.watcher = LocalWatcher(job_config.directory,
                                    job_data_path,
                                    event_handler=self.event_handler)
        self.db_handler.check_lock_on_event_handler(self.event_handler)
    if os.path.exists(self.data_base + "/sequences"):
        try:
            sequences = pickle.load(open(self.data_base + "/sequences", "rb"))
            self.remote_seq = sequences['remote']
            self.local_seq = sequences['local']
            if self.event_handler:
                self.event_handler.last_seq_id = self.local_seq
        except Exception:
            # Wrong content, remove sequences file.
            os.unlink(self.data_base + "/sequences")
    dispatcher.connect(self.handle_transfer_rate_event, signal=TRANSFER_RATE_SIGNAL, sender=dispatcher.Any)
    dispatcher.connect(self.handle_transfer_callback_event, signal=TRANSFER_CALLBACK_SIGNAL, sender=dispatcher.Any)
    if self.job_config.frequency == 'manual':
        self.job_status_running = False
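The dispatcher calls in these examples use PyDispatcher's signal API: dispatcher.send publishes a signal with keyword arguments, and dispatcher.connect subscribes a handler, optionally filtered by sender. A self-contained sketch of the same wiring, with a made-up signal name:

    from pydispatch import dispatcher

    DEMO_SIGNAL = 'demo-signal'  # hypothetical signal name

    def on_status(sender, message):
        # PyDispatcher passes the keyword arguments of send() to the handler.
        print('got %s from %r' % (message, sender))

    dispatcher.connect(on_status, signal=DEMO_SIGNAL, sender=dispatcher.Any)
    dispatcher.send(signal=DEMO_SIGNAL, sender='demo', message='START')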
Example 8: ContinuousDiffMerger
class ContinuousDiffMerger(threading.Thread):
    """Main thread grabbing changes from both sides, computing the necessary changes to apply, and applying them"""

    def __init__(self, job_config, job_data_path):
        """
        Initialize thread internals
        :param job_config: JobConfig instance
        :param job_data_path: Filesystem path where the job data are stored
        :return:
        """
        threading.Thread.__init__(self)
        self.last_run = 0
        self.data_base = job_data_path
        self.job_config = job_config
        self.init_global_progress()
        self.basepath = job_config.directory
        self.ws_id = job_config.workspace
        self.sdk = PydioSdk(
            job_config.server,
            ws_id=self.ws_id,
            remote_folder=job_config.remote_folder,
            user_id=job_config.user_id,
            device_id=ConfigManager.Instance().get_device_id(),
            skip_ssl_verify=job_config.trust_ssl
        )
        self.system = SystemSdk(job_config.directory)
        self.remote_seq = 0
        self.local_seq = 0
        self.local_target_seq = 0
        self.remote_target_seq = 0
        self.local_seqs = []
        self.remote_seqs = []
        self.db_handler = LocalDbHandler(self.data_base, job_config.directory)
        self.interrupt = False
        self.event_timer = 2
        self.online_timer = 10
        self.offline_timer = 60
        self.online_status = True
        self.job_status_running = True
        self.direction = job_config.direction
        self.event_logger = EventLogger(self.data_base)
        self.processing_signals = {}
        self.current_tasks = []
        self.event_handler = None
        self.watcher = None
        self.watcher_first_run = True
        dispatcher.send(signal=PUBLISH_SIGNAL, sender=self, channel='status', message='START')
        if job_config.direction != 'down':
            self.event_handler = SqlEventHandler(includes=job_config.filters['includes'],
                                                 excludes=job_config.filters['excludes'],
                                                 basepath=job_config.directory,
                                                 job_data_path=job_data_path)
            self.watcher = LocalWatcher(job_config.directory,
                                        job_data_path,
                                        event_handler=self.event_handler)
            self.db_handler.check_lock_on_event_handler(self.event_handler)
        if os.path.exists(self.data_base + "/sequences"):
            try:
                sequences = pickle.load(open(self.data_base + "/sequences", "rb"))
                self.remote_seq = sequences['remote']
                self.local_seq = sequences['local']
                if self.event_handler:
                    self.event_handler.last_seq_id = self.local_seq
            except Exception:
                # Wrong content, remove sequences file.
                os.unlink(self.data_base + "/sequences")
        dispatcher.connect(self.handle_transfer_rate_event, signal=TRANSFER_RATE_SIGNAL, sender=dispatcher.Any)
        dispatcher.connect(self.handle_transfer_callback_event, signal=TRANSFER_CALLBACK_SIGNAL, sender=dispatcher.Any)
        if self.job_config.frequency == 'manual':
            self.job_status_running = False

    def handle_transfer_callback_event(self, sender, change):
        self.processing_signals[change['target']] = change
        self.global_progress["queue_bytesize"] -= change['bytes_sent']
        self.global_progress["queue_done"] += float(change['bytes_sent']) / float(change["total_size"])

    def handle_transfer_rate_event(self, sender, transfer_rate):
        """
        Handler for TRANSFER_RATE_SIGNAL to update the transfer rate internally. It is averaged with the previous value.
        :param sender: Any
        :param transfer_rate: float
        :return:
        """
        if self.global_progress['last_transfer_rate'] > 0:
            self.global_progress['last_transfer_rate'] = (float(transfer_rate) + self.global_progress['last_transfer_rate']) / 2.0
        else:
            self.global_progress['last_transfer_rate'] = float(transfer_rate)

    def is_running(self):
        """
        Whether the job is in running state or not.
        :return: bool
        """
        # ... (rest of the class omitted) ...
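handle_transfer_rate_event above smooths the rate by averaging each new sample with the stored value, i.e. rate_n = (sample_n + rate_{n-1}) / 2, which weights past samples 1/2, 1/4, 1/8, and so on. A tiny worked sketch of the same rule:

    def smooth(previous, sample):
        # Same averaging rule as handle_transfer_rate_event: seed with the
        # first sample, then average each new sample with the stored value.
        return float(sample) if previous <= 0 else (float(sample) + previous) / 2.0

    rate = 0.0
    for sample in (100.0, 200.0, 200.0):
        rate = smooth(rate, sample)
        print(rate)   # prints 100.0, then 150.0, then 175.0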