This page collects typical usage examples of the Python class bakthat.models.Backups. If you are wondering what the Backups class does or how to use it, the hand-picked examples below should help. It shows 15 code examples of the Backups class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: rotate_backups
def rotate_backups(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Rotate backups using the grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First day of the week (used to determine which weekly backup to keep; Saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).
    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    deleted = []

    backups = Backups.search(filename, destination, profile=profile)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    to_delete = grandfatherson.to_delete(backups_date,
                                         days=int(rotate.conf["days"]),
                                         weeks=int(rotate.conf["weeks"]),
                                         months=int(rotate.conf["months"]),
                                         firstweekday=int(rotate.conf["first_week_day"]),
                                         now=datetime.utcnow())

    for delete_date in to_delete:
        backup_date = int(delete_date.strftime("%s"))
        backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile).get()
        if backup:
            real_key = backup.stored_filename
            log.info("Deleting {0}".format(real_key))
            storage_backend.delete(real_key)
            backup.set_deleted()
            deleted.append(real_key)

    BakSyncer(conf).sync_auto()

    return deleted
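The date selection above is delegated entirely to the grandfatherson library; bakthat only converts the stored backup_date timestamps into datetime objects and passes the retention counts along. A minimal standalone sketch (the dates and retention values are invented for illustration; weekday 5 is Saturday, matching bakthat's documented first_week_day default):

    from datetime import datetime, timedelta

    import grandfatherson

    now = datetime(2013, 6, 15, 12, 0, 0)
    # Fourteen daily backups, newest first.
    backup_dates = [now - timedelta(days=n) for n in range(14)]

    # Keep 3 dailies, 2 weeklies and 1 monthly.
    to_delete = grandfatherson.to_delete(backup_dates,
                                         days=3, weeks=2, months=1,
                                         firstweekday=5,
                                         now=now)
    for stale in sorted(to_delete):
        print(stale)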
Example 2: upgrade_from_shelve
def upgrade_from_shelve():
    if os.path.isfile(os.path.expanduser("~/.bakthat.db")):
        glacier_backend = GlacierBackend()
        glacier_backend.upgrade_from_shelve()

        s3_backend = S3Backend()

        regex_key = re.compile(r"(?P<backup_name>.+)\.(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

        # Old regex kept for backward compatibility (for files without a dot before the date component).
        old_regex_key = re.compile(r"(?P<backup_name>.+)(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

        for generator, backend in [(s3_backend.ls(), "s3"), ([ivt.filename for ivt in Inventory.select()], "glacier")]:
            for key in generator:
                match = regex_key.match(key)

                # Backward compatibility
                if not match:
                    match = old_regex_key.match(key)

                if match:
                    filename = match.group("backup_name")
                    is_enc = bool(match.group("is_enc"))
                    backup_date = int(datetime.strptime(match.group("date_component"), "%Y%m%d%H%M%S").strftime("%s"))
                else:
                    filename = key
                    is_enc = False
                    backup_date = 0

                if backend == "s3":
                    backend_hash = hashlib.sha512(s3_backend.conf.get("access_key") +
                                                  s3_backend.conf.get(s3_backend.container_key)).hexdigest()
                elif backend == "glacier":
                    backend_hash = hashlib.sha512(glacier_backend.conf.get("access_key") +
                                                  glacier_backend.conf.get(glacier_backend.container_key)).hexdigest()

                new_backup = dict(backend=backend,
                                  is_deleted=0,
                                  backup_date=backup_date,
                                  tags="",
                                  stored_filename=key,
                                  filename=filename,
                                  last_updated=int(datetime.utcnow().strftime("%s")),
                                  metadata=dict(is_enc=is_enc),
                                  size=0,
                                  backend_hash=backend_hash)
                try:
                    Backups.upsert(**new_backup)
                except Exception, exc:
                    print exc

        os.remove(os.path.expanduser("~/.bakthat.db"))
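The whole migration hinges on parsing stored key names back into metadata. A quick, self-contained illustration (the sample keys are invented) of what the primary regex extracts:

    import re

    regex_key = re.compile(r"(?P<backup_name>.+)\.(?P<date_component>\d{14})\.tgz(?P<is_enc>\.enc)?")

    for key in ("projects.20130615120000.tgz", "projects.20130615120000.tgz.enc"):
        match = regex_key.match(key)
        # Prints the backup name, the 14-digit timestamp and the encryption flag.
        print("{0} {1} {2}".format(match.group("backup_name"),
                                   match.group("date_component"),
                                   bool(match.group("is_enc"))))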
Example 3: sync
def sync(self):
    """Draft for implementing synchronization of backup data between bakthat clients (hosts).

    Synchronizes the bakthat SQLite database via an HTTP POST request.

    Backups are never really deleted from the SQLite database; we just update the is_deleted key.

    It sends the last server sync timestamp along with the data updated since the last sync.
    The server then returns the backups that have been updated on its side since that sync.

    Both sides (bakthat and the sync server) upsert the latest data available:

    - if a backup doesn't exist yet, it is created.
    - if it has been modified (e.g. deleted, since that is the only action we can take), it is updated.
    """
    log.debug("Start syncing")
    self.register()

    last_sync_ts = Config.get_key("sync_ts", 0)
    to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
    data = dict(sync_ts=last_sync_ts, to_insert_in_mongo=to_insert_in_mongo)

    r_kwargs = self.request_kwargs.copy()
    log.debug("Initial payload: {0}".format(data))

    r_kwargs.update({"data": json.dumps(data)})
    r = requests.post(self.get_resource("backups/sync/status"), **r_kwargs)
    if r.status_code != 200:
        log.error("An error occurred during sync: {0}".format(r.text))
        return

    log.debug("Sync result: {0}".format(r.json()))

    to_insert_in_bakthat = r.json().get("to_insert_in_bakthat")
    sync_ts = r.json().get("sync_ts")

    for newbackup in to_insert_in_bakthat:
        log.debug("Upsert {0}".format(newbackup))
        Backups.upsert(**newbackup)

    Config.set_key("sync_ts", sync_ts)

    log.debug("Sync successful")
Example 4: sync
def sync(self):
    """Draft for implementing synchronization of backup data between bakthat clients (hosts).

    Synchronizes the bakthat SQLite database via an HTTP POST request.

    Backups are never really deleted from the SQLite database; we just update the is_deleted key.

    It sends the last server sync timestamp along with the data updated since the last sync.
    The server then returns the backups that have been updated on its side since that sync.

    On both sides, backups are either created if they don't exist yet, or updated if the incoming version is newer.
    """
    log.debug("Start syncing")
    self.register()

    last_sync_ts = Config.get_key("sync_ts", 0)
    to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
    data = dict(sync_ts=last_sync_ts, new=to_insert_in_mongo)

    r_kwargs = self.request_kwargs.copy()
    log.debug("Initial payload: {0}".format(data))

    r_kwargs.update({"data": json.dumps(data)})
    r = requests.post(self.get_resource("backups/sync"), **r_kwargs)
    if r.status_code != 200:
        log.error("An error occurred during sync: {0}".format(r.text))
        return

    log.debug("Sync result: {0}".format(r.json()))

    to_insert_in_bakthat = r.json().get("updated", [])
    sync_ts = r.json().get("sync_ts")

    for newbackup in to_insert_in_bakthat:
        log.debug("Upsert {0}".format(newbackup))
        Backups.upsert(**newbackup)

    Config.set_key("sync_ts", sync_ts)

    log.debug("Sync successful")
Example 5: delete
def delete(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
    """Delete a backup.

    :type filename: str
    :param filename: Stored filename to delete.

    :type destination: str
    :param destination: glacier|s3|swift

    :type profile: str
    :param profile: Profile name ("default" by default).

    :type conf: dict
    :keyword conf: A dict with a custom configuration overriding/setting the AWS configuration.

    :return: The deleted backup if successful.
    """
    if not filename:
        log.error("No file to delete, use -f to specify one.")
        return

    backup = Backups.match_filename(filename, destination, profile=profile, config=config)
    if not backup:
        log.error("No file matched.")
        return

    key_name = backup.stored_filename

    storage_backend, destination, conf = _get_store_backend(config, destination, profile)

    session_id = str(uuid.uuid4())
    events.before_delete(session_id)

    log.info("Deleting {0}".format(key_name))

    storage_backend.delete(key_name)
    backup.set_deleted()

    events.on_delete(session_id, backup)

    return backup
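A hypothetical call (the stored filename and profile are invented) showing the happy path; note that this variant returns the deleted backup record rather than a bare boolean:

    deleted = delete("projects.20130615120000.tgz", destination="s3", profile="default")
    if deleted:
        print("Removed {0}".format(deleted.stored_filename))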
Example 6: delete
def delete(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Delete a backup.

    :type filename: str
    :param filename: Stored filename to delete.

    :type destination: str
    :param destination: glacier|s3

    :type profile: str
    :param profile: Profile name ("default" by default).

    :type conf: dict
    :keyword conf: A dict with a custom configuration overriding/setting the AWS configuration.

    :rtype: bool
    :return: True if the file is deleted.
    """
    conf = kwargs.get("conf", None)

    if not filename:
        log.error("No file to delete, use -f to specify one.")
        return

    backup = Backups.match_filename(filename, destination, profile=profile)
    if not backup:
        log.error("No file matched.")
        return

    key_name = backup.stored_filename

    storage_backend = _get_store_backend(conf, destination, profile)

    log.info("Deleting {0}".format(key_name))

    storage_backend.delete(key_name)
    backup.set_deleted()

    BakSyncer(conf).sync_auto()

    return True
Example 7: delete_older_than
def delete_older_than(filename, interval, profile="default", config=CONFIG_FILE, destination=None, **kwargs):
    """Delete backups matching the given filename that are older than the given interval string.

    :type filename: str
    :param filename: File/directory name.

    :type interval: str
    :param interval: Interval string like 1M, 1W, 1M3W4h2s...
        (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => years).

    :type destination: str
    :param destination: glacier|s3|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).
    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)

    session_id = str(uuid.uuid4())
    events.before_delete_older_than(session_id)

    interval_seconds = _interval_string_to_seconds(interval)

    deleted = []

    backup_date_filter = int(datetime.utcnow().strftime("%s")) - interval_seconds
    for backup in Backups.search(filename, destination, older_than=backup_date_filter, profile=profile, config=config):
        real_key = backup.stored_filename
        log.info("Deleting {0}".format(real_key))

        storage_backend.delete(real_key)
        backup.set_deleted()
        deleted.append(backup)

    events.on_delete_older_than(session_id, deleted)

    return deleted
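To make the interval grammar concrete, here is an illustrative re-implementation of the parsing step. This is an assumption about how bakthat's _interval_string_to_seconds behaves (months and years are approximated as 30 and 365 days), not the library's actual code:

    import re

    # Unit letters as documented above: s/m/h are lowercase, D/W/M/Y uppercase.
    UNIT_SECONDS = {"s": 1, "m": 60, "h": 3600, "D": 86400,
                    "W": 7 * 86400, "M": 30 * 86400, "Y": 365 * 86400}

    def interval_string_to_seconds(interval):
        """Turn a string like "1M3W4h2s" into a number of seconds."""
        return sum(int(amount) * UNIT_SECONDS[unit]
                   for amount, unit in re.findall(r"(\d+)([smhDWMY])", interval))

    assert interval_string_to_seconds("1W") == 7 * 86400
    assert interval_string_to_seconds("1h30m") == 5400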
Example 8: delete_older_than
def delete_older_than(filename, interval, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Delete backups matching the given filename that are older than the given interval string.

    :type filename: str
    :param filename: File/directory name.

    :type interval: str
    :param interval: Interval string like 1M, 1W, 1M3W4h2s...
        (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => years).

    :type destination: str
    :param destination: glacier|s3

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).
    """
    conf = kwargs.get("conf")
    storage_backend = _get_store_backend(conf, destination, profile)

    interval_seconds = _interval_string_to_seconds(interval)

    deleted = []

    backup_date_filter = int(datetime.utcnow().strftime("%s")) - interval_seconds
    for backup in Backups.search(filename, destination, older_than=backup_date_filter, profile=profile):
        real_key = backup.stored_filename
        log.info("Deleting {0}".format(real_key))

        storage_backend.delete(real_key)
        backup.set_deleted()
        deleted.append(real_key)

    BakSyncer(conf).sync_auto()

    return deleted
Example 9: restore
def restore(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
    """Restore a backup in the current working directory.

    :type filename: str
    :param filename: File/directory to restore.

    :type destination: str
    :param destination: s3|glacier|swift

    :type profile: str
    :param profile: Profile name ("default" by default).

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :return: The restored backup if successful.
    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)

    if not filename:
        log.error("No file to restore, use -f to specify one.")
        return

    backup = Backups.match_filename(filename, destination, profile=profile, config=config)
    if not backup:
        log.error("No file matched.")
        return

    session_id = str(uuid.uuid4())
    events.before_restore(session_id)

    key_name = backup.stored_filename
    log.info("Restoring " + key_name)

    # Ask for the password before the actual download to avoid waiting.
    if key_name and backup.is_encrypted():
        password = kwargs.get("password")
        if not password:
            password = getpass()

    log.info("Downloading...")

    download_kwargs = {}
    if kwargs.get("job_check"):
        download_kwargs["job_check"] = True
        log.info("Job Check: " + repr(download_kwargs))

    out = storage_backend.download(key_name, **download_kwargs)
    if kwargs.get("job_check"):
        log.info("Job Check Request")
        # If it's a job_check call, we return the Glacier job data.
        return out

    if out and backup.is_encrypted():
        log.info("Decrypting...")
        decrypted_out = tempfile.TemporaryFile()
        decrypt(out, decrypted_out, password)
        out = decrypted_out

    if out and (key_name.endswith(".tgz") or key_name.endswith(".tgz.enc")):
        log.info("Uncompressing...")
        out.seek(0)
        if not backup.metadata.get("KeyValue"):
            tar = tarfile.open(fileobj=out)
            tar.extractall()
            tar.close()
        else:
            with closing(GzipFile(fileobj=out, mode="r")) as f:
                with open(backup.stored_filename, "w") as out:
                    out.write(f.read())
    elif out:
        log.info("Backup is not compressed")
        with open(backup.filename, "w") as restored:
            out.seek(0)
            restored.write(out.read())

    events.on_restore(session_id, backup)

    return backup
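Two hypothetical invocations (the filenames are invented). Passing password avoids the interactive getpass prompt for encrypted archives, and job_check=True makes the call return the Glacier job data instead of blocking on a download, as the code above shows:

    # Non-interactive restore of an encrypted backup.
    restore("projects.tgz", destination="s3", password="s3cret")

    # Glacier retrievals are asynchronous; this call only checks the job.
    job_data = restore("projects.tgz", destination="glacier", job_check=True)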
Example 10: show
def show(query="", destination="", tags="", profile="default", config=CONFIG_FILE):
    backups = Backups.search(query, destination, profile=profile, tags=tags, config=config)
    _display_backups(backups)
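show is a thin wrapper around Backups.search followed by display. A direct query sketch (the query string and tag are invented; every keyword argument appears elsewhere on this page):

    for backup in Backups.search("projects", "s3", profile="default", tags="nightly"):
        print("{0}\t{1}".format(backup.stored_filename, backup.backup_date))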
Example 11: backup
# ......... part of the code is omitted here .........
                       backend=destination,
                       is_deleted=False)

    # Useful only when using the bakmanager.io hook.
    backup_key = key

    password = kwargs.get("password", os.environ.get("BAKTHAT_PASSWORD"))
    if password is None and prompt.lower() != "no":
        password = getpass("Password (blank to disable encryption): ")
        if password:
            password2 = getpass("Password confirmation: ")
            if password != password2:
                log.error("Password confirmation doesn't match")
                return

    if not compress:
        log.info("Compression disabled")
        outname = filename
        with open(outname) as outfile:
            backup_data["size"] = os.fstat(outfile.fileno()).st_size
        bakthat_compression = False

    # Check if the file is not already compressed.
    elif mimetypes.guess_type(arcname) == ('application/x-tar', 'gzip'):
        log.info("File already compressed")
        outname = filename

        # Remove the extension to reformat the filename.
        new_arcname = re.sub(r'(\.t(ar\.)?gz)', '', arcname)
        stored_filename = backup_file_fmt.format(new_arcname, date_component)

        with open(outname) as outfile:
            backup_data["size"] = os.fstat(outfile.fileno()).st_size

        bakthat_compression = False
    else:
        # If not, we compress it.
        log.info("Compressing...")

        with tempfile.NamedTemporaryFile(delete=False) as out:
            with closing(tarfile.open(fileobj=out, mode="w:gz")) as tar:
                tar.add(filename, arcname=arcname, exclude=_exclude)
            outname = out.name

            out.seek(0)
            backup_data["size"] = os.fstat(out.fileno()).st_size

            bakthat_compression = True

    bakthat_encryption = False
    if password:
        bakthat_encryption = True
        log.info("Encrypting...")
        encrypted_out = tempfile.NamedTemporaryFile(delete=False)
        encrypt_file(outname, encrypted_out.name, password)
        stored_filename += ".enc"

        # We only remove the file if the archive was created by bakthat.
        if bakthat_compression:
            os.remove(outname)  # remove the non-encrypted tmp file

        outname = encrypted_out.name

        encrypted_out.seek(0)
        backup_data["size"] = os.fstat(encrypted_out.fileno()).st_size

    # Handle tags metadata.
    if isinstance(tags, list):
        tags = " ".join(tags)

    backup_data["tags"] = tags

    backup_data["metadata"] = dict(is_enc=bakthat_encryption,
                                   client=socket.gethostname())

    stored_filename = os.path.join(os.path.dirname(kwargs.get("custom_filename", "")), stored_filename)
    backup_data["stored_filename"] = stored_filename

    access_key = storage_backend.conf.get("access_key")
    container_key = storage_backend.conf.get(storage_backend.container_key)
    backup_data["backend_hash"] = hashlib.sha512(access_key + container_key).hexdigest()

    log.info("Uploading...")
    storage_backend.upload(stored_filename, outname, s3_reduced_redundancy=s3_reduced_redundancy)

    # We only remove the file if the archive was created by bakthat.
    if bakthat_compression or bakthat_encryption:
        os.remove(outname)

    log.debug(backup_data)

    # Insert backup metadata in SQLite.
    backup = Backups.create(**backup_data)

    BakSyncer(conf).sync_auto()

    # bakmanager.io hook, enabled with the -k/--key parameter.
    if backup_key:
        bakmanager_hook(conf, backup_data, backup_key)

    events.on_backup(session_id, backup)

    return backup
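The backend_hash computed above fingerprints where a backup lives: hashing the access key together with the bucket or vault name identifies the storage location without writing credentials into the SQLite database (that reading of its purpose is an inference, not documented behavior). A standalone illustration with placeholder values:

    import hashlib

    access_key = "AKIAEXAMPLE"       # placeholder, not a real key
    container = "my-backup-bucket"   # placeholder bucket/vault name

    # Plain str concatenation, as in the Python 2 code above.
    backend_hash = hashlib.sha512(access_key + container).hexdigest()
    print(backend_hash[:16])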
Example 12: rotate_backups
def rotate_backups(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
    """Rotate backups using the grandfather-father-son rotation scheme.

    :type filename: str
    :param filename: File/directory name.

    :type destination: str
    :param destination: s3|glacier|swift

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :type days: int
    :keyword days: Number of days to keep.

    :type weeks: int
    :keyword weeks: Number of weeks to keep.

    :type months: int
    :keyword months: Number of months to keep.

    :type first_week_day: str
    :keyword first_week_day: First day of the week (used to determine which weekly backup to keep; Saturday by default).

    :rtype: list
    :return: A list containing the deleted keys (S3) or archives (Glacier).
    """
    storage_backend, destination, conf = _get_store_backend(config, destination, profile)
    rotate = RotationConfig(conf, profile)
    if not rotate:
        raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")

    session_id = str(uuid.uuid4())
    events.before_rotate_backups(session_id)

    deleted = []

    backups = Backups.search(filename, destination, profile=profile, config=config)
    backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]

    rotate_kwargs = rotate.conf.copy()
    del rotate_kwargs["first_week_day"]
    for k, v in rotate_kwargs.iteritems():
        rotate_kwargs[k] = int(v)

    rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
    rotate_kwargs["now"] = datetime.utcnow()

    to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)

    for delete_date in to_delete:
        try:
            backup_date = int(delete_date.strftime("%s"))
            backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile, config=config).get()
            if backup:
                real_key = backup.stored_filename
                log.info("Deleting {0}".format(real_key))

                storage_backend.delete(real_key)
                backup.set_deleted()

                deleted.append(backup)
        except Exception, exc:
            log.error("Error when deleting {0}".format(backup))
            log.exception(exc)
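Worth noting in this variant: retention values come out of the configuration as strings and must be cast to int, and the configuration key first_week_day is renamed to grandfatherson's firstweekday parameter. A compact sketch of that normalization step (the configuration values are invented):

    conf = {"days": "7", "weeks": "4", "months": "6", "first_week_day": "5"}

    rotate_kwargs = dict((k, int(v)) for k, v in conf.items() if k != "first_week_day")
    rotate_kwargs["firstweekday"] = int(conf["first_week_day"])

    print(rotate_kwargs)  # e.g. {'days': 7, 'weeks': 4, 'months': 6, 'firstweekday': 5}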
Example 13: show
def show(query="", destination="", tags="", profile="default", help="Profile, blank to show all"):
    backups = Backups.search(query, destination, profile=profile, tags=tags)
    _display_backups(backups)
Example 14: backup
# ......... part of the code is omitted here .........

    :rtype: dict
    :return: A dict containing the following keys: stored_filename, size, metadata, backend and filename.
    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)

    backup_file_fmt = "{0}.{1}.tgz"

    log.info("Backing up " + filename)
    arcname = filename.strip('/').split('/')[-1]

    now = datetime.utcnow()
    date_component = now.strftime("%Y%m%d%H%M%S")
    stored_filename = backup_file_fmt.format(arcname, date_component)

    backup_date = int(now.strftime("%s"))
    backup_data = dict(filename=kwargs.get("custom_filename", arcname),
                       backup_date=backup_date,
                       last_updated=backup_date,
                       backend=destination,
                       is_deleted=False)

    password = kwargs.get("password")
    if password is None and prompt.lower() != "no":
        password = getpass("Password (blank to disable encryption): ")
        if password:
            password2 = getpass("Password confirmation: ")
            if password != password2:
                log.error("Password confirmation doesn't match")
                return

    # Check if the file is not already compressed.
    if mimetypes.guess_type(arcname) == ('application/x-tar', 'gzip'):
        log.info("File already compressed")
        outname = filename

        # Remove the extension to reformat the filename.
        new_arcname = re.sub(r'(\.t(ar\.)?gz)', '', arcname)
        stored_filename = backup_file_fmt.format(new_arcname, date_component)

        with open(outname) as outfile:
            backup_data["size"] = os.fstat(outfile.fileno()).st_size

        bakthat_compression = False
    else:
        # If not, we compress it.
        log.info("Compressing...")
        with tempfile.NamedTemporaryFile(delete=False) as out:
            with closing(tarfile.open(fileobj=out, mode="w:gz")) as tar:
                tar.add(filename, arcname=arcname)
            outname = out.name

            out.seek(0)
            backup_data["size"] = os.fstat(out.fileno()).st_size

            bakthat_compression = True

    bakthat_encryption = False
    if password:
        bakthat_encryption = True
        log.info("Encrypting...")
        encrypted_out = tempfile.NamedTemporaryFile(delete=False)
        encrypt_file(outname, encrypted_out.name, password)
        stored_filename += ".enc"

        # We only remove the file if the archive was created by bakthat.
        if bakthat_compression:
            os.remove(outname)  # remove the non-encrypted tmp file

        outname = encrypted_out.name

        encrypted_out.seek(0)
        backup_data["size"] = os.fstat(encrypted_out.fileno()).st_size

    # Handle tags metadata.
    if isinstance(tags, list):
        tags = " ".join(tags)

    backup_data["tags"] = tags

    backup_data["metadata"] = dict(is_enc=bakthat_encryption)
    backup_data["stored_filename"] = stored_filename

    access_key = storage_backend.conf.get("access_key")
    container_key = storage_backend.conf.get(storage_backend.container_key)
    backup_data["backend_hash"] = hashlib.sha512(access_key + container_key).hexdigest()

    log.info("Uploading...")
    storage_backend.upload(stored_filename, outname)

    # We only remove the file if the archive was created by bakthat.
    if bakthat_encryption:
        os.remove(outname)

    log.debug(backup_data)

    # Insert backup metadata in SQLite.
    Backups.create(**backup_data)

    BakSyncer(conf).sync_auto()

    return backup_data
Example 15: restore
def restore(filename, destination=DEFAULT_DESTINATION, profile="default", **kwargs):
    """Restore a backup in the current working directory.

    :type filename: str
    :param filename: File/directory to restore.

    :type destination: str
    :param destination: s3|glacier

    :type profile: str
    :param profile: Profile name ("default" by default).

    :type conf: dict
    :keyword conf: Override/set AWS configuration.

    :rtype: bool
    :return: True if successful.
    """
    conf = kwargs.get("conf", None)
    storage_backend = _get_store_backend(conf, destination, profile)

    if not filename:
        log.error("No file to restore, use -f to specify one.")
        return

    backup = Backups.match_filename(filename, destination, profile=profile)
    if not backup:
        log.error("No file matched.")
        return

    key_name = backup.stored_filename
    log.info("Restoring " + key_name)

    # Ask for the password before the actual download to avoid waiting.
    if key_name and key_name.endswith(".enc"):
        password = kwargs.get("password")
        if not password:
            password = getpass()

    log.info("Downloading...")

    download_kwargs = {}
    if kwargs.get("job_check"):
        download_kwargs["job_check"] = True
        log.info("Job Check: " + repr(download_kwargs))

    out = storage_backend.download(key_name, **download_kwargs)
    if kwargs.get("job_check"):
        log.info("Job Check Request")
        # If it's a job_check call, we return the Glacier job data.
        return out

    if out and key_name.endswith(".enc"):
        log.info("Decrypting...")
        decrypted_out = tempfile.TemporaryFile()
        decrypt(out, decrypted_out, password)
        out = decrypted_out

    if out:
        log.info("Uncompressing...")
        out.seek(0)
        tar = tarfile.open(fileobj=out)
        tar.extractall()
        tar.close()

    return True
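Unlike Example 9, which consults the stored metadata via backup.is_encrypted(), this older variant detects encryption purely from the ".enc" suffix of the stored key. A tiny illustration of that naming convention (the keys are invented):

    for key in ("data.20130615120000.tgz", "data.20130615120000.tgz.enc"):
        print("{0} -> {1}".format(key, "encrypted" if key.endswith(".enc") else "plain"))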