Fix inplace-ness of inplace mode
parent 8670a5e903
commit bc6f7817f6
common/datadb.py
@@ -1,4 +1,42 @@
+import os
+from datetime import datetime
+from os.path import join as pathjoin
+from os.path import exists
+
+
 DATADB_ROOT = "/nexus/datadb/backups/"
 DATADB_TMP = "/nexus/datadb/tmp/"
 
 DATADB_DIR_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"  # Same as isoformat(), but we need to parse it back
+
+
+class NoBackupException(Exception):
+    pass
+
+
+def get_backup_dir(backup_name):
+    """
+    Returns path to this profile's backup base dir. The base dir contains the 'data' directory
+    """
+    return pathjoin(DATADB_ROOT, backup_name)
+
+
+def get_latest_backup(backup_name):
+    """
+    Get the absolute local path to a backup or raise an exception if none exists. When getting a backup, sort folder
+    names (they're timestamps) and return newest.
+    :returns: str absolute path to backup seq /0/
+    """
+    backups_dir = pathjoin(get_backup_dir(backup_name), 'data')
+
+    if not exists(backups_dir):
+        raise NoBackupException("Backup {} does not exist".format(backup_name))
+
+    dirs = os.listdir(backups_dir)
+
+    if not dirs:
+        raise NoBackupException("No backups exist for {}".format(backup_name))
+
+    dirs = sorted([datetime.strptime(d, DATADB_DIR_TIMESTAMP_FORMAT) for d in dirs])
+
+    return pathjoin(backups_dir, dirs[-1].strftime(DATADB_DIR_TIMESTAMP_FORMAT), 'data')
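For context, a quick sketch of how get_latest_backup resolves a path against the on-disk layout this module assumes; the profile name and timestamps below are invented for illustration:

# Hypothetical layout under DATADB_ROOT:
#   /nexus/datadb/backups/testdb/data/2016-01-01T00:00:00.000000/data/
#   /nexus/datadb/backups/testdb/data/2016-02-01T00:00:00.000000/data/
from common.datadb import get_latest_backup, NoBackupException

try:
    # Dir names parse back through DATADB_DIR_TIMESTAMP_FORMAT, so sorting
    # the parsed datetimes picks the 2016-02-01 run
    print(get_latest_backup('testdb'))
    # -> /nexus/datadb/backups/testdb/data/2016-02-01T00:00:00.000000/data
except NoBackupException as e:
    print("No backup yet: {}".format(e))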
get_backup (27 lines changed)
@@ -4,31 +4,14 @@ import traceback
 import os
 from sys import exit, stdout
 from os.path import join as pathjoin
-from os.path import exists, getsize
+from os.path import getsize
 from common.cgi import parse_qs, parse_auth, start_response
-from common.datadb import DATADB_ROOT, DATADB_DIR_TIMESTAMP_FORMAT
-from datetime import datetime
-
-
-def get_backup_dir(backup_name):
-    """
-    Get the absolute local path to a backup or raise an exception if none exists. When getting a backup, sort folder
-    names (they're timestamps) and return newest.
-    :returns: str absolute path to backup seq /0/
-    """
-    backups_dir = pathjoin(DATADB_ROOT, backup_name, 'data')
-
-    if not exists(backups_dir):
-        raise Exception("Backup does not exist")
-
-    dirs = sorted([datetime.strptime(d, DATADB_DIR_TIMESTAMP_FORMAT) for d in os.listdir(backups_dir)])
-
-    return os.path.join(backups_dir, dirs[-1].strftime(DATADB_DIR_TIMESTAMP_FORMAT), 'data')
+from common.datadb import get_latest_backup
 
 
 def handle_head(backup_name):
     try:
-        # backup_path = get_backup_dir(backup_name)
+        # backup_path = get_latest_backup(backup_name)
         # TODO appropriate content-length for HEAD
         start_response(extra_headers=['Content-length: 0'])
     except:
@@ -40,7 +23,7 @@ def handle_get_rsync(backup_name):
     """
     Prints the absolute path an rsync backup should pull from
     """
-    backup_path = get_backup_dir(backup_name)
+    backup_path = get_latest_backup(backup_name)
 
     start_response()
     print(backup_path + '/')
@@ -50,7 +33,7 @@ def handle_get_archive(backup_name):
     """
     Returns .tar.gz data to the browser
     """
-    backup_path = pathjoin(get_backup_dir(backup_name), 'backup.tar.gz')
+    backup_path = pathjoin(get_latest_backup(backup_name), 'backup.tar.gz')
 
     with open(backup_path, 'rb') as f:
         start_response(content_type="application/x-gzip",
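Worth noting: the handlers now call the shared helper, which raises NoBackupException instead of a bare Exception and also guards against an existing-but-empty data dir (the removed local copy's sorted(...)[-1] would hit an IndexError there). A hypothetical contrast of the two common.datadb helpers, using an invented profile name:

from os.path import join as pathjoin
from common.datadb import get_backup_dir, get_latest_backup

get_backup_dir('testdb')
# -> /nexus/datadb/backups/testdb  (the profile's base dir)
get_latest_backup('testdb')
# -> /nexus/datadb/backups/testdb/data/<newest timestamp>/data
pathjoin(get_latest_backup('testdb'), 'backup.tar.gz')
# -> the file handle_get_archive now streams to the browser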
new_backup (26 lines changed)
@@ -7,7 +7,8 @@ from os import mkdir, rename, unlink, rmdir, utime
 from os.path import exists
 from os.path import join as pathjoin
 from common.cgi import parse_qs, parse_auth, start_response
-from common.datadb import DATADB_ROOT, DATADB_TMP, DATADB_DIR_TIMESTAMP_FORMAT
+from common.datadb import DATADB_ROOT, DATADB_TMP, DATADB_DIR_TIMESTAMP_FORMAT, get_backup_dir, get_latest_backup, \
+    NoBackupException
 from datetime import datetime
 from shutil import rmtree, move
 from subprocess import Popen, PIPE
@@ -18,13 +19,6 @@ from glob import iglob
 import json
 
 
-def get_backup_dir(backup_name):
-    """
-    Returns path to this profile's backup base dir. The base dir contains the 'data' directory
-    """
-    return pathjoin(DATADB_ROOT, backup_name)
-
-
 def rotate_backups(backup_dir, max_backups=5):
     """
     In the backup dir, cascade backups. List the backup dir and parse folder timestamps. Sort and delete old.
@@ -64,7 +58,7 @@ def prepare_backup_dirs(backup_name, max_backups=5, rotate=True):
     """
     # print("prepare_backup(%s, %s)" % (backup_name, proto))
 
-    # Ensure the following dir exists: <DATADB_ROOT>/<backup_name>/data/0/
+    # Ensure the following dir exists: <DATADB_ROOT>/<backup_name>/data/
     backup_base_path = get_backup_dir(backup_name)
     if not exists(backup_base_path):
         mkdir(backup_base_path)
@@ -73,13 +67,15 @@ def prepare_backup_dirs(backup_name, max_backups=5, rotate=True):
     if not exists(backup_data_path):
         mkdir(backup_data_path)
 
-    if rotate:
-        # Should always return bkname/data/0/data/
-        new_path = rotate_backups(backup_data_path, max_backups=max_backups)
-    else:
-        new_path = prepare_new_backup_dir(backup_data_path)
+    if not rotate:
+        # Get the path to the latest backup if using in place mode
+        # If no backup is found, we'll call the rotate function anyway to get one created
+        try:
+            return get_latest_backup(backup_name)
+        except NoBackupException:
+            pass
 
-    return new_path
+    return rotate_backups(backup_data_path, max_backups=max_backups)
 
 
 def handle_get_rsync(backup_name, sync_prev=False, force_existing=False):
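This last hunk is the actual fix named in the commit title: with rotation disabled, prepare_backup_dirs now returns the newest existing backup dir and only falls back to rotate_backups when no backup exists yet. A hypothetical sequence, assuming rotate=False is how the in-place mode reaches this function:

# First in-place run: nothing exists yet, the NoBackupException is swallowed,
# and rotate_backups() creates and returns a fresh timestamped dir.
first = prepare_backup_dirs('testdb', rotate=False)

# Second in-place run: get_latest_backup() finds that dir and returns it,
# so the backup overwrites in place instead of rotating a new copy.
second = prepare_backup_dirs('testdb', rotate=False)

# Rotating mode is unchanged: it always cascades and hands back a fresh dir.
third = prepare_backup_dirs('testdb', rotate=True)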