changed the healthcheck
All checks were successful
continuous-integration/drone/push Build is passing
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
parent
c79625a64a
commit
f17988bfa9
72
.drone.yml
72
.drone.yml
@ -1,37 +1,35 @@
|
|||||||
# Drone CI pipeline: build and push the seafile-backup image.
# "docker" pushes :latest from main; "docker-test" pushes :dev from dev* branches.
kind: pipeline
name: default

steps:
- name: docker
  image: plugins/docker
  settings:
    registry: git.jonb.io
    dry_run: false
    username: jblu
    password:
      from_secret: gittea_drone
    repo: git.jonb.io/jblu/seafile-backup
    tags:
      - latest
  when:
    branch:
    - main
    event:
    - push
- name: docker-test
  image: plugins/docker
  settings:
    registry: git.jonb.io
    dry_run: false
    username: jblu
    password:
      from_secret: gittea_drone
    repo: git.jonb.io/jblu/seafile-backup
    tags:
      - dev
  when:
    branch:
    - dev*
    event:
    - push
    - pull_request
|
|
16
.gitignore
vendored
16
.gitignore
vendored
@ -1,9 +1,9 @@
|
|||||||
.env
seafile-backup.sh
*.log
*.conf
# fixed: this pattern was listed twice
*tests*
*dest*
*db*
*pycache*
|
@ -1,39 +1,39 @@
|
|||||||
import requests as r
|
import requests as r
|
||||||
from tomllib import load
|
from tomllib import load
|
||||||
import os
|
import os
|
||||||
|
|
||||||
def apprise_notify(req_obj, apprise_url, aurls, title, body):
    """POST a notification to an Apprise API endpoint.

    req_obj     -- the requests module (or a compatible object with .post)
    apprise_url -- full URL of the Apprise notify endpoint
    aurls       -- Apprise notification URL(s) to deliver to
    title, body -- message title and text

    Returns the response object from req_obj.post. TLS verification is
    disabled (verify=False).
    """
    payload = {
        'urls': aurls,
        'title': title,
        'body': body,
    }
    return req_obj.post(apprise_url, json=payload, verify=False)
|
||||||
|
|
||||||
class AppriseClient:
    """Collect Apprise settings and fire a notification on construction.

    Settings come from environment variables when DOCKER is set, or from a
    TOML config file (env var 'toml_path', falling back to ./config.toml).
    """

    def __init__(self):
        self.config = ''
        try:
            if os.environ["DOCKER"]:
                self.host = os.environ["host"]
                self.port = os.environ["port"]
                self.aurls = os.environ["aurls"]
                self.title = os.environ["title"]
                self.body = os.environ["body"]
            if os.environ["toml_path"]:
                config_file_path = os.environ["toml_path"]
                with open(config_file_path, 'rb') as c:
                    self.config = load(c)
        except KeyError:
            # Original code had a bare 'except:' followed by a no-op
            # 'KeyError' expression statement; only a missing env var is
            # expected here. Fall back to a local config file.
            if os.path.exists('./config.toml'):
                config_file_path = './config.toml'
                with open(config_file_path, 'rb') as c:
                    self.config = load(c)
        if self.config:
            self.host = self.config["apprise"]["host"]
            self.port = self.config["apprise"]["port"]
            self.aurls = self.config["apprise"]["aurls"]
            self.title = self.config["apprise"]["title"]
            self.body = self.config["apprise"]["body"]
        # apprise_notify() takes (req_obj, apprise_url, aurls, title, body);
        # the original passed host and port as two separate arguments, which
        # is a guaranteed TypeError. Build the endpoint URL instead.
        # NOTE(review): assumes the Apprise API's stateless /notify endpoint
        # over plain HTTP — confirm scheme/path for the deployment.
        apprise_url = f'http://{self.host}:{self.port}/notify'
        self.apprise_response = apprise_notify(r, apprise_url, self.aurls,
                                               self.title, self.body)
|
||||||
|
|
||||||
# Allow running this module directly to send a one-off notification.
if __name__ == "__main__":
    AppriseClient()
|
@ -1,8 +1,6 @@
|
|||||||
def healthcheck_ping(req_obj, url, start=False):
    """Ping a healthchecks.io check URL.

    req_obj -- the requests module (or a compatible object exposing
               .get and .RequestException)
    url     -- the check's ping URL
    start   -- when True, ping the '/start' sub-URL to signal job start.
               Kept for backward compatibility: the caller in
               seafile-backup.py still passes start=True; dropping the
               parameter made that call a TypeError.
    """
    if start:
        # The old code evaluated url + '/start' but discarded the result;
        # actually use the start sub-URL.
        url = url + '/start'
    try:
        req_obj.get(url, timeout=5)
    except req_obj.RequestException as e:
        # Log ping failure here...
        print("Ping failed: %s" % e)
|
@ -1,7 +1,7 @@
|
|||||||
#!/bin/sh
# Container entrypoint: write a one-line crontab from the CRON env var and
# hand the process over to supercronic.

CRON_CONFIG_FILE="/opt/crontab"

echo "${CRON} python /opt/seafile-backup.py" > "$CRON_CONFIG_FILE"

# Quoted to be safe against spaces/globs in the path (fixes unquoted
# expansion in the original).
exec supercronic -passthrough-logs -quiet "$CRON_CONFIG_FILE"
|
74
restic.sh
74
restic.sh
@ -1,38 +1,38 @@
|
|||||||
#!/bin/ash
# Back up the local seafile data to the restic repository, apply the
# retention policy, prune, and verify. Progress is tee'd to $LOG_PATH.
: "${RESTIC_REPOSITORY:?Need the restic repository}"
: "${AWS_ACCESS_KEY_ID:?Need the access key id}"
: "${AWS_SECRET_ACCESS_KEY:?Need the secret access key}"
: "${RESTIC_PASSWORD:?Need the restic password}"
# ':=' (not ':-') so the default is actually ASSIGNED when the variable is
# unset — with ':-' the expansion was discarded and the default lost.
: "${LOG_PATH:=./restic-backup.log}"
: "${seafile_data_local:=/seafile}"

# need to securely provide password: https://restic.readthedocs.io/en/latest/faq.html#how-can-i-specify-encryption-passwords-automatically
restic snapshots > /dev/null || restic init

#Define a timestamp function
timestamp() {
  date "+%b %d %Y %T %Z"
}

# insert timestamp into log
printf "\n\n"
echo "-------------------------------------------------------------------------------" | tee -a "$LOG_PATH"
echo "$(timestamp): restic-backup.sh started" | tee -a "$LOG_PATH"

# Run Backups
restic backup "$seafile_data_local" | tee -a "$LOG_PATH"

# Remove snapshots according to policy
# If run cron more frequently, might add --keep-hourly 24
restic forget --keep-daily 7 --keep-weekly 4 --keep-monthly 12 --keep-yearly 7 | tee -a "$LOG_PATH"

# Remove unneeded data from the repository
restic prune | tee -a "$LOG_PATH"

# Check the repository for errors
restic check | tee -a "$LOG_PATH"

# insert timestamp into log
printf "\n\n"
echo "-------------------------------------------------------------------------------" | tee -a "$LOG_PATH"
echo "$(timestamp): restic-backup.sh finished" | tee -a "$LOG_PATH"
|
@ -1,141 +1,141 @@
|
|||||||
import os
|
import os
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from dotenv import load_dotenv
|
from dotenv import load_dotenv
|
||||||
from AppriseClient import apprise_notify
|
from AppriseClient import apprise_notify
|
||||||
from HealthchecksIO import healthcheck_ping
|
from HealthchecksIO import healthcheck_ping
|
||||||
import requests as r
|
import requests as r
|
||||||
import subprocess
|
import subprocess
|
||||||
# Single timestamp reused for every dump/zip filename in this run.
now = datetime.now()
# Requests are made with verify=False elsewhere; suppress urllib3's
# InsecureRequestWarning chatter.
r.packages.urllib3.disable_warnings()

# Pull configuration from a .env file into the process environment.
load_dotenv()
||||||
def to_bool(value):
    """
    Converts 'something' to boolean. Raises ValueError for invalid formats
    Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
    Possible False values: 0, False, "0", "faLse", "no", "n", "f"
    """
    normalized = str(value).lower()  # lowercase once instead of per branch
    if normalized in ("yes", "y", "true", "t", "1"):
        return True
    if normalized in ("no", "n", "false", "f", "0"):
        return False
    # ValueError is the conventional type for bad input; it is a subclass of
    # Exception, so existing broad handlers still catch it.
    raise ValueError(
        'Invalid value for boolean conversion: ' + str(value) +
        '\nPossible True values: 1, True, "1", "TRue", "yes", "y", "t"'
        '\nPossible False values: 0, False, "0", "faLse", "no", "n", "f"'
    )
|
||||||
|
|
||||||
# switches — each env var toggles one stage of the backup run
docker_command = to_bool(os.getenv("docker_command"))
rclone_copy = to_bool(os.getenv("rclone_copy"))
rclone_config_create = to_bool(os.getenv("rclone_config_create"))
rclone_push = to_bool(os.getenv("rclone_push"))
restic_push = to_bool(os.getenv("restic_push"))
db_dump = to_bool(os.getenv("db_dump"))
zip_db_files = to_bool(os.getenv("zip_db_files"))
offload_db_files = to_bool(os.getenv("offload_db_files"))
cleanup = to_bool(os.getenv("cleanup"))
healthcheck = to_bool(os.getenv("healthcheck"))
notify = to_bool(os.getenv("notify"))

LOG_PATH = os.getenv("LOG_PATH")

# docker
container_name = os.getenv("container_name")

# data folders
seafile_data_local = os.getenv("seafile_data_local")
seafile_data_backup = os.getenv("seafile_data_backup")

# databases
databases = os.getenv("databases")
db_dump_host = os.getenv("db_dump_host")
db_dump_user = os.getenv("db_dump_user")
db_dump_password = os.getenv("db_dump_password")
db_dump_tmp_path = os.getenv("db_dump_tmp_path")

# Rclone remote
rclone_config_path = os.getenv("rclone_config_path")
rclone_remote = os.getenv("rclone_remote")
rclone_backend = os.getenv("rclone_backend")
rclone_provider = os.getenv("rclone_provider")
rclone_endpoint = os.getenv("rclone_endpoint")
rclone_remote_path = os.getenv("rclone_remote_path")
rclone_remote_db_path = os.getenv("rclone_remote_db_path")
rclone_environment_auth = os.getenv("rclone_environment_auth")
rclone_db_retention = os.getenv("rclone_db_retention")

# Restic remote (read by /opt/restic.sh from the environment)
RESTIC_REPOSITORY = os.getenv("RESTIC_REPOSITORY")
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
RESTIC_PASSWORD = os.getenv("RESTIC_PASSWORD")

# healthchecks
healthcheck_url = os.getenv("healthcheck_url")

# notify
apprise_apprise_url = os.getenv("apprise_apprise_url")
apprise_aurls = os.getenv("apprise_aurls")
apprise_title = os.getenv("apprise_title")
apprise_body = os.getenv("apprise_body")

# healthcheck - Tell healthchecks.io we are starting the backup.
# healthcheck_ping() no longer accepts start=True (the old call was a
# TypeError after the signature change), so ping the /start sub-URL directly.
if healthcheck:
    healthcheck_ping(r, f'{healthcheck_url}/start')

# Stop seafile and seafile hub so data files are quiescent during the copy
if docker_command:
    # fixed path: was 'seafile-serverlatest' (missing hyphen)
    os.system(f'docker exec {container_name} /opt/seafile/seafile-server-latest/seahub.sh stop')
    os.system(f'docker exec {container_name} /opt/seafile/seafile-server-latest/seafile.sh stop')

# Dump the databases
if db_dump:
    if not os.path.exists(db_dump_tmp_path):
        os.system(f"mkdir -p {db_dump_tmp_path}")
    for database in databases.split(','):
        os.system(f'mariadb-dump -h {db_dump_host} -u {db_dump_user} -p{db_dump_password} --skip-opt {database} > {db_dump_tmp_path}{database}.{now.strftime("%m-%d-%Y_%H-%M-%S")}.sql')

# Create the rclone config if one does not exist yet
if rclone_config_create:
    rclone_config_check = str(subprocess.check_output(["rclone", "config", "file"]))
    if "doesn't exist" in rclone_config_check:
        os.system(f"rclone config create {rclone_remote} {rclone_backend} provider={rclone_provider} endpoint={rclone_endpoint} env_auth=true")

# Local rclone backup
if rclone_copy:
    os.system(f'rclone sync -P {seafile_data_local} {seafile_data_backup}')

# Remote rclone backup
if rclone_push:
    os.system(f'rclone sync -P {seafile_data_local} {rclone_remote}:{rclone_remote_path}')

# Remote restic backup
if restic_push:
    os.system("/opt/restic.sh")

# Start seafile and seafile hub
if docker_command:
    os.system(f'docker exec {container_name} /opt/seafile/seafile-server-latest/seafile.sh start')
    os.system(f'docker exec {container_name} /opt/seafile/seafile-server-latest/seahub.sh start')

# compress db files
if zip_db_files:
    os.system(f'zip -r {db_dump_tmp_path}/sfdb_{now.strftime("%m-%d-%Y_%H-%M-%S")} {db_dump_tmp_path}')
    os.system(f'rm {db_dump_tmp_path}*.sql')

# offload db file
if offload_db_files:
    os.system(f'rclone copy -P {db_dump_tmp_path} {rclone_remote}:{rclone_remote_db_path}')

# cleanup
if cleanup:
    os.system(f'rm {db_dump_tmp_path}*sfdb_*')
    os.system(f'rclone delete -P {rclone_db_retention} {rclone_remote}:{rclone_remote_db_path}')

# healthcheck - report successful completion
if healthcheck:
    healthcheck_ping(r, healthcheck_url)

# notification
if notify:
    apprise_notify(r, apprise_apprise_url, apprise_aurls, apprise_title, apprise_body)
Loading…
x
Reference in New Issue
Block a user