add new per-ZFS datasets to sanoid
This commit is contained in:
110
devices/teal/config/etc/sanoid/sanoid.conf
Normal file
110
devices/teal/config/etc/sanoid/sanoid.conf
Normal file
@@ -0,0 +1,110 @@
######################################
# This is a sample sanoid.conf file. #
# It should go in /etc/sanoid.       #
######################################

## name your backup modules with the path to their ZFS dataset - no leading slash.
[tank/storage]
# # pick one or more templates - they're defined (and editable) below. Comma separated, processed in order.
use_template = production,demo

## Per-service datasets for Docker based services
[tank/appdata]
use_template = production
recursive = yes
process_children_only = yes

## you can also handle datasets recursively.
#[zpoolname/parent]
# use_template = production
# recursive = yes
# # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only.
# process_children_only = yes
#
## you can selectively override settings for child datasets which already fall under a recursive definition.
#[zpoolname/parent/child]
# # child datasets already initialized won't be wiped out, so if you use a new template, it will
# # only override the values already set by the parent template, not replace it completely.
# use_template = demo

#############################
# templates below this line #
#############################

# name your templates template_templatename. you can create your own, and use them in your module definitions above.

[template_demo]
daily = 60

[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
yearly = 0
autosnap = yes
autoprune = yes

[template_backup]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 12
yearly = 0

### don't take new snapshots - snapshots on backup
### datasets are replicated in from source, not
### generated locally
autosnap = no

### monitor hourlies and dailies, but don't warn or
### crit until they're over 48h old, since replication
### is typically daily only
hourly_warn = 2880
hourly_crit = 3600
daily_warn = 48
daily_crit = 60

[template_hotspare]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 3
yearly = 0

### don't take new snapshots - snapshots on backup
### datasets are replicated in from source, not
### generated locally
autosnap = no

### monitor hourlies and dailies, but don't warn or
### crit until they're over 4h old, since replication
### is typically hourly only
hourly_warn = 4h
hourly_crit = 6h
daily_warn = 2d
daily_crit = 4d

[template_scripts]
### information about the snapshot will be supplied as environment variables,
### see the README.md file for details about what is passed when.
### run script before snapshot
pre_snapshot_script = /path/to/script.sh
### run script after snapshot
post_snapshot_script = /path/to/script.sh
### run script after pruning snapshot
pruning_script = /path/to/script.sh
### don't take an inconsistent snapshot (skip if pre script fails)
#no_inconsistent_snapshot = yes
### run post_snapshot_script when pre_snapshot_script is failing
#force_post_snapshot_script = yes
### limit allowed execution time of scripts before continuing (<= 0: infinite)
script_timeout = 5

[template_ignore]
autoprune = no
autosnap = no
monitor = no
Reference in New Issue
Block a user