# hosts/pumpkin/backup.nix
{
  pkgs,
  lib,
  config,
  hostName,
  ...
}:
with builtins;
{
  systemd.services.sanoid.serviceConfig.SupplementaryGroups = [
    config.users.groups."disk".name
  ];
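  # Snapshot policy: sanoid only takes and prunes local snapshots here;
  # replication to the USB backup pool is done by syncoid further down.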
  services.sanoid = {
    enable = true;
    extraArgs = [ "--verbose" ];
    datasets = {
      "${hostName}/root" = {
        autosnap = true;
        autoprune = true;
        frequently = 0;
        hourly = 12;
        daily = 7;
        monthly = 0;
        yearly = 0;
        recursive = true;
      };
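      # Child datasets below override this recursive policy
      # (caches and logs are not snapshotted at all).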
      "${hostName}/root/nix" = {
        autosnap = false;
      };
      "${hostName}/root/var/cache" = {
        autosnap = false;
      };
      "${hostName}/root/var/log" = {
        autosnap = false;
      };
      "${hostName}/root/home/julm/.cache" = {
        autosnap = false;
      };
      "${hostName}/root/home/julm/.local" = {
        hourly = 0;
        daily = 1;
      };
      "${hostName}/root/home/julm/.mozilla" = {
        hourly = 0;
        daily = 1;
      };
      "off2/julm/backup/${hostName}" = {
        autosnap = false;
        autoprune = true;
        hourly = 12;
        daily = 14;
        monthly = 3;
        yearly = 0;
      };
    };
  };
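  # For illustration, the snapshots sanoid keeps can be listed with e.g.:
  #   zfs list -r -t snapshot -o name,creation <pool>/root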

  programs.bash.interactiveShellInit = ''
    mount-zfs-backup () {
      (
        set -eux
        zpool="$1"
        zpool status "$zpool" 2>/dev/null ||
        sudo zpool import -d /dev/disk/by-id/ "$zpool"
        trap "sudo zpool export $zpool" EXIT
        zfs list -rH -t filesystem -o mounted,mountpoint,name "$zpool"/"$USER"/backup |
        grep "^no\\s*/" | cut -f 3 | xargs -ortL1 sudo zfs mount -Olv || true
        ${pkgs.mate.caja-with-extensions}/bin/caja --browser /mnt/"$zpool"/"$USER"/backup
      )
    }
  '';
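  # Usage from an interactive shell: `mount-zfs-backup WD10JPVT` imports the pool,
  # mounts the still-unmounted backup datasets, opens a file browser on them,
  # and exports the pool again when the subshell exits.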
  # Trigger backups when disks are plugged in
  services.udev.extraRules = ''
    ACTION=="add", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", ENV{SYSTEMD_WANTS}+="zfs-local-backup-home@WD10JPVT.service", ENV{SYSTEMD_ALIAS}="/sys/subsystem/usb/WD10JPVT"
    # See https://github.com/systemd/systemd/issues/7587#issuecomment-381428545
    ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*", ENV{ID_SERIAL}=="WDC_WD10JPVT-22A1YT0_WD-WX21AC2F3987", TAG+="systemd"
  '';
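  # The ID_SERIAL of a disk can be found with e.g.:
  #   udevadm info --query=property --name=/dev/sdX | grep ID_SERIAL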
  # Show the user what's happening
  systemd.services."zfs-term@" = {
    description = "ZFS terminal for: %I";
    unitConfig.StopWhenUnneeded = false;
    environment.DISPLAY = ":0";
    environment.XAUTHORITY = "/home/julm/.Xauthority";
    after = [ "graphical.target" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    serviceConfig = {
      Type = "simple";
      PrivateTmp = true;
      ExecStart =
        pkgs.writeShellScript "zfs-term" ''
          DESTPOOL=$1
          set -eux
          ${pkgs.xterm}/bin/xterm -fg white -bg black -fa Monospace -fs 6 \
            -title "ZFS backup to: $DESTPOOL" -e "journalctl -f -o short \
            -u zfs-force-import@$DESTPOOL \
            -u zfs-local-backup-home@$DESTPOOL"
        ''
        + " %I";
    };
  };
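  # The same journal can be followed without the xterm, e.g.:
  #   journalctl -f -u zfs-force-import@WD10JPVT -u zfs-local-backup-home@WD10JPVT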
  # Force zpool import, even if the disk has not been exported, or has been imported on another computer
  systemd.services."zfs-force-import@" = {
    description = "ZFS force import: %I";
    unitConfig = {
      StartLimitBurst = 5;
      StartLimitInterval = 200;
      StopWhenUnneeded = true;
    };
    wants = [ "zfs-term@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      PrivateTmp = true;
      SyslogIdentifier = "zfs-force-import@%i";
      Restart = "on-failure";
      ExecStart =
        pkgs.writeShellScript "zfs-force-import" ''
          DESTPOOL=$1
          set -eux
          # Scan for importable pools (for the logs),
          # then try increasingly forceful imports, using stable by-id paths
          zpool import -d /dev/disk/by-id/ || true
          zpool import -lFd /dev/disk/by-id/ "$DESTPOOL" ||
          zpool reopen "$DESTPOOL" ||
          zpool import -f -d /dev/disk/by-id/ "$DESTPOOL" ||
          zpool clear -nFX "$DESTPOOL"
        ''
        + " %I";
    };
  };
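  # Can also be started by hand (with the disk plugged in), e.g.:
  #   systemctl start zfs-force-import@WD10JPVT.service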
  # Prune old snapshots on the backup and send new ones
  systemd.services."zfs-local-backup-home@" = {
    description = "ZFS backup home, on: %I";
    wants = [ "zfs-term@%i.service" ];
    after = [ "zfs-force-import@%i.service" ];
    requires = [ "zfs-force-import@%i.service" ];
    bindsTo = [ "sys-subsystem-usb-%i.device" ];
    path = lib.mkBefore [ "/run/booted-system/sw" ];
    serviceConfig = rec {
      Type = "oneshot";
      PrivateTmp = true;
      CacheDirectory = [ "zfs-usb-backup/%I" ];
      RuntimeDirectory = [ "zfs-usb-backup/%I" ];
      User = "julm";
      Group = "users";
      SyslogIdentifier = "zfs-local-backup-home@%i";
      ExecStartPre =
        "+"
        + pkgs.writeShellScript "zfs-local-backup-home-startPre" ''
          DESTPOOL=$1
          set -eux
          if zpool status "$DESTPOOL"; then
            zfs allow ${User} bookmark,hold,mount,send ${hostName}/home
            zfs allow ${User} bookmark,create,destroy,load-key,mount,mountpoint,receive,rollback,snapshot "$DESTPOOL"/${User}
            zpool scrub -p "$DESTPOOL" || true
          fi
        ''
        + " %I";
      ExecStart =
        pkgs.writeShellScript "zfs-local-backup-home" ''
          set -eu
          DESTPOOL=$1
          # sanoid is quite conservative:
          # with hourly=24, a snapshot must be >24 hours old
          # and there must be >24 hourly snapshots in total,
          # or nothing is pruned.
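          # Retention on the backup pool is set below: 6 hourly, 31 daily, 3 monthly.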
          install -D -m 400 /dev/stdin /tmp/sanoid/sanoid.conf <<EOF
          [template_remote]
          autoprune=true
          autosnap=false
          process_children_only=false

          [$DESTPOOL/${User}/backup/${hostName}/home]
          hourly=6
          daily=31
          monthly=3
          recursive=true
          use_template=remote
          EOF
          set -x
          ${pkgs.sanoid}/bin/sanoid \
            --cache-dir /var/cache/zfs-usb-backup/"$DESTPOOL" \
            --configdir /tmp/sanoid \
            --prune-snapshots \
            --run-dir /run/zfs-usb-backup/"$DESTPOOL" \
            --verbose

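          # Replication notes: --no-sync-snap reuses sanoid's snapshots instead of
          # creating syncoid's own, and --sendoptions raw sends datasets as stored
          # on disk (so encrypted data stays encrypted in the backup).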
          for dataset in ${hostName}/home; do
            ${pkgs.sanoid}/bin/syncoid \
              --create-bookmark \
              --exclude "home/room" \
              --force-delete \
              --no-privilege-elevation \
              --no-sync-snap \
              --recursive \
              --recvoptions "" \
              --sendoptions raw \
              --skip-parent \
              "$dataset" \
              "$DESTPOOL"/${User}/backup/"$dataset"
          done
        ''
        + " %I";
      ExecStartPost =
        "+"
        + pkgs.writeShellScript "zfs-local-backup-home-startPost" ''
          DESTPOOL=$1
          set -eux
          # Only proceed if the zpool still exists, to avoid hanging uninterruptibly
          if zpool status -v "$DESTPOOL"; then
            # Scrub the zpool for about 1 minute (the scrub runs in the background)
            zpool scrub "$DESTPOOL"
            sleep 60
          fi
          while zpool status -v "$DESTPOOL"; do
            zpool scrub -p "$DESTPOOL" || true
            sleep 20
            # Export the zpool (to avoid a forced import later on)
            zpool export "$DESTPOOL" || true
          done
          systemctl --no-block stop zfs-term@"$DESTPOOL"
        ''
        + " %I";
    };
  };
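  # A backup can also be triggered by hand once the disk is plugged in, e.g.:
  #   systemctl start zfs-local-backup-home@WD10JPVT.service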
  programs.bash.shellAliases = {
    mount-backup-WD10JPVT = "mount-zfs-backup WD10JPVT";
  };
}