rsync -ai --delete ../sec/pass julm@sourcephile.fr:work/sourcephile/sec/
recv-pass:
rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/pass ../sec/
+recv-gnupg:
+ rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/gnupg ../sec/
+recv-ssh:
+ rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/ssh ../sec/
tunnel-rspamd:
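	# Forwards local port 11334 to the rspamd controller on sourcephile.fr,
	# whose web UI then becomes reachable at http://localhost:11334.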
ssh -nNTL 11334:localhost:11334 root@sourcephile.fr
"zfs.zfs_arc_max=${toString (500 * 1024 * 1024)}" # bytes
];
-environment = {
- systemPackages = with pkgs; [
- pciutils
- #flashrom
- pkgs.nvme-cli
- ];
-};
+environment.systemPackages = with pkgs; [
+ pciutils
+ #flashrom
+ nvme-cli
+];
}
{ pkgs, lib, config, ... }:
{
-# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
-# You should try to make this id unique among your machines.
-# Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
-networking.hostId = "69c40b03";
-
# none is the recommended elevator for ZFS (which has its own I/O scheduler)
# and for SSDs in general, whereas HDDs could use mq-deadline.
services.udev.extraRules = ''
# set none scheduler for non-rotating disks
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
+  ACTION=="add|change", KERNEL=="nvme[0-9]n[0-9]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
'';
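# After boot, the active scheduler can be checked with e.g.:
#   cat /sys/block/nvme0n1/queue/scheduler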
boot.supportedFilesystems = [ "zfs" ];
#cwd := $(notdir $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))))
-losurdo_disk := /dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0
-#losurdo_disk := /dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0
-#losurdo_cipher :=
-losurdo_cipher := aes-128-gcm
-losurdo_autotrim :=
-losurdo_reservation := 1G
-#losurdo_channel := $$(nix-env -p /nix/var/nix/profiles/per-user/$$USER/channels -q nixpkgs --no-name --out-path)
+#disk := /dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0
+#disk := /dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNJ0N211426T
+server := losurdo
+disk := $(shell sourcephile-nix-get nodes.$(server).config.boot.loader.grub.devices.0)
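+# $(disk) is resolved from the NixOS configuration itself via sourcephile-nix-get
+# (defined in shell.nix), keeping it in sync with boot.loader.grub.devices.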
+partlabel := $(server)_nvme
+rpool := $(partlabel)
+cipher := aes-128-gcm
+autotrim := on
+reservation := 1G
-wipeout: umount
- sudo zpool labelclear -f $(losurdo_disk)-part3 || true
- sudo zpool labelclear -f $(losurdo_disk)-part5 || true
- sudo $$(which sgdisk) --zap-all $(losurdo_disk)
-
-partition:
+wipeout:
sudo modprobe zfs
- set -x; if test -e sfdisk; then \
- sudo $$(which sfdisk) $(losurdo_disk) <sfdisk.txt; \
- else \
- sudo $$(which sgdisk) --zap-all $(losurdo_disk) && \
- sudo partprobe && \
- sudo $$(which sgdisk) -a1 -n1:34:2047 -t1:EF02 $(losurdo_disk) && \
- sudo $$(which sgdisk) -n2:1M:+512M -t2:EF00 $(losurdo_disk) && \
- sudo $$(which sgdisk) -n3:0:+512M -t3:8300 $(losurdo_disk) && \
- sudo $$(which sgdisk) -n4:0:+4G -t4:8200 $(losurdo_disk) && \
- sudo $$(which sgdisk) -n5:0:0 -t5:BF01 $(losurdo_disk) && \
- sudo $$(which sgdisk) --randomize-guids $(losurdo_disk) && \
- sudo $$(which sfdisk) -d $(losurdo_disk) | \
- sed -e 's&/dev/sd.&$(losurdo_disk)&' >sfdisk.txt; \
- fi
+ sudo zpool labelclear -f $(disk)-part5 || true
+ sudo $$(which sgdisk) --zap-all $(disk)
+
+partition: wipeout
+ sudo $$(which sgdisk) -a1 -n0:34:2047 -t0:EF02 -c0:"$(partlabel)_bios" $(disk)
+ sudo $$(which sgdisk) -n0:1M:+8M -t0:EF00 -c0:"$(partlabel)_efi" $(disk)
+ sudo $$(which sgdisk) -n0:0:+256M -t0:8300 -c0:"$(partlabel)_boot" $(disk)
+ sudo $$(which sgdisk) -n0:0:+8G -t0:8200 -c0:"$(partlabel)_swap" $(disk)
+ sudo $$(which sgdisk) -n0:0:0 -t0:BF01 -c0:"$(partlabel)_rpool" $(disk)
+ sudo $$(which sgdisk) --randomize-guids $(disk)
+ sudo $$(which sgdisk) --backup=sgdisk.backup $(disk)
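+	# The saved GPT can be restored later with: sgdisk --load-backup=sgdisk.backup $(disk)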
-format:
- # DOC: https://github.com/zfsonlinux/zfs/wiki/Debian-Buster-Root-on-ZFS
- sudo mkdir -p /mnt/losurdo
- blkid -t TYPE=ext2 $(losurdo_disk)-part3; test $$? != 2 || \
- mkfs.ext2 $(losurdo_disk)-part3
- # swap
- # Note: configured with a volatile key in losurdo.nix
- #blkid -t TYPE=crypto_LUKS $(losurdo_disk)-part4; test $$? != 2 || \
- #sudo cryptsetup luksFormat --cipher aes-xts-plain64 --key-size 256 --hash sha256 $(losurdo_disk)-part4
- #sudo cryptsetup luksOpen $(losurdo_disk)-part4 swap
- #blkid -t TYPE=swap /dev/mapper/-swap; test $$? != 2 || \
- #sudo mkswap --check --label swap
- #sudo cryptsetup luksClose $(losurdo_disk)-part4 swap
- # rpool
- sudo zpool list rpool 2>/dev/null || \
+format: format-efi format-boot format-rpool
+format-efi:
+ sudo blkid $(disk)-part2 -t TYPE=vfat || \
+ sudo mkfs.vfat -F 32 -s 1 -n EFI $(disk)-part2
+format-boot:
+ sudo mkdir -p /mnt/$(server)
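+	# blkid exits with status 2 when no matching filesystem is found,
+	# in which case /boot is (re)formatted as ext2.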
+ sudo blkid -t TYPE=ext2 $(disk)-part3; test $$? != 2 || \
+ sudo mkfs.ext2 $(disk)-part3
+format-rpool:
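+	# ashift=12 forces 4 KiB (2^12) sectors, a safe default for modern SSDs.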
+ sudo zpool list $(rpool) 2>/dev/null || \
sudo zpool create -o ashift=12 \
- $(if $(losurdo_cipher),-O encryption=$(losurdo_cipher) \
+ $(if $(cipher),-O encryption=$(cipher) \
-O keyformat=passphrase \
-O keylocation=prompt) \
-O normalization=formD \
- -R /mnt/losurdo rpool $(losurdo_disk)-part5
+ -R /mnt/$(server) $(rpool) $(disk)-part5
+ sudo zpool set \
+ autotrim=$(autotrim) \
+ $(rpool)
sudo zfs set \
acltype=posixacl \
atime=off \
- $(if $(losurdo_autotrim),autotrim=on) \
canmount=off \
compression=lz4 \
dnodesize=auto \
relatime=on \
xattr=sa \
mountpoint=/ \
- rpool
+ $(rpool)
# https://nixos.wiki/wiki/NixOS_on_ZFS#Reservations
- sudo zfs list rpool/reserved 2>/dev/null || \
- sudo zfs create -o canmount=off -o mountpoint=none rpool/reserved
- sudo zfs set refreservation=$(losurdo_reservation) rpool/reserved
+ sudo zfs list $(rpool)/reserved 2>/dev/null || \
+ sudo zfs create -o canmount=off -o mountpoint=none $(rpool)/reserved
+ sudo zfs set refreservation=$(reservation) $(rpool)/reserved
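+	# Should the pool ever fill up, this space can be reclaimed with:
+	#   zfs set refreservation=none $(rpool)/reserved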
# /
- # NOTE: mountpoint=legacy is required to let NixOS mount the ZFS filesystems.
- sudo zfs list rpool/root 2>/dev/null || \
+ # mountpoint=legacy is required to let NixOS mount the ZFS filesystems.
+ sudo zfs list $(rpool)/root 2>/dev/null || \
sudo zfs create \
-o canmount=on \
-o mountpoint=legacy \
- rpool/root
+ $(rpool)/root
# /boot
#sudo zfs list bpool/boot 2>/dev/null || \
#sudo zfs create \
# -o canmount=on \
# -o mountpoint=legacy \
# bpool/boot
- # /boot/efi
- sudo blkid $(losurdo_disk)-part2 -t TYPE=vfat || \
- sudo mkfs.vfat -F 32 -s 1 -n EFI $(losurdo_disk)-part2
# /*
for p in \
home \
nix \
var \
var/cache \
var/log \
var/tmp \
; do \
- sudo zfs list rpool/"$$p" 2>/dev/null || \
+ sudo zfs list $(rpool)/"$$p" 2>/dev/null || \
sudo zfs create \
-o canmount=on \
-o mountpoint=legacy \
- rpool/"$$p" ; \
+ $(rpool)/"$$p" ; \
done
sudo zfs set \
com.sun:auto-snapshot=false \
- rpool/nix
+ $(rpool)/nix
sudo zfs set \
com.sun:auto-snapshot=false \
- rpool/var/cache
+ $(rpool)/var/cache
sudo zfs set \
com.sun:auto-snapshot=false \
sync=disabled \
- rpool/var/tmp
+ $(rpool)/var/tmp
-mount:
+mount: mount-rpool mount-boot mount-efi
+mount-rpool:
# import the needed zpools
- #sudo zpool list bpool || \
- #sudo zpool import -f bpool
- sudo zpool list rpool || \
- sudo zpool import -f rpool
+ sudo zpool list $(rpool) || \
+ sudo zpool import -f $(rpool)
# load encryption key
- sudo zfs get -H encryption rpool | \
- grep -q '^rpool\s*encryption\s*off' || \
- sudo zfs get -H keystatus rpool | \
- grep -q '^rpool\s*keystatus\s*available' || \
- sudo zfs load-key rpool
+ sudo zfs get -H encryption $(rpool) | \
+ grep -q '^$(rpool)\s*encryption\s*off' || \
+ sudo zfs get -H keystatus $(rpool) | \
+ grep -q '^$(rpool)\s*keystatus\s*available' || \
+ sudo zfs load-key $(rpool)
# /
- sudo mkdir -p /mnt/losurdo
- sudo mountpoint /mnt/losurdo || \
- sudo mount -v -t zfs rpool/root /mnt/losurdo
- # /boot
- sudo mkdir -p /mnt/losurdo/boot
- sudo mountpoint /mnt/losurdo/boot || \
- sudo mount -v $(losurdo_disk)-part3 /mnt/losurdo/boot
- #sudo mount -v -t zfs bpool/boot /mnt/losurdo/boot
- # /boot/efi
- sudo mkdir -p /mnt/losurdo/boot/efi
- sudo mountpoint /mnt/losurdo/boot/efi || \
- sudo mount -v $(losurdo_disk)-part2 /mnt/losurdo/boot/efi
+ sudo mkdir -p /mnt/$(server)
+ sudo mountpoint /mnt/$(server) || \
+ sudo mount -v -t zfs $(rpool)/root /mnt/$(server)
# /*
for p in \
home \
nix \
var \
var/cache \
var/log \
var/tmp \
; do \
- sudo mkdir -p /mnt/losurdo/"$$p"; \
- sudo mountpoint /mnt/losurdo/"$$p" || \
- sudo mount -v -t zfs rpool/"$$p" /mnt/losurdo/"$$p" ; \
+ sudo mkdir -p /mnt/$(server)/"$$p"; \
+ sudo mountpoint /mnt/$(server)/"$$p" || \
+ sudo mount -v -t zfs $(rpool)/"$$p" /mnt/$(server)/"$$p" ; \
done
- sudo chmod 1777 /mnt/losurdo/var/tmp
+ sudo chmod 1777 /mnt/$(server)/var/tmp
+mount-boot:
+ sudo mkdir -p /mnt/$(server)/boot
+ sudo mountpoint /mnt/$(server)/boot || \
+ sudo mount -v $(disk)-part3 /mnt/$(server)/boot
+ #sudo mount -v -t zfs bpool/boot /mnt/$(server)/boot
+mount-efi: | mount-boot
+ sudo mkdir -p /mnt/$(server)/boot/efi
+ sudo mountpoint /mnt/$(server)/boot/efi || \
+ sudo mount -v $(disk)-part2 /mnt/$(server)/boot/efi
bootstrap: mount
- #test "$$(sudo grub-probe /mnt/losurdo/boot)" = zfs
- # NOTE: nixos-install will install GRUB following losurdo.nix
- # BIOS
- #sudo grub-install $(losurdo_disk)
- # UEFI
- #sudo grub-install \
- # --target=x86_64-efi \
- # --efi-directory=/mnt/losurdo/boot/efi \
- # --bootloader-id=nixos \
- # --recheck \
- # --no-floppy
+ # Workaround https://dev.gnupg.org/T3908
+ chmod o+rw $$GPG_TTY $$XAUTHORITY
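+	# Presumably needed so that pinentry, spawned by the sudo'ed gpg below,
+	# can use the unprivileged user's tty and X display.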
- # Run pass as root to start gpg-agent as root not as $USER
- # otherwise the yubikey has to be unplugged/replugged…
- sudo \
- GNUPGHOME="$$GNUPGHOME" \
- GPG_TTY="$$GPG_TTY" \
- PASSWORD_STORE_DIR="$$PASSWORD_STORE_DIR" \
- PINENTRY_USER_DATA="$$PINENTRY_USER_DATA" \
- XAUTHORITY="$$XAUTHORITY" \
- pass servers/losurdo/dropbear/ecdsa.key | \
- sudo install -D -o root -g root -m 400 /dev/stdin \
- /mnt/losurdo/etc/dropbear/ecdsa.key && \
- test -s /mnt/losurdo/etc/dropbear/ecdsa.key
-
- #trap "test ! -e SHRED-ME || sudo find SHRED-ME -type f -exec shred -u {} + && sudo rm -rf SHRED-ME" EXIT ;
- sudo \
- GNUPGHOME="$$GNUPGHOME" \
- GPG_TTY="$$GPG_TTY" \
- LANG="$$LANG" \
- LC_CTYPE="$$LC_CTYPE" \
- LOSURDO_DEPLOYMENT="$$LOSURDO_DEPLOYMENT" \
- NIXOS_CONFIG="$$(readlink -e configuration.nix)" \
- NIX_CONF_DIR="$$NIX_CONF_DIR" \
- NIX_PATH="$$NIX_PATH" \
- PASSWORD_STORE_DIR="$$PASSWORD_STORE_DIR" \
- PATH="$$PATH" \
- PINENTRY_USER_DATA="$$PINENTRY_USER_DATA" \
- SSL_CERT_FILE="$$SSL_CERT_FILE" \
- XAUTHORITY="$$XAUTHORITY" \
- $$(which nixos-install) \
- --root /mnt/losurdo \
- $(if $(losurdo_channel),--channel "$(losurdo_channel)") \
+ sudo --preserve-env \
+ NIXOS_CONFIG="$$PWD/configuration.nix" \
+ $$(which nixos-install) \
+ --root /mnt/$(server) \
--no-root-passwd \
+ --no-channel-copy \
--show-trace
+
+ # End workaround https://dev.gnupg.org/T3908
+ chmod o-rw $$GPG_TTY $$XAUTHORITY
+
+ sudo sourcephile-shred-tmp
umount:
for p in \
boot/efi \
boot \
var/tmp \
var/log \
var/cache \
var \
nix \
home \
"" \
; do \
- ! sudo mountpoint /mnt/losurdo/"$$p" || \
- sudo umount -v /mnt/losurdo/"$$p" ; \
+ ! sudo mountpoint /mnt/$(server)/"$$p" || \
+ sudo umount -v /mnt/$(server)/"$$p" ; \
done
- ! sudo zpool list rpool 2>/dev/null || \
- zfs get -H encryption rpool | \
- grep -q '^rpool\s*encryption\s*off' || \
- zfs get -H keystatus rpool | \
- grep -q '^rpool\s*keystatus\s*unavailable' || \
- sudo zfs unload-key rpool
+ ! sudo zpool list $(rpool) 2>/dev/null || \
+ zfs get -H encryption $(rpool) | \
+ grep -q '^$(rpool)\s*encryption\s*off' || \
+ zfs get -H keystatus $(rpool) | \
+ grep -q '^$(rpool)\s*keystatus\s*unavailable' || \
+ sudo zfs unload-key $(rpool)
#! sudo zpool list bpool 2>/dev/null || \
#sudo zpool export bpool
- ! sudo zpool list rpool 2>/dev/null || \
- sudo zpool export rpool
+ ! sudo zpool list $(rpool) 2>/dev/null || \
+ sudo zpool export $(rpool)
unlock:
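+	# Unlocks a freshly rebooted server: port 2222 is presumably the initrd's
+	# dropbear; pkill zfs releases the `zfs load-key` prompt blocking the boot
+	# once the key has been loaded.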
- pass servers/losurdo/zfs/rpool | \
+ pass servers/$(server)/zfs/$(rpool) | \
NIXOPS_DEPLOYMENT="$${NIXOPS_DEPLOYMENT:-$(LOSURDO_DEPLOYMENT)}" \
- nixops ssh losurdo -p 2222 'zfs load-key rpool && pkill zfs'
+ nixops ssh $(server) -p 2222 'zfs load-key $(rpool) && pkill zfs'
let
inherit (builtins) getEnv hasAttr readFile;
inherit (builtins.extraBuiltins) pass pass-chomp;
+ inherit (config.users) users;
in
{
# This value determines the NixOS release with which your system is to be
# compatible, in order to avoid breaking some software such as database servers.
system.stateVersion = "19.09"; # Did you read the comment?
nix = {
- trustedUsers = [ "julm" ];
+ trustedUsers = [ users."julm".name ];
};
imports = [
../../defaults.nix
../../base/unbound.nix
] ++ lib.optionals (! hasAttr "nodes" attrs) [
- <nixops/share/nix/nixops/options.nix>
- <nixops/share/nix/nixops/resource.nix>
+ <nixops/options.nix>
+ <nixops/resource.nix>
];
networking = rec {
};
groups = {
wheel = {
- members = [ "julm" ];
+ members = [ users."julm".name ];
};
julm = {
- members = [ "julm" ];
+ members = [ users."julm".name ];
gid = 1000;
};
};
let inherit (config) networking; in
{
imports = [
- production/dl10j.nix
+ ../../base/dl10j.nix
production/networking.nix
- production/zfs.nix
+ production/fileSystems.nix
production/shorewall.nix
];
deployment = {
targetEnv = "none";
- targetHost = (builtins.elemAt networking.interfaces.enp1s0.ipv4.addresses 0).address;
+ targetHost = (builtins.elemAt networking.interfaces.enp5s0.ipv4.addresses 0).address;
keys = {
};
};
+++ /dev/null
-{ pkgs, lib, config, ... }:
-{
-imports = [
- ../../../base/dl10j.nix
-];
-
-boot.loader.grub.devices = [
- "/dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0"
-];
-
-fileSystems."/boot" =
- { device = "/dev/disk/by-uuid/0ea6842a-9e95-45ce-b2a9-8639918875f3";
- fsType = "ext2";
- };
-
-fileSystems."/boot/efi" =
- { device = "/dev/disk/by-uuid/6BB1-D61E";
- fsType = "vfat";
- };
-
-swapDevices =
- [ { device = "/dev/disk/by-partuuid/a0bd2fd9-2df3-4626-bc7b-db9da714eba5";
- randomEncryption = {
- enable = true;
- cipher = "aes-xts-plain64";
- source = "/dev/urandom";
- };
- }
- ];
-
-environment = {
- systemPackages = with pkgs; [
- pciutils
- flashrom
- ];
-};
-}
--- /dev/null
+{ pkgs, lib, config, ... }:
+{
+imports = [
+ ../../../base/zfs.nix
+];
+
+# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
+# You should try to make this id unique among your machines.
+# Manually generated with: head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
+networking.hostId = "69c40b03";
+
+/*
+# Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
+services.zfs.autoSnapshot = {
+ enable = true;
+ frequent = ;
+ hourly = ;
+ daily = ;
+ weekly = ;
+ monthly = ;
+};
+*/
+
+boot.loader.grub.devices = [
+ "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNJ0N211426T"
+];
+
+fileSystems."/boot" =
+ { device = "/dev/disk/by-partlabel/losurdo_nvme_boot";
+ fsType = "ext2";
+ };
+
+fileSystems."/boot/efi" =
+ { device = "/dev/disk/by-partlabel/losurdo_nvme_efi";
+ fsType = "vfat";
+ };
+
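+# randomEncryption below re-keys the swap partition with a fresh random key
+# at every boot, so swapped-out pages do not survive a reboot (this also
+# precludes hibernation).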
+swapDevices =
+ [ { device = "/dev/disk/by-partlabel/losurdo_nvme_swap";
+ randomEncryption = {
+ enable = true;
+ cipher = "aes-xts-plain64";
+ source = "/dev/urandom";
+ };
+ }
+ ];
+
+fileSystems."/" =
+ { device = "losurdo_nvme/root";
+ fsType = "zfs";
+ };
+
+fileSystems."/home" =
+ { device = "losurdo_nvme/home";
+ fsType = "zfs";
+ };
+
+fileSystems."/nix" =
+ { device = "losurdo_nvme/nix";
+ fsType = "zfs";
+ };
+
+fileSystems."/var" =
+ { device = "losurdo_nvme/var";
+ fsType = "zfs";
+ };
+
+fileSystems."/var/cache" =
+ { device = "losurdo_nvme/var/cache";
+ fsType = "zfs";
+ };
+
+fileSystems."/var/log" =
+ { device = "losurdo_nvme/var/log";
+ fsType = "zfs";
+ };
+
+fileSystems."/var/tmp" =
+ { device = "losurdo_nvme/var/tmp";
+ fsType = "zfs";
+ };
+}
+++ /dev/null
-{ pkgs, lib, config, ... }:
-
-{
- imports = [];
-
- boot.supportedFilesystems = [ "zfs" ];
-
- # The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
- # You should try to make this id unique among your machines.
- # Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
- networking.hostId = "69c40b03";
-
- # none is the recommended elevator with ZFS (which has its own I/O scheduler)
- # and/or for SSD, whereas HDD could use mq-deadline.
- services.udev.extraRules = ''
- # set none scheduler for non-rotating disks
- ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
- '';
-
- # Ensure extra safeguards are active that zfs uses to protect zfs pools.
- boot.zfs.forceImportAll = false;
- boot.zfs.forceImportRoot = false;
-
- boot.zfs.enableUnstable = true;
- boot.zfs.requestEncryptionCredentials = true;
-
- # Enables periodic scrubbing of ZFS pools.
- services.zfs.autoScrub.enable = true;
-
- environment = {
- systemPackages = [
- pkgs.mbuffer
- pkgs.zfs
- ];
- };
-
- /*
- # Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
- services.zfs.autoSnapshot = {
- enable = true;
- frequent = ;
- hourly = ;
- daily = ;
- weekly = ;
- monthly = ;
- };
- */
-
- /*
- fileSystems."/boot" =
- { device = "bpool/boot";
- fsType = "zfs";
- };
- */
- fileSystems."/" =
- { device = "rpool/root";
- fsType = "zfs";
- };
-
- fileSystems."/home" =
- { device = "rpool/home";
- fsType = "zfs";
- };
-
- fileSystems."/nix" =
- { device = "rpool/nix";
- fsType = "zfs";
- };
-
- fileSystems."/var" =
- { device = "rpool/var";
- fsType = "zfs";
- };
-
- fileSystems."/var/cache" =
- { device = "rpool/var/cache";
- fsType = "zfs";
- };
-
- fileSystems."/var/log" =
- { device = "rpool/var/log";
- fsType = "zfs";
- };
-
- fileSystems."/var/tmp" =
- { device = "rpool/var/tmp";
- fsType = "zfs";
- };
-}
+++ /dev/null
-label: gpt
-label-id: 6E079BB6-95AF-4401-94D7-16D9E788F62D
-device: /dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0
-unit: sectors
-first-lba: 34
-last-lba: 60628958
-
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part1 : start= 34, size= 2014, type=21686148-6449-6E6F-744E-656564454649, uuid=11C369DF-5A4A-49DF-AF25-F97B559E63E6
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part2 : start= 2048, size= 1048576, type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=1A135842-E7B1-48E1-A398-10BDDED9FD3D
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part3 : start= 1050624, size= 1048576, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=86268225-60B9-496C-B06D-6EFF3E6E2F37
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part4 : start= 2099200, size= 8388608, type=0657FD6D-A4AB-43C4-84E5-0933C84B4F4F, uuid=A9DE6216-AD9A-44A1-BA0B-4C5790D0B1DD
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part5 : start= 10487808, size= 50141151, type=6A898CC3-1DD2-11B2-99A6-080020736631, uuid=A26AF596-86D7-4892-9904-EBD620ED37D4
../../../base/zfs.nix
];
+# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
+# You should try to make this id unique among your machines.
+# Manually generated with: head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
+networking.hostId = "69c40b03";
+
/*
# Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
services.zfs.autoSnapshot = {
gnupgHome = toString ../sec/gnupg;
keys = import shell/openpgp.nix;
gpgExtraConf = ''
+ # julm@sourcephile.fr
trusted-key 0xB2450D97085B7B8C
'';
};
inherit pkgs lib;
modules = [ configuration ];
}).config;
+
+  # Utility to query the Nix files like nixops show-option does,
+  # but without requiring a nixops database, and not limited to config entries.
+ pwd = toString (./. + "");
+ sourcephile-nix-get = pkgs.writeShellScriptBin "sourcephile-nix-get" ''
+ nix-instantiate --read-write-mode \
+ --arg networkExprs "[${pwd}/servers.nix ${pwd}/servers/production.nix]" \
+ --arg args '{}' --argstr "uuid" whatever \
+ --argstr deploymentName production "<nixops/eval-machine-info.nix>" \
+ --eval-only --strict --arg checkConfigurationOptions false -A "$@"
+ '';
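+  # Example use (as in the Makefile):
+  #   sourcephile-nix-get nodes.losurdo.config.boot.loader.grub.devices.0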
+ sourcephile-shred-tmp = pkgs.writeShellScriptBin "sourcephile-shred-tmp" ''
+    # Clean up "../sec/tmp/".
+    # This is done when entering the nix-shell,
+    # because direnv already uses the EXIT trap for itself.
+ cd "${pwd}"
+ find ../sec/tmp -type f -exec shred -fu {} +
+ '';
in
pkgs.mkShell {
name = "sourcephile-nix";
#preferLocalBuild = true;
#allowSubstitutes = false;
buildInputs = modules.nix-shell.buildInputs ++ [
+ sourcephile-nix-get
+ sourcephile-shred-tmp
nixos.nixos-generate-config
nixos.nixos-install
nixos.nixos-enter
# Nix
PATH=$NIX_SHELL_PATH:$PATH
- export NIX_PATH="nixpkgs=${toString pkgs.path}:nixpkgs-overlays="$PWD"/overlays:nixops=${toString pkgs.nixops}"
+ export NIX_PATH="nixpkgs=${toString pkgs.path}:nixpkgs-overlays="$PWD"/overlays:nixops=${toString pkgs.nixops}/share/nix/nixops"
export nixpkgs_channel=${nixpkgs_channel}
- # Cleanup "../sec/tmp/"
- # This is done when entering the nix-shell
- # because direnv already hooks trap EXIT.
- (cd "$PWD" && find ../sec/tmp -type f -exec shred -fu {} +)
+  # Since the .envrc calls this shellHook,
+  # the EXIT trap cannot be freely used here:
+  # it is already taken by direnv,
+  # hence the shredding at startup, which is not ideal.
+ sourcephile-shred-tmp
${modules.nix-shell.shellHook}
export GPG_TTY=$(tty)
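# updatestartuptty makes gpg-agent use this terminal for pinentry prompts.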
gpg-connect-agent updatestartuptty /bye >/dev/null
- # NixOS
- export MERMET_DEPLOYMENT=production
- export LOSURDO_DEPLOYMENT=production
-
# nixops
#export NIXOPS_DEPLOYMENT="staging"
+ export MERMET_DEPLOYMENT=production
+ export LOSURDO_DEPLOYMENT=production
export NIXOPS_STATE="$PWD"/../sec/nixops/state.nixops
NIXOPS_OPTS+=" --show-trace"
export NIXOPS_OPTS
# choose pinentry depending on PINENTRY_USER_DATA
# this *only works* with gpg2
# see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=802020
- set -x
- case $PINENTRY_USER_DATA in
- "")
- exec pinentry-tty "$@";;
- curses|emacs|gnome3|gtk-2|qt|tty)
- exec pinentry-$PINENTRY_USER_DATA "$@";;
- none) exit 1;; # do not ask for passphrase
- *)
- exec ${pkgs.pinentry.gtk2}/bin/pinentry-gtk-2 "$@"
+ case "''${PINENTRY_USER_DATA:-tty}" in
+ curses) exec ${pkgs.pinentry.curses}/bin/pinentry-curses "$@";;
+ #emacs) exec ''${pkgs.pinentry.emacs}/bin/pinentry-emacs "$@";;
+ #gnome3) exec ''${pkgs.pinentry.gnome3}/bin/pinentry-gnome3 "$@";;
+ gtk-2) exec ${pkgs.pinentry.gtk2}/bin/pinentry-gtk-2 "$@";;
+ none) exit 1;; # do not ask for passphrase
+ #qt) exec ''${pkgs.pinentry.qt}/bin/pinentry-qt "$@";;
+ tty) exec ${pkgs.pinentry.tty}/bin/pinentry-tty "$@";;
esac
'';
in ''
+ allow-loopback-pinentry
allow-preset-passphrase
default-cache-ttl 17200
default-cache-ttl-ssh 17200
enable-ssh-support
max-cache-ttl 17200
max-cache-ttl-ssh 17200
- pinentry-program ${pinentry}
no-allow-external-cache
- #pinentry-program ${pkgs.pinentry}/bin/pinentry
+ pinentry-program ${pinentry}
'';
description = ''
GnuPG's gpg-agent.conf content.