losurdo: move everything from the SD card to the NVMe
author: Julien Moutinho <julm@sourcephile.fr>
Wed, 18 Mar 2020 10:40:04 +0000 (11:40 +0100)
committer: Julien Moutinho <julm@sourcephile.fr>
Wed, 18 Mar 2020 10:40:04 +0000 (11:40 +0100)
15 files changed:
Makefile
base/dl10j.nix
base/zfs.nix
servers/losurdo/Makefile
servers/losurdo/configuration.nix
servers/losurdo/configuration.nix.gpg [new file with mode: 0644]
servers/losurdo/production.nix
servers/losurdo/production/dl10j.nix [deleted file]
servers/losurdo/production/fileSystems.nix [new file with mode: 0644]
servers/losurdo/production/zfs.nix [deleted file]
servers/losurdo/sfdisk.txt [deleted file]
servers/losurdo/sgdisk.backup [new file with mode: 0644]
servers/mermet/production/zfs.nix
shell.nix
shell/modules/tools/security/gnupg.nix

index f247f78bcd8ecf0d9a4de7522438214238be8d73..da792b43e348535af8895fd3ca3a8025a30e40d2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -11,6 +11,10 @@ send-pass:
        rsync -ai --delete ../sec/pass julm@sourcephile.fr:work/sourcephile/sec/
 recv-pass:
        rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/pass  ../sec/
+recv-gnupg:
+       rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/gnupg  ../sec/
+recv-ssh:
+       rsync -ai --delete julm@sourcephile.fr:work/sourcephile/sec/ssh  ../sec/
 
 tunnel-rspamd:
        ssh -nNTL 11334:localhost:11334 root@sourcephile.fr
index 5171e87a5a3e7e38fcd352218d90582e7e4510bc..78c84208142d89f4269da4a27fcec3e106b76fa9 100644 (file)
@@ -76,11 +76,9 @@ boot.kernelParams = [
   "zfs.zfs_arc_max=${toString (500 * 1024 * 1024)}" # bytes
 ];
 
-environment = {
-  systemPackages = with pkgs; [
-    pciutils
-    #flashrom
-    pkgs.nvme-cli
-  ];
-};
+environment.systemPackages = with pkgs; [
+  pciutils
+  #flashrom
+  nvme-cli
+];
 }
index 42a86f9443196681a4ba34dbe921995af7854332..fe8b7faa20f1208fb7ae16ee5b1249fec0cbbd91 100644 (file)
@@ -1,15 +1,11 @@
 { pkgs, lib, config, ... }:
 {
-# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
-# You should try to make this id unique among your machines.
-# Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
-networking.hostId = "69c40b03";
-
 # none is the recommended elevator with ZFS (which has its own I/O scheduler)
 # and/or for SSD, whereas HDD could use mq-deadline.
 services.udev.extraRules = ''
   # set none scheduler for non-rotating disks
   ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
+  ACTION=="add|change", KERNEL=="nvme[0-9]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
 '';
 
 boot.supportedFilesystems = [ "zfs" ];
index 1ac00c3721b9d6412db2d9a2f1eededfefefd797..c7d2cbbc5d5dc73014253eb47963a3ad46482754 100644 (file)
@@ -1,86 +1,74 @@
 #cwd := $(notdir $(patsubst %/,%,$(dir $(abspath $(lastword $(MAKEFILE_LIST))))))
-losurdo_disk        := /dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0
-#losurdo_disk        := /dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0
-#losurdo_cipher      :=
-losurdo_cipher      := aes-128-gcm
-losurdo_autotrim    :=
-losurdo_reservation := 1G
-#losurdo_channel     := $$(nix-env -p /nix/var/nix/profiles/per-user/$$USER/channels -q nixpkgs --no-name --out-path)
+#disk       := /dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0
+#disk       := /dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNJ0N211426T
+server      := losurdo
+disk        := $(shell sourcephile-nix-get nodes.$(server).config.boot.loader.grub.devices.0)
+partlabel   := $(server)_nvme
+rpool       := $(partlabel)
+cipher      := aes-128-gcm
+autotrim    := on
+reservation := 1G
 
-wipeout: umount
-       sudo zpool labelclear -f $(losurdo_disk)-part3 || true
-       sudo zpool labelclear -f $(losurdo_disk)-part5 || true
-       sudo $$(which sgdisk) --zap-all $(losurdo_disk)
-
-partition:
+wipeout:
        sudo modprobe zfs
-       set -x; if test -e sfdisk; then \
-               sudo $$(which sfdisk) $(losurdo_disk) <sfdisk.txt; \
-       else \
-               sudo $$(which sgdisk) --zap-all $(losurdo_disk) && \
-               sudo partprobe && \
-               sudo $$(which sgdisk) -a1 -n1:34:2047  -t1:EF02 $(losurdo_disk) && \
-               sudo $$(which sgdisk)     -n2:1M:+512M -t2:EF00 $(losurdo_disk) && \
-               sudo $$(which sgdisk)     -n3:0:+512M  -t3:8300 $(losurdo_disk) && \
-               sudo $$(which sgdisk)     -n4:0:+4G    -t4:8200 $(losurdo_disk) && \
-               sudo $$(which sgdisk)     -n5:0:0      -t5:BF01 $(losurdo_disk) && \
-               sudo $$(which sgdisk) --randomize-guids $(losurdo_disk) && \
-               sudo $$(which sfdisk) -d $(losurdo_disk) | \
-               sed -e 's&/dev/sd.&$(losurdo_disk)&' >sfdisk.txt; \
-       fi
+       sudo zpool labelclear -f $(disk)-part5 || true
+       sudo $$(which sgdisk) --zap-all $(disk)
+
+partition: wipeout
+       sudo $$(which sgdisk) -a1 -n0:34:2047 -t0:EF02 -c0:"$(partlabel)_bios"  $(disk)
+       sudo $$(which sgdisk)     -n0:1M:+8M  -t0:EF00 -c0:"$(partlabel)_efi"   $(disk)
+       sudo $$(which sgdisk)     -n0:0:+256M -t0:8300 -c0:"$(partlabel)_boot"  $(disk)
+       sudo $$(which sgdisk)     -n0:0:+8G   -t0:8200 -c0:"$(partlabel)_swap"  $(disk)
+       sudo $$(which sgdisk)     -n0:0:0     -t0:BF01 -c0:"$(partlabel)_rpool" $(disk)
+       sudo $$(which sgdisk) --randomize-guids $(disk)
+       sudo $$(which sgdisk) --backup=sgdisk.backup $(disk)
 
-format:
-       # DOC: https://github.com/zfsonlinux/zfs/wiki/Debian-Buster-Root-on-ZFS
-       sudo mkdir -p /mnt/losurdo
-       blkid -t TYPE=ext2 $(losurdo_disk)-part3; test $$? != 2 || \
-       mkfs.ext2 $(losurdo_disk)-part3
-       # swap
-       # Note: configured with a volatile key in losurdo.nix
-       #blkid -t TYPE=crypto_LUKS $(losurdo_disk)-part4; test $$? != 2 || \
-       #sudo cryptsetup luksFormat --cipher aes-xts-plain64 --key-size 256 --hash sha256 $(losurdo_disk)-part4
-       #sudo cryptsetup luksOpen $(losurdo_disk)-part4 swap
-       #blkid -t TYPE=swap /dev/mapper/-swap; test $$? != 2 || \
-       #sudo mkswap --check --label swap
-       #sudo cryptsetup luksClose $(losurdo_disk)-part4 swap
-       # rpool
-       sudo zpool list rpool 2>/dev/null || \
+format: format-efi format-boot format-rpool
+format-efi:
+       sudo blkid $(disk)-part2 -t TYPE=vfat || \
+       sudo mkfs.vfat -F 32 -s 1 -n EFI $(disk)-part2
+format-boot:
+       sudo mkdir -p /mnt/$(server)
+       sudo blkid -t TYPE=ext2 $(disk)-part3; test $$? != 2 || \
+       sudo mkfs.ext2 $(disk)-part3
+format-rpool:
+       sudo zpool list $(rpool) 2>/dev/null || \
        sudo zpool create -o ashift=12 \
-        $(if $(losurdo_cipher),-O encryption=$(losurdo_cipher) \
+        $(if $(cipher),-O encryption=$(cipher) \
         -O keyformat=passphrase \
         -O keylocation=prompt) \
         -O normalization=formD \
-        -R /mnt/losurdo rpool $(losurdo_disk)-part5
+        -R /mnt/$(server) $(rpool) $(disk)-part5
+       sudo zpool set \
+        autotrim=$(autotrim) \
+        $(rpool)
        sudo zfs set \
         acltype=posixacl \
         atime=off \
-        $(if $(losurdo_autotrim),autotrim=on) \
         canmount=off \
         compression=lz4 \
         dnodesize=auto \
         relatime=on \
         xattr=sa \
         mountpoint=/ \
-        rpool
+        $(rpool)
        # https://nixos.wiki/wiki/NixOS_on_ZFS#Reservations
-       sudo zfs list rpool/reserved 2>/dev/null || \
-       sudo zfs create -o canmount=off -o mountpoint=none rpool/reserved
-       sudo zfs set refreservation=$(losurdo_reservation) rpool/reserved
+       sudo zfs list $(rpool)/reserved 2>/dev/null || \
+       sudo zfs create -o canmount=off -o mountpoint=none $(rpool)/reserved
+       sudo zfs set refreservation=$(reservation) $(rpool)/reserved
        # /
-       # NOTE: mountpoint=legacy is required to let NixOS mount the ZFS filesystems.
-       sudo zfs list rpool/root 2>/dev/null || \
+       # mountpoint=legacy is required to let NixOS mount the ZFS filesystems.
+       sudo zfs list $(rpool)/root 2>/dev/null || \
        sudo zfs create \
         -o canmount=on \
         -o mountpoint=legacy \
-        rpool/root
+        $(rpool)/root
        # /boot
        #sudo zfs list bpool/boot 2>/dev/null || \
        #sudo zfs create \
        # -o canmount=on \
        # -o mountpoint=legacy \
        # bpool/boot
-       # /boot/efi
-       sudo blkid $(losurdo_disk)-part2 -t TYPE=vfat || \
-       sudo mkfs.vfat -F 32 -s 1 -n EFI $(losurdo_disk)-part2
        # /*
        for p in \
         home \
@@ -90,48 +78,38 @@ format:
         var/log \
         var/tmp \
         ; do \
-               sudo zfs list rpool/"$$p" 2>/dev/null || \
+               sudo zfs list $(rpool)/"$$p" 2>/dev/null || \
                sudo zfs create \
                 -o canmount=on \
                 -o mountpoint=legacy \
-                rpool/"$$p" ; \
+                $(rpool)/"$$p" ; \
         done
        sudo zfs set \
         com.sun:auto-snapshot=false \
-        rpool/nix
+        $(rpool)/nix
        sudo zfs set \
         com.sun:auto-snapshot=false \
-        rpool/var/cache
+        $(rpool)/var/cache
        sudo zfs set \
         com.sun:auto-snapshot=false \
         sync=disabled \
-        rpool/var/tmp
+        $(rpool)/var/tmp
 
-mount:
+mount: mount-rpool mount-boot mount-efi
+mount-rpool:
        # scan needed zpools
-       #sudo zpool list bpool || \
-       #sudo zpool import -f bpool
-       sudo zpool list rpool || \
-       sudo zpool import -f rpool
+       sudo zpool list $(rpool) || \
+       sudo zpool import -f $(rpool)
        # load encryption key
-       sudo zfs get -H encryption rpool | \
-       grep -q '^rpool\s*encryption\s*off' || \
-       sudo zfs get -H keystatus rpool | \
-       grep -q '^rpool\s*keystatus\s*available' || \
-       sudo zfs load-key rpool
+       sudo zfs get -H encryption $(rpool) | \
+       grep -q '^$(rpool)\s*encryption\s*off' || \
+       sudo zfs get -H keystatus $(rpool) | \
+       grep -q '^$(rpool)\s*keystatus\s*available' || \
+       sudo zfs load-key $(rpool)
        # /
-       sudo mkdir -p /mnt/losurdo
-       sudo mountpoint /mnt/losurdo || \
-       sudo mount -v -t zfs rpool/root /mnt/losurdo
-       # /boot
-       sudo mkdir -p /mnt/losurdo/boot
-       sudo mountpoint /mnt/losurdo/boot || \
-       sudo mount -v $(losurdo_disk)-part3 /mnt/losurdo/boot
-       #sudo mount -v -t zfs bpool/boot /mnt/losurdo/boot
-       # /boot/efi
-       sudo mkdir -p /mnt/losurdo/boot/efi
-       sudo mountpoint /mnt/losurdo/boot/efi || \
-       sudo mount -v $(losurdo_disk)-part2 /mnt/losurdo/boot/efi
+       sudo mkdir -p /mnt/$(server)
+       sudo mountpoint /mnt/$(server) || \
+       sudo mount -v -t zfs $(rpool)/root /mnt/$(server)
        # /*
        for p in \
         home \
@@ -141,58 +119,37 @@ mount:
         var/log \
         var/tmp \
         ; do \
-               sudo mkdir -p /mnt/losurdo/"$$p"; \
-               sudo mountpoint /mnt/losurdo/"$$p" || \
-               sudo mount -v -t zfs rpool/"$$p" /mnt/losurdo/"$$p" ; \
+               sudo mkdir -p /mnt/$(server)/"$$p"; \
+               sudo mountpoint /mnt/$(server)/"$$p" || \
+               sudo mount -v -t zfs $(rpool)/"$$p" /mnt/$(server)/"$$p" ; \
         done
-       sudo chmod 1777 /mnt/losurdo/var/tmp
+       sudo chmod 1777 /mnt/$(server)/var/tmp
+mount-boot:
+       sudo mkdir -p /mnt/$(server)/boot
+       sudo mountpoint /mnt/$(server)/boot || \
+       sudo mount -v $(disk)-part3 /mnt/$(server)/boot
+       #sudo mount -v -t zfs bpool/boot /mnt/$(server)/boot
+mount-efi: | mount-boot
+       sudo mkdir -p /mnt/$(server)/boot/efi
+       sudo mountpoint /mnt/$(server)/boot/efi || \
+       sudo mount -v $(disk)-part2 /mnt/$(server)/boot/efi
 
 bootstrap: mount
-       #test "$$(sudo grub-probe /mnt/losurdo/boot)" = zfs
-       # NOTE: nixos-install will install GRUB following losurdo.nix
-       # BIOS
-       #sudo grub-install $(losurdo_disk)
-       # UEFI
-       #sudo grub-install \
-       # --target=x86_64-efi \
-       # --efi-directory=/mnt/losurdo/boot/efi \
-       # --bootloader-id=nixos \
-       # --recheck \
-       # --no-floppy
+       # Workaround https://dev.gnupg.org/T3908
+       chmod o+rw $$GPG_TTY $$XAUTHORITY
        
-       # Run pass as root to start gpg-agent as root not as $USER
-       # otherwise the yubikey has to be unplugged/replugged…
-       sudo \
-        GNUPGHOME="$$GNUPGHOME" \
-        GPG_TTY="$$GPG_TTY" \
-        PASSWORD_STORE_DIR="$$PASSWORD_STORE_DIR" \
-        PINENTRY_USER_DATA="$$PINENTRY_USER_DATA" \
-        XAUTHORITY="$$XAUTHORITY" \
-        pass servers/losurdo/dropbear/ecdsa.key | \
-       sudo install -D -o root -g root -m 400 /dev/stdin \
-        /mnt/losurdo/etc/dropbear/ecdsa.key && \
-       test -s /mnt/losurdo/etc/dropbear/ecdsa.key
-       
-       #trap "test ! -e SHRED-ME || sudo find SHRED-ME -type f -exec shred -u {} + && sudo rm -rf SHRED-ME" EXIT ;
-       sudo \
-        GNUPGHOME="$$GNUPGHOME" \
-        GPG_TTY="$$GPG_TTY" \
-        LANG="$$LANG" \
-        LC_CTYPE="$$LC_CTYPE" \
-        LOSURDO_DEPLOYMENT="$$LOSURDO_DEPLOYMENT" \
-        NIXOS_CONFIG="$$(readlink -e configuration.nix)" \
-        NIX_CONF_DIR="$$NIX_CONF_DIR" \
-        NIX_PATH="$$NIX_PATH" \
-        PASSWORD_STORE_DIR="$$PASSWORD_STORE_DIR" \
-        PATH="$$PATH" \
-        PINENTRY_USER_DATA="$$PINENTRY_USER_DATA" \
-        SSL_CERT_FILE="$$SSL_CERT_FILE" \
-        XAUTHORITY="$$XAUTHORITY" \
-        $$(which nixos-install) \
-        --root /mnt/losurdo \
-        $(if $(losurdo_channel),--channel "$(losurdo_channel)") \
+       sudo --preserve-env \
+       NIXOS_CONFIG="$$PWD/configuration.nix" \
+       $$(which nixos-install) \
+        --root /mnt/$(server) \
         --no-root-passwd \
+        --no-channel-copy \
         --show-trace
+       
+       # End workaround https://dev.gnupg.org/T3908
+       chmod o-rw $$GPG_TTY $$XAUTHORITY
+       
+       sudo sourcephile-shred-tmp
 
 umount:
        for p in \
@@ -206,21 +163,21 @@ umount:
         var \
         "" \
         ; do \
-               ! sudo mountpoint /mnt/losurdo/"$$p" || \
-               sudo umount -v /mnt/losurdo/"$$p" ; \
+               ! sudo mountpoint /mnt/$(server)/"$$p" || \
+               sudo umount -v /mnt/$(server)/"$$p" ; \
         done
-       ! sudo zpool list rpool 2>/dev/null || \
-       zfs get -H encryption rpool | \
-       grep -q '^rpool\s*encryption\s*off' || \
-       zfs get -H keystatus rpool | \
-       grep -q '^rpool\s*keystatus\s*unavailable' || \
-       sudo zfs unload-key rpool
+       ! sudo zpool list $(rpool) 2>/dev/null || \
+       zfs get -H encryption $(rpool) | \
+       grep -q '^$(rpool)\s*encryption\s*off' || \
+       zfs get -H keystatus $(rpool) | \
+       grep -q '^$(rpool)\s*keystatus\s*unavailable' || \
+       sudo zfs unload-key $(rpool)
        #! sudo zpool list bpool 2>/dev/null || \
        #sudo zpool export bpool
-       ! sudo zpool list rpool 2>/dev/null || \
-       sudo zpool export rpool
+       ! sudo zpool list $(rpool) 2>/dev/null || \
+       sudo zpool export $(rpool)
 
 unlock:
-       pass servers/losurdo/zfs/rpool | \
+       pass servers/$(server)/zfs/$(rpool) | \
        NIXOPS_DEPLOYMENT="$${NIXOPS_DEPLOYMENT:-$(LOSURDO_DEPLOYMENT)}" \
-       nixops ssh losurdo -p 2222 'zfs load-key rpool && pkill zfs'
+       nixops ssh $(server) -p 2222 'zfs load-key $(rpool) && pkill zfs'
index 3eab8d02c8593675a9c649596903d0c18ad61a22..74f57e0e0ed3d58c814926903d92d7a22eb7ebcc 100644 (file)
@@ -6,6 +6,7 @@
 let
   inherit (builtins) getEnv hasAttr readFile;
   inherit (builtins.extraBuiltins) pass pass-chomp;
+  inherit (config.users) users;
 in
 {
 # This value determines the NixOS release with which your system is to be
@@ -14,7 +15,7 @@ in
 system.stateVersion = "19.09"; # Did you read the comment?
 
 nix = {
-  trustedUsers = [ "julm" ];
+  trustedUsers = [ users."julm".name ];
 };
 
 imports = [
@@ -22,8 +23,8 @@ imports = [
   ../../defaults.nix
   ../../base/unbound.nix
 ] ++ lib.optionals (! hasAttr "nodes" attrs) [
-  <nixops/share/nix/nixops/options.nix>
-  <nixops/share/nix/nixops/resource.nix>
+  <nixops/options.nix>
+  <nixops/resource.nix>
 ];
 
 networking = rec {
@@ -61,10 +62,10 @@ users = {
   };
   groups = {
     wheel = {
-      members = [ "julm" ];
+      members = [ users."julm".name ];
     };
     julm = {
-      members = [ "julm" ];
+      members = [ users."julm".name ];
       gid = 1000;
     };
   };
diff --git a/servers/losurdo/configuration.nix.gpg b/servers/losurdo/configuration.nix.gpg
new file mode 100644 (file)
index 0000000..7c9ab4c
Binary files /dev/null and b/servers/losurdo/configuration.nix.gpg differ
index c10d3dc48a26f4112a87c821316c13e0e9f2a568..962f0955513cb5860c2cccf259f3e57f59a19dd8 100644 (file)
@@ -2,14 +2,14 @@
 let inherit (config) networking; in
 {
 imports = [
-  production/dl10j.nix
+  ../../base/dl10j.nix
   production/networking.nix
-  production/zfs.nix
+  production/fileSystems.nix
   production/shorewall.nix
 ];
 deployment = {
   targetEnv = "none";
-  targetHost = (builtins.elemAt networking.interfaces.enp1s0.ipv4.addresses 0).address;
+  targetHost = (builtins.elemAt networking.interfaces.enp5s0.ipv4.addresses 0).address;
   keys = {
   };
 };
diff --git a/servers/losurdo/production/dl10j.nix b/servers/losurdo/production/dl10j.nix
deleted file mode 100644 (file)
index 3a856cf..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-{ pkgs, lib, config, ... }:
-{
-imports = [
-  ../../../base/dl10j.nix
-];
-
-boot.loader.grub.devices = [
-  "/dev/disk/by-id/usb-Generic-_Multi-Card_20071114173400000-0:0"
-];
-
-fileSystems."/boot" =
-  { device = "/dev/disk/by-uuid/0ea6842a-9e95-45ce-b2a9-8639918875f3";
-    fsType = "ext2";
-  };
-
-fileSystems."/boot/efi" =
-  { device = "/dev/disk/by-uuid/6BB1-D61E";
-    fsType = "vfat";
-  };
-
-swapDevices =
-  [ { device = "/dev/disk/by-partuuid/a0bd2fd9-2df3-4626-bc7b-db9da714eba5";
-      randomEncryption = {
-        enable = true;
-        cipher = "aes-xts-plain64";
-        source = "/dev/urandom";
-      };
-    }
-  ];
-
-environment = {
-  systemPackages = with pkgs; [
-    pciutils
-    flashrom
-  ];
-};
-}
diff --git a/servers/losurdo/production/fileSystems.nix b/servers/losurdo/production/fileSystems.nix
new file mode 100644 (file)
index 0000000..abf36e6
--- /dev/null
@@ -0,0 +1,82 @@
+{ pkgs, lib, config, ... }:
+{
+imports = [
+  ../../../base/zfs.nix
+];
+
+# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
+# You should try to make this id unique among your machines.
+# Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
+networking.hostId = "69c40b03";
+
+/*
+# Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
+services.zfs.autoSnapshot = {
+  enable   = true;
+  frequent = ;
+  hourly   = ;
+  daily    = ;
+  weekly   = ;
+  monthly  = ;
+};
+*/
+
+boot.loader.grub.devices = [
+  "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_250GB_S4EUNJ0N211426T"
+];
+
+fileSystems."/boot" =
+  { device = "/dev/disk/by-partlabel/losurdo_nvme_boot";
+    fsType = "ext2";
+  };
+
+fileSystems."/boot/efi" =
+  { device = "/dev/disk/by-partlabel/losurdo_nvme_efi";
+    fsType = "vfat";
+  };
+
+swapDevices =
+  [ { device = "/dev/disk/by-partlabel/losurdo_nvme_swap";
+      randomEncryption = {
+        enable = true;
+        cipher = "aes-xts-plain64";
+        source = "/dev/urandom";
+      };
+    }
+  ];
+
+fileSystems."/" =
+  { device = "losurdo_nvme/root";
+    fsType = "zfs";
+  };
+
+fileSystems."/home" =
+  { device = "losurdo_nvme/home";
+    fsType = "zfs";
+  };
+
+fileSystems."/nix" =
+  { device = "losurdo_nvme/nix";
+    fsType = "zfs";
+  };
+
+fileSystems."/var" =
+  { device = "losurdo_nvme/var";
+    fsType = "zfs";
+  };
+
+fileSystems."/var/cache" =
+  { device = "losurdo_nvme/var/cache";
+    fsType = "zfs";
+  };
+
+fileSystems."/var/log" =
+  { device = "losurdo_nvme/var/log";
+    fsType = "zfs";
+  };
+
+fileSystems."/var/tmp" =
+  { device = "losurdo_nvme/var/tmp";
+    fsType = "zfs";
+  };
+}
diff --git a/servers/losurdo/production/zfs.nix b/servers/losurdo/production/zfs.nix
deleted file mode 100644 (file)
index 9046c3f..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-{ pkgs, lib, config, ... }:
-
-{
-  imports = [];
-
-  boot.supportedFilesystems = [ "zfs" ];
-
-  # The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
-  # You should try to make this id unique among your machines.
-  # Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
-  networking.hostId = "69c40b03";
-
-  # none is the recommended elevator with ZFS (which has its own I/O scheduler)
-  # and/or for SSD, whereas HDD could use mq-deadline.
-  services.udev.extraRules = ''
-    # set none scheduler for non-rotating disks
-    ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
-  '';
-
-  # Ensure extra safeguards are active that zfs uses to protect zfs pools.
-  boot.zfs.forceImportAll  = false;
-  boot.zfs.forceImportRoot = false;
-
-  boot.zfs.enableUnstable = true;
-  boot.zfs.requestEncryptionCredentials = true;
-
-  # Enables periodic scrubbing of ZFS pools.
-  services.zfs.autoScrub.enable = true;
-
-  environment = {
-    systemPackages = [
-      pkgs.mbuffer
-      pkgs.zfs
-    ];
-  };
-
-  /*
-  # Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
-  services.zfs.autoSnapshot = {
-    enable   = true;
-    frequent = ;
-    hourly   = ;
-    daily    = ;
-    weekly   = ;
-    monthly  = ;
-  };
-  */
-
-  /*
-  fileSystems."/boot" =
-    { device = "bpool/boot";
-      fsType = "zfs";
-    };
-  */
-  fileSystems."/" =
-    { device = "rpool/root";
-      fsType = "zfs";
-    };
-
-  fileSystems."/home" =
-    { device = "rpool/home";
-      fsType = "zfs";
-    };
-
-  fileSystems."/nix" =
-    { device = "rpool/nix";
-      fsType = "zfs";
-    };
-
-  fileSystems."/var" =
-    { device = "rpool/var";
-      fsType = "zfs";
-    };
-
-  fileSystems."/var/cache" =
-    { device = "rpool/var/cache";
-      fsType = "zfs";
-    };
-
-  fileSystems."/var/log" =
-    { device = "rpool/var/log";
-      fsType = "zfs";
-    };
-
-  fileSystems."/var/tmp" =
-    { device = "rpool/var/tmp";
-      fsType = "zfs";
-    };
-}
diff --git a/servers/losurdo/sfdisk.txt b/servers/losurdo/sfdisk.txt
deleted file mode 100644 (file)
index 8ddee30..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-label: gpt
-label-id: 6E079BB6-95AF-4401-94D7-16D9E788F62D
-device: /dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0
-unit: sectors
-first-lba: 34
-last-lba: 60628958
-
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part1 : start=          34, size=        2014, type=21686148-6449-6E6F-744E-656564454649, uuid=11C369DF-5A4A-49DF-AF25-F97B559E63E6
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part2 : start=        2048, size=     1048576, type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=1A135842-E7B1-48E1-A398-10BDDED9FD3D
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part3 : start=     1050624, size=     1048576, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=86268225-60B9-496C-B06D-6EFF3E6E2F37
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part4 : start=     2099200, size=     8388608, type=0657FD6D-A4AB-43C4-84E5-0933C84B4F4F, uuid=A9DE6216-AD9A-44A1-BA0B-4C5790D0B1DD
-/dev/disk/by-id/usb-_USB_DISK_2.0_07009A834986F483-0:0-part5 : start=    10487808, size=    50141151, type=6A898CC3-1DD2-11B2-99A6-080020736631, uuid=A26AF596-86D7-4892-9904-EBD620ED37D4
diff --git a/servers/losurdo/sgdisk.backup b/servers/losurdo/sgdisk.backup
new file mode 100644 (file)
index 0000000..c48018a
Binary files /dev/null and b/servers/losurdo/sgdisk.backup differ
index c4a7665a2b5fa37de710cec77dc4e475ccfd2c61..c1a4d5e8b04dba828c5b0ac4213b397f69797d69 100644 (file)
@@ -4,6 +4,11 @@ imports = [
   ../../../base/zfs.nix
 ];
 
+# The 32-bit host id of the machine, formatted as 8 hexadecimal characters.
+# You should try to make this id unique among your machines.
+# Manually generated with : head -c4 /dev/urandom | od -A none -t x4 | cut -d ' ' -f 2
+networking.hostId = "69c40b03";
+
 /*
 # Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
 services.zfs.autoSnapshot = {
index 6dd94341bb36ea19e2a99908bfd2a9da4fb29b60..967d49c68f91101c4e9f3be2bd64063893411064 100644 (file)
--- a/shell.nix
+++ b/shell.nix
@@ -87,6 +87,7 @@ let
       gnupgHome = toString ../sec/gnupg;
       keys = import shell/openpgp.nix;
       gpgExtraConf = ''
+        # julm@sourcephile.fr
         trusted-key 0xB2450D97085B7B8C
       '';
     };
@@ -124,6 +125,24 @@ let
       inherit pkgs lib;
       modules = [ configuration ];
     }).config;
+
+  # Utility to query the Nix files like nixops show-option
+  # but without requiring a nixops database, and not limited to config entries.
+  pwd = toString (./. + "");
+  sourcephile-nix-get = pkgs.writeShellScriptBin "sourcephile-nix-get" ''
+    nix-instantiate --read-write-mode \
+     --arg networkExprs "[${pwd}/servers.nix ${pwd}/servers/production.nix]" \
+     --arg args '{}' --argstr "uuid" whatever \
+     --argstr deploymentName production "<nixops/eval-machine-info.nix>" \
+     --eval-only --strict --arg checkConfigurationOptions false -A "$@"
+  '';
+  sourcephile-shred-tmp = pkgs.writeShellScriptBin "sourcephile-shred-tmp" ''
+    # Cleanup "../sec/tmp/"
+    # This is done when entering the nix-shell
+    # because direnv already hooks trap EXIT.
+    cd "${pwd}"
+    find ../sec/tmp -type f -exec shred -fu {} +
+  '';
 in
 pkgs.mkShell {
   name = "sourcephile-nix";
@@ -131,6 +150,8 @@ pkgs.mkShell {
   #preferLocalBuild = true;
   #allowSubstitutes = false;
   buildInputs = modules.nix-shell.buildInputs ++ [
+    sourcephile-nix-get
+    sourcephile-shred-tmp
     nixos.nixos-generate-config
     nixos.nixos-install
     nixos.nixos-enter
@@ -190,13 +211,14 @@ pkgs.mkShell {
 
     # Nix
     PATH=$NIX_SHELL_PATH:$PATH
-    export NIX_PATH="nixpkgs=${toString pkgs.path}:nixpkgs-overlays="$PWD"/overlays:nixops=${toString pkgs.nixops}"
+    export NIX_PATH="nixpkgs=${toString pkgs.path}:nixpkgs-overlays="$PWD"/overlays:nixops=${toString pkgs.nixops}/share/nix/nixops"
     export nixpkgs_channel=${nixpkgs_channel}
 
-    # Cleanup "../sec/tmp/"
-    # This is done when entering the nix-shell
-    # because direnv already hooks trap EXIT.
-    (cd "$PWD" && find ../sec/tmp -type f -exec shred -fu {} +)
+    # Since the .envrc calls this shellHook
+    # the EXIT trap cannot be freely used
+    # because it's already used by direnv,
+    # hence shred at startup, which is not ideal.
+    sourcephile-shred-tmp
 
     ${modules.nix-shell.shellHook}
 
@@ -207,12 +229,10 @@ pkgs.mkShell {
     export GPG_TTY=$(tty)
     gpg-connect-agent updatestartuptty /bye >/dev/null
 
-    # NixOS
-    export MERMET_DEPLOYMENT=production
-    export LOSURDO_DEPLOYMENT=production
-
     # nixops
     #export NIXOPS_DEPLOYMENT="staging"
+    export MERMET_DEPLOYMENT=production
+    export LOSURDO_DEPLOYMENT=production
     export NIXOPS_STATE="$PWD"/../sec/nixops/state.nixops
     NIXOPS_OPTS+=" --show-trace"
     export NIXOPS_OPTS
index ce0eb69901265b47c5d12bc22519dd7a4378ef28..689792170870c88810df666033f9433884c252f3 100644 (file)
@@ -413,27 +413,26 @@ options.gnupg = {
         # choose pinentry depending on PINENTRY_USER_DATA
         # this *only works* with gpg2
         # see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=802020
-        set -x
-        case $PINENTRY_USER_DATA in
-        "")
-          exec pinentry-tty "$@";;
-        curses|emacs|gnome3|gtk-2|qt|tty)
-          exec pinentry-$PINENTRY_USER_DATA "$@";;
-        none) exit 1;; # do not ask for passphrase
-        *)
-          exec ${pkgs.pinentry.gtk2}/bin/pinentry-gtk-2 "$@"
+        case "''${PINENTRY_USER_DATA:-tty}" in
+        curses) exec ${pkgs.pinentry.curses}/bin/pinentry-curses "$@";;
+        #emacs)  exec ''${pkgs.pinentry.emacs}/bin/pinentry-emacs "$@";;
+        #gnome3) exec ''${pkgs.pinentry.gnome3}/bin/pinentry-gnome3 "$@";;
+        gtk-2)  exec ${pkgs.pinentry.gtk2}/bin/pinentry-gtk-2 "$@";;
+        none)   exit 1;; # do not ask for passphrase
+        #qt)     exec ''${pkgs.pinentry.qt}/bin/pinentry-qt "$@";;
+        tty)    exec ${pkgs.pinentry.tty}/bin/pinentry-tty "$@";;
         esac
       '';
     in ''
+      allow-loopback-pinentry
       allow-preset-passphrase
       default-cache-ttl 17200
       default-cache-ttl-ssh 17200
       enable-ssh-support
       max-cache-ttl 17200
       max-cache-ttl-ssh 17200
-      pinentry-program ${pinentry}
       no-allow-external-cache
-      #pinentry-program ${pkgs.pinentry}/bin/pinentry
+      pinentry-program ${pinentry}
     '';
     description = ''
       GnuPG's gpg-agent.conf content.