Today I used nixos-anywhere to format and set up a system that has the following drives:
[root@homecloud:~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS
sda 8:0 0 3.6T 0 disk
sdb 8:16 0 3.6T 0 disk
sdc 8:32 0 3.6T 0 disk
sdd 8:48 0 3.6T 0 disk
nvme0n1 259:0 0 465.8G 0 disk
├─nvme0n1p1 259:1 0 128M 0 part
└─nvme0n1p2 259:2 0 465.6G 0 part /nix/store
/
nvme1n1 259:3 0 465.8G 0 disk
├─nvme1n1p1 259:4 0 487M 0 part
└─nvme1n1p2 259:5 0 465.3G 0 part
My disko config looks like this:
{lib, ...}: {
  disko.devices = {
    disk = {
      m0 = {
        type = "disk";
        device = "/dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379";
        content = {
          type = "gpt";
          partitions = {
            boot = {
              size = "1M";
              type = "EF02"; # for grub MBR
            };
            mdadm = {
              size = "100%";
              content = {
                type = "mdraid";
                name = "mirror";
              };
            };
          };
        };
      };
      m1 = {
        type = "disk";
        device = "/dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767";
        content = {
          type = "gpt";
          partitions = {
            boot = {
              size = "1M";
              type = "EF02"; # for grub MBR
            };
            mdadm = {
              size = "100%";
              content = {
                type = "mdraid";
                name = "mirror";
              };
            };
          };
        };
      };
      pool0 = {
        type = "disk";
        device = "/dev/disk/by-id/wwn-0x50014ee26a232ee1";
        content = {
          type = "gpt";
          partitions = {
            zfs = {
              size = "100%";
              content = {
                type = "zfs";
                pool = "data";
              };
            };
          };
        };
      };
      pool1 = {
        type = "disk";
        device = "/dev/disk/by-id/wwn-0x50014ee214cdf86e";
        content = {
          type = "gpt";
          partitions = {
            zfs = {
              size = "100%";
              content = {
                type = "zfs";
                pool = "data";
              };
            };
          };
        };
      };
      pool2 = {
        type = "disk";
        device = "/dev/disk/by-id/wwn-0x50014ee214cdff57";
        content = {
          type = "gpt";
          partitions = {
            zfs = {
              size = "100%";
              content = {
                type = "zfs";
                pool = "data";
              };
            };
          };
        };
      };
      pool3 = {
        type = "disk";
        device = "/dev/disk/by-id/wwn-0x50014ee214cdfe15";
        content = {
          type = "gpt";
          partitions = {
            zfs = {
              size = "100%";
              content = {
                type = "zfs";
                pool = "data";
              };
            };
          };
        };
      };
    };
    mdadm = {
      mirror = {
        type = "mdadm";
        level = 1;
        content = {
          type = "gpt";
          partitions = {
            primary = {
              size = "100%";
              content = {
                type = "filesystem";
                format = "ext4";
                mountpoint = "/";
              };
            };
          };
        };
      };
    };
    zpool = {
      data = {
        type = "zpool";
        options = {
          ashift = "12"; # 4 KiB block allocation size
          atime = "off"; # do not update access time
          compression = "lz4";
          xattr = "sa"; # store extended attributes in the inode
        };
        # rootFsOptions = {
        #   # https://wiki.archlinux.org/title/Install_Arch_Linux_on_ZFS
        #   # acltype = "posixacl";
        #   # atime = "off";
        #   # mountpoint = "none";
        #   # xattr = "sa";
        # };
        datasets = {
          umbrel = {
            type = "zfs_volume";
            size = "80%";
          };
          # "local/home" = {
          #   type = "zfs_fs";
          #   mountpoint = "/home";
          #   # Used by services.zfs.autoSnapshot options.
          #   options."com.sun:auto-snapshot" = "true";
          # };
          # TODO: change this so that it presents a disk for a VM
        };
      };
    };
  };
}
(Note the RAID1 array, which is to be mounted at "/".)
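As a pointer for the log below: disko assembles that mirror from the by-partlabel symlinks it creates on the two NVMe drives. A quick sanity check from the installer shell (just a hypothetical helper, not something disko runs itself) is:

udevadm settle   # give udev time to create the symlinks
ls -l /dev/disk/by-partlabel/disk-m0-mdadm /dev/disk/by-partlabel/disk-m1-mdadm

Both links have to exist before the mdadm --create step near the end of the log can succeed.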
During formatting with disko, this is the log I get:
### Formatting hard drive with disko ###
Warning: Permanently added '192.168.1.11' (ED25519) to the list of known hosts.
umount: /mnt: not mounted
++ realpath /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ disk=/dev/nvme0n1
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0
squash 4.0 0 100% /nix/.ro-store
loop1
erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
├─nvme0n1p1
│ vfat FAT16 A272-4F58
└─nvme0n1p2
ext4 1.0 8fc66bbd-3bb2-40c7-a1f7-32c111468c2c
nvme1n1
├─nvme1n1p1
│ vfat FAT32 B6C2-8563
└─nvme1n1p2
linux_ 1.2 nixos:2 fe3011ec-db0e-400a-7586-017afda6c22c
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/nvme0n1 -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
+ wipefs --all -f /dev/nvme0n1p1
/dev/nvme0n1p1: 8 bytes were erased at offset 0x00000036 (vfat): 46 41 54 31 36 20 20 20
/dev/nvme0n1p1: 1 byte was erased at offset 0x00000000 (vfat): eb
/dev/nvme0n1p1: 2 bytes were erased at offset 0x000001fe (vfat): 55 aa
+ wipefs --all -f /dev/nvme0n1p2
/dev/nvme0n1p2: 2 bytes were erased at offset 0x00000438 (ext4): 53 ef
++ type zdb
++ zdb -l /dev/nvme0n1
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/nvme0n1 -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/nvme0n1
/dev/nvme0n1: 8 bytes were erased at offset 0x00000200 (gpt): 45 46 49 20 50 41 52 54
/dev/nvme0n1: 8 bytes were erased at offset 0x7470c05e00 (gpt): 45 46 49 20 50 41 52 54
/dev/nvme0n1: 2 bytes were erased at offset 0x000001fe (PMBR): 55 aa
+ dd if=/dev/zero of=/dev/nvme0n1 bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 0.000199169 s, 2.2 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0
squash 4.0 0 100% /nix/.ro-store
loop1
erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
│ vfat FAT32 B6C2-8563
└─nvme1n1p2
linux_ 1.2 nixos:2 fe3011ec-db0e-400a-7586-017afda6c22c
└─md127
++ realpath /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
+ disk=/dev/nvme1n1
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0
squash 4.0 0 100% /nix/.ro-store
loop1
erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
│ vfat FAT32 B6C2-8563
└─nvme1n1p2
linux_ 1.2 nixos:2 fe3011ec-db0e-400a-7586-017afda6c22c
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/nvme1n1 -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
+ wipefs --all -f /dev/nvme1n1p1
/dev/nvme1n1p1: 8 bytes were erased at offset 0x00000052 (vfat): 46 41 54 33 32 20 20 20
/dev/nvme1n1p1: 1 byte was erased at offset 0x00000000 (vfat): eb
/dev/nvme1n1p1: 2 bytes were erased at offset 0x000001fe (vfat): 55 aa
++ type zdb
++ zdb -l /dev/md127
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
+ echo Warning: unknown type md. Consider handling this in https://github.com/nix-community/disko/blob/master/disk-deactivate/disk-deactivate.jq
Warning: unknown type md. Consider handling this in https://github.com/nix-community/disko/blob/master/disk-deactivate/disk-deactivate.jq
+ wipefs --all -f /dev/nvme1n1p2
/dev/nvme1n1p2: 4 bytes were erased at offset 0x00001000 (linux_raid_member): fc 4e 2b a9
++ type zdb
++ zdb -l /dev/nvme1n1
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/nvme1n1 -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/nvme1n1
/dev/nvme1n1: 8 bytes were erased at offset 0x00000200 (gpt): 45 46 49 20 50 41 52 54
/dev/nvme1n1: 8 bytes were erased at offset 0x7470c05e00 (gpt): 45 46 49 20 50 41 52 54
/dev/nvme1n1: 2 bytes were erased at offset 0x000001fe (PMBR): 55 aa
+ dd if=/dev/zero of=/dev/nvme1n1 bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 3.3826e-05 s, 13.0 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
++ realpath /dev/disk/by-id/wwn-0x50014ee26a232ee1
+ disk=/dev/sda
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/sda -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
++ type zdb
++ zdb -l /dev/sda
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/sda -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/sda
+ dd if=/dev/zero of=/dev/sda bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 0.000164165 s, 2.7 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
++ realpath /dev/disk/by-id/wwn-0x50014ee214cdf86e
+ disk=/dev/sdc
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/sdc -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
++ type zdb
++ zdb -l /dev/sdc
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/sdc -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/sdc
+ dd if=/dev/zero of=/dev/sdc bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 0.000161421 s, 2.7 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
++ realpath /dev/disk/by-id/wwn-0x50014ee214cdff57
+ disk=/dev/sdb
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/sdb -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
++ type zdb
++ zdb -l /dev/sdb
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/sdb -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/sdb
+ dd if=/dev/zero of=/dev/sdb bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 0.000163915 s, 2.7 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
++ realpath /dev/disk/by-id/wwn-0x50014ee214cdfe15
+ disk=/dev/sdd
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
+ lsblk --output-all --json
+ bash -x
++ dirname /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate
+ jq -r --arg disk_to_clear /dev/sdd -f /nix/store/3ys93zsghig63qpcs67vc37x0rwn1zhl-disk-deactivate/disk-deactivate.jq
+ set -fu
++ type zdb
++ zdb -l /dev/sdd
++ sed -nr 's/ +name: '\''(.*)'\''/\1/p'
+ zpool=
+ [[ -n '' ]]
+ unset zpool
++ lsblk /dev/sdd -l -p -o type,name
++ awk 'match($1,"raid.*") {print $2}'
+ md_dev=
+ [[ -n '' ]]
+ wipefs --all -f /dev/sdd
+ dd if=/dev/zero of=/dev/sdd bs=440 count=1
1+0 records in
1+0 records out
440 bytes copied, 0.000165392 s, 2.7 MB/s
+ lsblk -a -f
NAME FSTYPE FSVER LABEL UUID FSAVAIL FSUSE% MOUNTPOINTS
loop0 squashfs 4.0 0 100% /nix/.ro-store
loop1 erofs 0 100% /run/etc-metadata
loop2
loop3
loop4
loop5
loop6
loop7
sda
sdb
sdc
sdd
nvme0n1
nvme1n1
├─nvme1n1p1
└─nvme1n1p2
└─md127
++ mktemp -d
+ disko_devices_dir=/tmp/tmp.WCf0LdW70Y
+ trap 'rm -rf "$disko_devices_dir"' EXIT
+ mkdir -p /tmp/tmp.WCf0LdW70Y
+ device=/dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ imageName=m0
+ imageSize=2G
+ name=m0
+ type=disk
+ device=/dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ sgdisk --clear /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
Creating new GPT entries in memory.
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:+1M --change-name=1:disk-m0-boot --typecode=1:EF02 /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
The operation has completed successfully.
+ partprobe /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ sgdisk --align-end --new=2:0:-0 --change-name=2:disk-m0-mdadm --typecode=2:8300 /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
The operation has completed successfully.
+ partprobe /dev/disk/by-id/nvme-CT500P3PSSD8_2246E6862379
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-m0-mdadm
+ name=mirror
+ type=mdraid
+ echo /dev/disk/by-partlabel/disk-m0-mdadm
+ device=/dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
+ imageName=m1
+ imageSize=2G
+ name=m1
+ type=disk
+ device=/dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
+ sgdisk --clear /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
Creating new GPT entries in memory.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot or after you
run partprobe(8) or kpartx(8)
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:+1M --change-name=1:disk-m1-boot --typecode=1:EF02 /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot or after you
run partprobe(8) or kpartx(8)
The operation has completed successfully.
+ partprobe /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
Error: Partition(s) 2 on /dev/nvme1n1 have been written, but we have been unable to inform the kernel of the change, probably because it/they are in use. As a result, the old partition(s) will remain in use. You should reboot now before making further changes.
+ :
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ sgdisk --align-end --new=2:0:-0 --change-name=2:disk-m1-mdadm --typecode=2:8300 /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot or after you
run partprobe(8) or kpartx(8)
The operation has completed successfully.
+ partprobe /dev/disk/by-id/nvme-CT500P3PSSD8_2244E6804767
Error: Partition(s) 2 on /dev/nvme1n1 have been written, but we have been unable to inform the kernel of the change, probably because it/they are in use. As a result, the old partition(s) will remain in use. You should reboot now before making further changes.
+ :
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-m1-mdadm
+ name=mirror
+ type=mdraid
+ echo /dev/disk/by-partlabel/disk-m1-mdadm
+ device=/dev/disk/by-id/wwn-0x50014ee26a232ee1
+ imageName=pool0
+ imageSize=2G
+ name=pool0
+ type=disk
+ device=/dev/disk/by-id/wwn-0x50014ee26a232ee1
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/wwn-0x50014ee26a232ee1
+ sgdisk --clear /dev/disk/by-id/wwn-0x50014ee26a232ee1
Creating new GPT entries in memory.
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:-0 --change-name=1:disk-pool0-zfs --typecode=1:8300 /dev/disk/by-id/wwn-0x50014ee26a232ee1
The operation has completed successfully.
+ partprobe /dev/disk/by-id/wwn-0x50014ee26a232ee1
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-pool0-zfs
+ pool=data
+ type=zfs
+ echo /dev/disk/by-partlabel/disk-pool0-zfs
+ device=/dev/disk/by-id/wwn-0x50014ee214cdf86e
+ imageName=pool1
+ imageSize=2G
+ name=pool1
+ type=disk
+ device=/dev/disk/by-id/wwn-0x50014ee214cdf86e
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/wwn-0x50014ee214cdf86e
+ sgdisk --clear /dev/disk/by-id/wwn-0x50014ee214cdf86e
Creating new GPT entries in memory.
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:-0 --change-name=1:disk-pool1-zfs --typecode=1:8300 /dev/disk/by-id/wwn-0x50014ee214cdf86e
The operation has completed successfully.
+ partprobe /dev/disk/by-id/wwn-0x50014ee214cdf86e
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-pool1-zfs
+ pool=data
+ type=zfs
+ echo /dev/disk/by-partlabel/disk-pool1-zfs
+ device=/dev/disk/by-id/wwn-0x50014ee214cdff57
+ imageName=pool2
+ imageSize=2G
+ name=pool2
+ type=disk
+ device=/dev/disk/by-id/wwn-0x50014ee214cdff57
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/wwn-0x50014ee214cdff57
+ sgdisk --clear /dev/disk/by-id/wwn-0x50014ee214cdff57
Creating new GPT entries in memory.
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:-0 --change-name=1:disk-pool2-zfs --typecode=1:8300 /dev/disk/by-id/wwn-0x50014ee214cdff57
The operation has completed successfully.
+ partprobe /dev/disk/by-id/wwn-0x50014ee214cdff57
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-pool2-zfs
+ pool=data
+ type=zfs
+ echo /dev/disk/by-partlabel/disk-pool2-zfs
+ device=/dev/disk/by-id/wwn-0x50014ee214cdfe15
+ imageName=pool3
+ imageSize=2G
+ name=pool3
+ type=disk
+ device=/dev/disk/by-id/wwn-0x50014ee214cdfe15
+ efiGptPartitionFirst=1
+ type=gpt
+ blkid /dev/disk/by-id/wwn-0x50014ee214cdfe15
+ sgdisk --clear /dev/disk/by-id/wwn-0x50014ee214cdfe15
Creating new GPT entries in memory.
The operation has completed successfully.
+ sgdisk --align-end --new=1:0:-0 --change-name=1:disk-pool3-zfs --typecode=1:8300 /dev/disk/by-id/wwn-0x50014ee214cdfe15
The operation has completed successfully.
+ partprobe /dev/disk/by-id/wwn-0x50014ee214cdfe15
+ udevadm trigger --subsystem-match=block
+ udevadm settle
+ device=/dev/disk/by-partlabel/disk-pool3-zfs
+ pool=data
+ type=zfs
+ echo /dev/disk/by-partlabel/disk-pool3-zfs
+ level=1
+ metadata=default
+ name=mirror
+ type=mdadm
+ test -e /dev/md/mirror
+ readarray -t disk_devices
++ cat /tmp/tmp.WCf0LdW70Y/raid_mirror
+ echo y
++ wc -l /tmp/tmp.WCf0LdW70Y/raid_mirror
++ cut -f 1 -d ' '
+ mdadm --create /dev/md/mirror --level=1 --raid-devices=2 --metadata=default --force --homehost=any /dev/disk/by-partlabel/disk-m0-mdadm /dev/disk/by-partlabel/disk-m1-mdadm
mdadm: cannot open /dev/disk/by-partlabel/disk-m1-mdadm: No such file or directory
+ rm -rf /tmp/tmp.WCf0LdW70Y
Connection to 192.168.1.11 closed.
I will retry the installation using a NixOS installer USB, and will not have time to resolve this issue myself.
Still, I hope to bring it to attention here so it can be fixed and doesn't happen to others in the future.
From what I can gather, the mdadm partition is not yet visible to the kernel after the formatting, and the problem could be solved by introducing a reboot between formatting and setting up the RAID (see the "Error" messages after the partition tables are written to the two SSDs).
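Concretely, the first lsblk in the log shows the old md127 array still assembled from nvme1n1p2 (left over from the previous install), and it apparently stays active while disko repartitions that disk, which is why the kernel reports "unable to inform the kernel of the change" and /dev/disk/by-partlabel/disk-m1-mdadm never appears when mdadm --create runs. A rough, untested sketch of a manual workaround, assuming SSH access to the target's installer environment before re-running nixos-anywhere:

cat /proc/mdstat               # the stale array from the previous install shows up here
mdadm --stop /dev/md127        # or: mdadm --stop --scan, so nothing holds the old partitions open
partprobe /dev/nvme1n1         # optional: confirm the kernel can now re-read the partition table
udevadm settle

With the stale array stopped first, a fresh disko run should in theory get past the mdadm --create step, though I have not verified this.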
tristanRW changed the title from "nixos-anywhere fails during disko-formatting-step, leaving system broken" to "nixos-anywhere fails during disko-formatting-step" on Feb 9, 2025