Merge pull request #2 from stgraber/main
Rename all mentions of LXD in the storage drivers
hallyn authored Aug 3, 2023
2 parents 61a7ab7 + 3831629 commit a4067ff
Showing 21 changed files with 68 additions and 68 deletions.
4 changes: 2 additions & 2 deletions incus/storage/drivers/driver_btrfs.go
@@ -207,7 +207,7 @@ func (d *btrfs) Create() error {
} else {
// New btrfs subvolume on existing btrfs filesystem.
cleanSource := filepath.Clean(hostPath)
- lxdDir := shared.VarPath()
+ daemonDir := shared.VarPath()

if shared.PathExists(hostPath) {
hostPathFS, _ := filesystem.Detect(hostPath)
@@ -216,7 +216,7 @@ func (d *btrfs) Create() error {
}
}

- if strings.HasPrefix(cleanSource, lxdDir) {
+ if strings.HasPrefix(cleanSource, daemonDir) {
if cleanSource != GetPoolMountPath(d.name) {
return fmt.Errorf("Only allowed source path under %q is %q", shared.VarPath(), GetPoolMountPath(d.name))
}
12 changes: 6 additions & 6 deletions incus/storage/drivers/driver_btrfs_volumes.go
@@ -262,7 +262,7 @@ func (d *btrfs) CreateVolumeFromBackup(vol Volume, srcBackup backup.Info, srcDat

var copyOps []btrfsCopyOp

- // unpackVolume unpacks all subvolumes in a LXD volume from a backup tarball file.
+ // unpackVolume unpacks all subvolumes in a volume from a backup tarball file.
unpackVolume := func(v Volume, srcFilePrefix string) error {
_, snapName, _ := api.GetParentAndSnapshotName(v.name)

@@ -515,7 +515,7 @@ func (d *btrfs) CreateVolumeFromMigration(vol Volume, conn io.ReadWriteCloser, v

d.logger.Debug("Received BTRFS migration meta data header", logger.Ctx{"name": vol.name})
} else {
- // Populate the migrationHeader subvolumes with root volumes only to support older LXD sources.
+ // Populate the migrationHeader subvolumes with root volumes only to support older sources.
for _, snapName := range volTargetArgs.Snapshots {
migrationHeader.Subvolumes = append(migrationHeader.Subvolumes, BTRFSSubVolume{
Snapshot: snapName,
@@ -608,7 +608,7 @@ func (d *btrfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWrite
// received. We don't use a map as the order should be kept.
copyOps := []btrfsCopyOp{}

- // receiveVolume receives all subvolumes in a LXD volume from the source.
+ // receiveVolume receives all subvolumes in a volume from the source.
receiveVolume := func(v Volume, receivePath string) error {
_, snapName, _ := api.GetParentAndSnapshotName(v.name)

@@ -653,7 +653,7 @@ func (d *btrfs) createVolumeFromMigrationOptimized(vol Volume, conn io.ReadWrite
return nil
}

- // Get instances directory (e.g. /var/lib/lxd/storage-pools/btrfs/containers).
+ // Get instances directory (e.g. /var/lib/incus/storage-pools/btrfs/containers).
instancesPath := GetVolumeMountPath(d.name, vol.volType, "")

// Create a temporary directory which will act as the parent directory of the received ro snapshot.
@@ -1127,7 +1127,7 @@ func (d *btrfs) GetVolumeDiskPath(vol Volume) (string, error) {
return genericVFSGetVolumeDiskPath(vol)
}

- // ListVolumes returns a list of LXD volumes in storage pool.
+ // ListVolumes returns a list of volumes in storage pool.
func (d *btrfs) ListVolumes() ([]Volume, error) {
return genericVFSListVolumes(d)
}
@@ -1434,7 +1434,7 @@ func (d *btrfs) migrateVolumeOptimized(vol Volume, conn io.ReadWriteCloser, volS
}
}

- // Get instances directory (e.g. /var/lib/lxd/storage-pools/btrfs/containers).
+ // Get instances directory (e.g. /var/lib/incus/storage-pools/btrfs/containers).
instancesPath := GetVolumeMountPath(d.name, vol.volType, "")

// Create a temporary directory which will act as the parent directory of the read-only snapshot.
16 changes: 8 additions & 8 deletions incus/storage/drivers/driver_ceph.go
@@ -92,9 +92,9 @@ func (d *ceph) Info() Info {
}
}

- // getPlaceholderVolume returns the volume used to indicate if the pool is used by LXD.
+ // getPlaceholderVolume returns the volume used to indicate if the pool is in use.
func (d *ceph) getPlaceholderVolume() Volume {
- return NewVolume(d, d.name, VolumeType("lxd"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
+ return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
}

// FillConfig populates the storage pool's configuration file with the default values.
@@ -177,8 +177,8 @@ func (d *ceph) Create() error {
d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}

- // Create placeholder storage volume. Other LXD instances will use this to detect whether this osd
- // pool is already in use by another LXD instance.
+ // Create placeholder storage volume. Other instances will use this to detect whether this osd
+ // pool is already in use by another instance.
err = d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
@@ -193,15 +193,15 @@ func (d *ceph) Create() error {

if volExists {
// ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical
- // construct there is no good reason not to create one for dedicated use by LXD.
+ // construct there is no good reason not to create one for dedicated use by the daemon.
if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) {
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another LXD instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
}

d.config["volatile.pool.pristine"] = "false"
} else {
- // Create placeholder storage volume. Other LXD instances will use this to detect whether this osd
- // pool is already in use by another LXD instance.
+ // Create placeholder storage volume. Other instances will use this to detect whether this osd
+ // pool is already in use by another instance.
err := d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
20 changes: 10 additions & 10 deletions incus/storage/drivers/driver_ceph_utils.go
@@ -396,11 +396,11 @@ func (d *ceph) rbdListSnapshotClones(vol Volume, snapshotName string) ([]string,
}

// rbdMarkVolumeDeleted marks an RBD storage volume as being in "zombie" state.
- // An RBD storage volume that is in zombie state is not tracked in LXD's
+ // An RBD storage volume that is in zombie state is not tracked in the
// database anymore but still needs to be kept around for the sake of any
// dependent storage entities in the storage pool. This usually happens when an
// RBD storage volume has protected snapshots; a scenario most common when
- // creating a sparse copy of a container or when LXD updated an image and the
+ // creating a sparse copy of a container or when it updated an image and the
// image still has dependent container clones.
func (d *ceph) rbdMarkVolumeDeleted(vol Volume, newVolumeName string) error {
// Ensure that new volume contains the config from the source volume to maintain filesystem suffix on
@@ -706,7 +706,7 @@ func (d *ceph) deleteVolume(vol Volume) (int, error) {

// Only delete the parent snapshot of the instance if it is a zombie.
// This includes both if the parent volume itself is a zombie, or if the just the snapshot
- // is a zombie. If it is not we know that LXD is still using it.
+ // is a zombie. If it is not we know that Incus is still using it.
if strings.HasPrefix(string(parentVol.volType), "zombie_") || strings.HasPrefix(parentSnapshotName, "zombie_") {
ret, err := d.deleteVolumeSnapshot(parentVol, parentSnapshotName)
if ret < 0 {
@@ -777,7 +777,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error
return -1, err
}

- // Only delete the parent image if it is a zombie. If it is not we know that LXD is still using it.
+ // Only delete the parent image if it is a zombie. If it is not we know that Incus is still using it.
if strings.HasPrefix(string(vol.volType), "zombie_") {
ret, err := d.deleteVolume(vol)
if ret < 0 {
@@ -831,7 +831,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error
}

// Only delete the parent image if it is a zombie. If it
- // is not we know that LXD is still using it.
+ // is not we know that Incus is still using it.
if strings.HasPrefix(string(vol.volType), "zombie_") {
ret, err := d.deleteVolume(vol)
if ret < 0 {
@@ -859,7 +859,7 @@ func (d *ceph) deleteVolumeSnapshot(vol Volume, snapshotName string) (int, error
}

// parseParent splits a string describing a RBD storage entity into its components.
- // This can be used on strings like: <osd-pool-name>/<lxd-specific-prefix>_<rbd-storage-volume>@<rbd-snapshot-name>
+ // This can be used on strings like: <osd-pool-name>/<prefix>_<rbd-storage-volume>@<rbd-snapshot-name>
// and will return a Volume and snapshot name.
func (d *ceph) parseParent(parent string) (Volume, string, error) {
vol := Volume{}
@@ -930,9 +930,9 @@ func (d *ceph) parseParent(parent string) (Volume, string, error) {

// parseClone splits a strings describing an RBD storage volume.
// For example a string like
- // <osd-pool-name>/<lxd-specific-prefix>_<rbd-storage-volume>
+ // <osd-pool-name>/<prefix>_<rbd-storage-volume>
// will be split into
- // <osd-pool-name>, <lxd-specific-prefix>, <rbd-storage-volume>.
+ // <osd-pool-name>, <prefix>, <rbd-storage-volume>.
func (d *ceph) parseClone(clone string) (string, string, string, error) {
idx := strings.Index(clone, "/")
if idx == -1 {
@@ -1101,7 +1101,7 @@ func (d *ceph) getRBDVolumeName(vol Volume, snapName string, zombie bool, withPo
}

// Let's say we want to send the a container "a" including snapshots "snap0" and
// "snap1" on storage pool "pool1" from LXD "l1" to LXD "l2" on storage pool
// "snap1" on storage pool "pool1" from Incus "l1" to Incus "l2" on storage pool
// "pool2":
//
// The pool layout on "l1" would be:
@@ -1114,7 +1114,7 @@ func (d *ceph) getRBDVolumeName(vol Volume, snapName string, zombie bool, withPo
//
// rbd export-diff pool1/container_a@snapshot_snap0 - | rbd import-diff - pool2/container_a
//
- // (Note that pool2/container_a must have been created by the receiving LXD
+ // (Note that pool2/container_a must have been created by the receiving Incus
// instance before.)
//
// rbd export-diff pool1/container_a@snapshot_snap1 --from-snap snapshot_snap0 - | rbd import-diff - pool2/container_a
2 changes: 1 addition & 1 deletion incus/storage/drivers/driver_ceph_volumes.go
@@ -1060,7 +1060,7 @@ func (d *ceph) GetVolumeDiskPath(vol Volume) (string, error) {
return "", ErrNotSupported
}

- // ListVolumes returns a list of LXD volumes in storage pool.
+ // ListVolumes returns a list of volumes in storage pool.
func (d *ceph) ListVolumes() ([]Volume, error) {
vols := make(map[string]Volume)

6 changes: 3 additions & 3 deletions incus/storage/drivers/driver_cephfs.go
@@ -135,7 +135,7 @@ func (d *cephfs) Create() error {
}

// Create a temporary mountpoint.
- mountPath, err := os.MkdirTemp("", "lxd_cephfs_")
+ mountPath, err := os.MkdirTemp("", "incus_cephfs_")
if err != nil {
return fmt.Errorf("Failed to create temporary directory under: %w", err)
}
@@ -178,7 +178,7 @@ func (d *cephfs) Create() error {
// Check that the existing path is empty.
ok, _ := shared.PathIsEmpty(filepath.Join(mountPoint, fsPath))
if !ok {
return fmt.Errorf("Only empty CEPHFS paths can be used as a LXD storage pool")
return fmt.Errorf("Only empty CEPHFS paths can be used as a storage pool")
}

return nil
@@ -195,7 +195,7 @@ func (d *cephfs) Delete(op *operations.Operation) error {
}

// Create a temporary mountpoint.
- mountPath, err := os.MkdirTemp("", "lxd_cephfs_")
+ mountPath, err := os.MkdirTemp("", "incus_cephfs_")
if err != nil {
return fmt.Errorf("Failed to create temporary directory under: %w", err)
}
2 changes: 1 addition & 1 deletion incus/storage/drivers/driver_cephfs_volumes.go
@@ -338,7 +338,7 @@ func (d *cephfs) GetVolumeDiskPath(vol Volume) (string, error) {
return "", ErrNotSupported
}

- // ListVolumes returns a list of LXD volumes in storage pool.
+ // ListVolumes returns a list of volumes in storage pool.
func (d *cephfs) ListVolumes() ([]Volume, error) {
return genericVFSListVolumes(d)
}
2 changes: 1 addition & 1 deletion incus/storage/drivers/driver_cephobject.go
@@ -18,7 +18,7 @@ var cephobjectVersion string
var cephobjectLoaded bool

// cephobjectRadosgwAdminUser admin user in radosgw.
- const cephobjectRadosgwAdminUser = "lxd-admin"
+ const cephobjectRadosgwAdminUser = "incus-admin"

type cephobject struct {
common
4 changes: 2 additions & 2 deletions incus/storage/drivers/driver_common.go
@@ -258,7 +258,7 @@ func (d *common) ApplyPatch(name string) error {
// moveGPTAltHeader moves the GPT alternative header to the end of the disk device supplied.
// If the device supplied is not detected as not being a GPT disk then no action is taken and nil is returned.
// If the required sgdisk command is not available a warning is logged, but no error is returned, as really it is
- // the job of the VM quest to ensure the partitions are resized to the size of the disk (as LXD does not dicatate
+ // the job of the VM quest to ensure the partitions are resized to the size of the disk (as Incus does not dictate
// what partition structure (if any) the disk should have. However we do attempt to move the GPT alternative
// header where possible so that the backup header is where it is expected in case of any corruption with the
// primary header.
@@ -367,7 +367,7 @@ func (d *common) GetVolumeDiskPath(vol Volume) (string, error) {
return "", ErrNotSupported
}

- // ListVolumes returns a list of LXD volumes in storage pool.
+ // ListVolumes returns a list of volumes in storage pool.
func (d *common) ListVolumes() ([]Volume, error) {
return nil, ErrNotSupported
}
2 changes: 1 addition & 1 deletion incus/storage/drivers/driver_dir.go
@@ -75,7 +75,7 @@ func (d *dir) Create() error {
// Check that if within INCUS_DIR, we're at our expected spot.
cleanSource := filepath.Clean(sourcePath)
if strings.HasPrefix(cleanSource, shared.VarPath()) && cleanSource != GetPoolMountPath(d.name) {
return fmt.Errorf("Source path '%s' is within the LXD directory", cleanSource)
return fmt.Errorf("Source path '%s' is within the Incus directory", cleanSource)
}

// Check that the path is currently empty.
2 changes: 1 addition & 1 deletion incus/storage/drivers/driver_dir_volumes.go
@@ -361,7 +361,7 @@ func (d *dir) GetVolumeDiskPath(vol Volume) (string, error) {
return genericVFSGetVolumeDiskPath(vol)
}

- // ListVolumes returns a list of LXD volumes in storage pool.
+ // ListVolumes returns a list of volumes in storage pool.
func (d *dir) ListVolumes() ([]Volume, error) {
return genericVFSListVolumes(d)
}
22 changes: 11 additions & 11 deletions incus/storage/drivers/driver_lvm.go
@@ -19,7 +19,7 @@ import (
"github.com/cyphar/incus/shared/validate"
)

- const lvmVgPoolMarker = "lxd_pool" // Indicator tag used to mark volume groups as in use by LXD.
+ const lvmVgPoolMarker = "incus_pool" // Indicator tag used to mark volume groups as in use.

var lvmLoaded bool
var lvmVersion string
@@ -132,7 +132,7 @@ func (d *lvm) Create() error {
if d.config["source"] == "" || d.config["source"] == defaultSource {
usingLoopFile = true

- // We are using a LXD internal loopback file.
+ // We are using an internal loopback file.
d.config["source"] = defaultSource
if d.config["lvm.vg_name"] == "" {
d.config["lvm.vg_name"] = d.name
@@ -302,18 +302,18 @@ func (d *lvm) Create() error {
}

// Skip the in use checks if the force reuse option is enabled. This allows a storage pool to be
- // backed by an existing non-empty volume group. Note: This option should be used with care, as LXD
- // can then not guarantee that volume name conflicts won't occur with non-LXD created volumes in
- // the same volume group. This could also potentially lead to LXD deleting a non-LXD volume should
+ // backed by an existing non-empty volume group. Note: This option should be used with care, as Incus
+ // can then not guarantee that volume name conflicts won't occur with non-Incus created volumes in
+ // the same volume group. This could also potentially lead to Incus deleting a non-Incus volume should
// name conflicts occur.
if shared.IsFalseOrEmpty(d.config["lvm.vg.force_reuse"]) {
if !empty {
return fmt.Errorf("Volume group %q is not empty", d.config["lvm.vg_name"])
}

- // Check the tags on the volume group to check it is not already being used by LXD.
+ // Check the tags on the volume group to check it is not already being used.
if shared.StringInSlice(lvmVgPoolMarker, vgTags) {
return fmt.Errorf("Volume group %q is already used by LXD", d.config["lvm.vg_name"])
return fmt.Errorf("Volume group %q is already used by Incus", d.config["lvm.vg_name"])
}
}
} else {
@@ -370,13 +370,13 @@ func (d *lvm) Create() error {
}
}

- // Mark the volume group with the lvmVgPoolMarker tag to indicate it is now in use by LXD.
+ // Mark the volume group with the lvmVgPoolMarker tag to indicate it is now in use by Incus.
_, err = shared.TryRunCommand("vgchange", "--addtag", lvmVgPoolMarker, d.config["lvm.vg_name"])
if err != nil {
return err
}

d.logger.Debug("LXD marker tag added to volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]})
d.logger.Debug("Incus marker tag added to volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]})

revert.Success()
return nil
@@ -457,14 +457,14 @@ func (d *lvm) Delete(op *operations.Operation) error {

d.logger.Debug("Volume group removed", logger.Ctx{"vg_name": d.config["lvm.vg_name"]})
} else {
- // Otherwise just remove the lvmVgPoolMarker tag to indicate LXD no longer uses this VG.
+ // Otherwise just remove the lvmVgPoolMarker tag to indicate Incus no longer uses this VG.
if shared.StringInSlice(lvmVgPoolMarker, vgTags) {
_, err = shared.TryRunCommand("vgchange", "--deltag", lvmVgPoolMarker, d.config["lvm.vg_name"])
if err != nil {
return fmt.Errorf("Failed to remove marker tag on volume group for the lvm storage pool: %w", err)
}

d.logger.Debug("LXD marker tag removed from volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]})
d.logger.Debug("Incus marker tag removed from volume group", logger.Ctx{"vg_name": d.config["lvm.vg_name"]})
}
}
}
4 changes: 2 additions & 2 deletions incus/storage/drivers/driver_lvm_patches.go
@@ -8,7 +8,7 @@ import (
"github.com/cyphar/incus/shared/logger"
)

- // patchStorageSkipActivation set skipactivation=y on all LXD LVM logical volumes (excluding thin pool volumes).
+ // patchStorageSkipActivation set skipactivation=y on all Incus LVM logical volumes (excluding thin pool volumes).
func (d *lvm) patchStorageSkipActivation() error {
out, err := shared.RunCommand("lvs", "--noheadings", "-o", "lv_name,lv_attr", d.config["lvm.vg_name"])
if err != nil {
@@ -24,7 +24,7 @@ func (d *lvm) patchStorageSkipActivation() error {
volName := fields[0]
volAttr := fields[1]

- // Ignore non-LXD prefixes, and thinpool volumes (these should remain auto activated).
+ // Ignore non-Incus prefixes, and thinpool volumes (these should remain auto activated).
if !strings.HasPrefix(volName, "images_") && !strings.HasPrefix(volName, "containers_") && !strings.HasPrefix(volName, "virtual-machines_") && !strings.HasPrefix(volName, "custom_") {
continue
}
4 changes: 2 additions & 2 deletions incus/storage/drivers/driver_lvm_utils.go
@@ -35,7 +35,7 @@ const lvmSnapshotSeparator = "-"
const lvmEscapedHyphen = "--"

// lvmThinpoolDefaultName is the default name for the thinpool volume.
- const lvmThinpoolDefaultName = "LXDThinPool"
+ const lvmThinpoolDefaultName = "IncusThinPool"

// usesThinpool indicates whether the config specifies to use a thin pool or not.
func (d *lvm) usesThinpool() bool {
@@ -721,7 +721,7 @@ func (d *lvm) thinPoolVolumeUsage(volDevPath string) (uint64, uint64, error) {

// parseLogicalVolumeSnapshot parses a raw logical volume name (from lvs command) and checks whether it is a
// snapshot of the supplied parent volume. Returns unescaped parsed snapshot name if snapshot volume recognised,
- // empty string if not. The parent is required due to limitations in the naming scheme that LXD has historically
+ // empty string if not. The parent is required due to limitations in the naming scheme that Incus has historically
// been used for naming logical volumes meaning that additional context of the parent is required to accurately
// recognise snapshot volumes that belong to the parent.
func (d *lvm) parseLogicalVolumeSnapshot(parent Volume, lvmVolName string) string {