
Finish Volume support

branch: master
Till Wegmueller, 4 years ago
parent commit 9736c44e76
13 changed files (changed lines in parentheses):
  1. host/build.go (9)
  2. host/host.go (10)
  3. image/destroy.go (3)
  4. image/image.go (2)
  5. image/unpack.go (82)
  6. image/update.go (24)
  7. image/volume.go (59)
  8. pod/container.go (93)
  9. pod/manifest.go (16)
  10. pod/zone.go (4)
  11. supportfiles/volume.hcl (4)
  12. volume/volume.go (167)
  13. zfs/dataset.go (121)

host/build.go (9)

@@ -317,7 +317,7 @@ func (h *Host) ExportToImage(imageName reference.Reference, container *pod.Conta
wr.AddImageConfig(specImageConfig)
logrus.Infof("saving volume metadata")
for i, volBuildConfig := range imageConfig.Volumes {
for iter, volBuildConfig := range imageConfig.Volumes {
vol := volume.Config{
Name: volBuildConfig.Name,
MountPath: volBuildConfig.Path,
@@ -327,7 +327,12 @@ func (h *Host) ExportToImage(imageName reference.Reference, container *pod.Conta
}
if vol.Name == "" {
vol.Name = fmt.Sprintf("volume-%d", i)
vol.Name = fmt.Sprintf("volume-%d", iter)
}
// TODO check if directory is empty or not.
if err := i.AddVolume(vol); err != nil {
return nil, fmt.Errorf("could not add volume %s: %w", vol.MountPath, err)
}
if err := wr.AddMetadata(volume.MetadataKeyPrefix+strings.ReplaceAll(vol.Name, "/", ".")+oci.AnnotationMetadataBlobSuffix, vol); err != nil {
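
A note on the annotation key assembled above: it is the volume metadata prefix plus the flattened volume name plus a blob suffix. A minimal sketch; `volume.MetadataKeyPrefix` is `org.aurora-opencloud.volume` (see volume/volume.go below), while the concrete value of `oci.AnnotationMetadataBlobSuffix` is not part of this diff and is assumed here.

```go
package main

import (
	"fmt"
	"strings"
)

const (
	metadataKeyPrefix = "org.aurora-opencloud.volume" // volume.MetadataKeyPrefix from volume/volume.go
	blobSuffix        = ".blob"                       // placeholder for oci.AnnotationMetadataBlobSuffix (assumed)
)

func main() {
	// Slashes in the volume name are flattened to dots so the key stays a flat annotation name.
	volName := "data/logs"
	key := metadataKeyPrefix + strings.ReplaceAll(volName, "/", ".") + blobSuffix
	fmt.Println(key)
}
```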

host/host.go (10)

@@ -67,19 +67,23 @@ func (h *Host) Initialize() error {
dsOptions := viper.GetStringMapString(config.HostDatasetOptionsKey)
dsOptions[zfs.PropertyMountpoint] = mntPoint
if ds, err := zfs.CreateDataset(dsName, zfs.DatasetTypeFilesystem, zfs.Properties(dsOptions)); err != nil {
if ds, err := zfs.CreateDataset(dsName, zfs.DatasetTypeFilesystem, dsOptions); err != nil {
return tracerr.Wrap(err)
} else {
h.Dataset = ds
}
dsOptions = viper.GetStringMapString(config.HostImagesOptionsKey)
if _, err := h.Dataset.CreateChildDataset("images", zfs.Properties(dsOptions)); err != nil {
if _, err := h.Dataset.CreateChildDataset("images", dsOptions); err != nil {
return tracerr.Wrap(err)
}
dsOptions = viper.GetStringMapString(config.HostPodsOptionsKey)
if _, err := h.Dataset.CreateChildDataset("pods", zfs.Properties(dsOptions)); err != nil {
if _, err := h.Dataset.CreateChildDataset("pods", dsOptions); err != nil {
return tracerr.Wrap(err)
}
if _, err := h.Dataset.CreateChildDataset("volumes", dsOptions); err != nil {
return tracerr.Wrap(err)
}
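
For orientation, Initialize now creates three child datasets under the host dataset, with `volumes` new in this commit. A minimal sketch of the same flow with hard-coded names in place of the viper config keys; the pool name, mountpoint and nil option maps are illustrative only.

```go
package example

import (
	"git.wegmueller.it/opencloud/opencloud/zfs"
	"github.com/ztrue/tracerr"
)

// initHostDatasets mirrors Host.Initialize: one parent dataset plus the
// images, pods and (new here) volumes children.
func initHostDatasets() (*zfs.Dataset, error) {
	parent, err := zfs.CreateDataset("rpool/opencloud", zfs.DatasetTypeFilesystem, map[string]string{
		zfs.PropertyMountpoint: "/opencloud", // illustrative mountpoint
	})
	if err != nil {
		return nil, tracerr.Wrap(err)
	}
	for _, child := range []string{"images", "pods", "volumes"} {
		if _, err := parent.CreateChildDataset(child, nil); err != nil {
			return nil, tracerr.Wrap(err)
		}
	}
	return parent, nil
}
```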

image/destroy.go (3)

@@ -35,8 +35,7 @@ func (img *Image) Destroy(layerSet map[digest.Digest][]string) error {
}
}
ds := img.GetLayerDS(lName.Digest)
err := ds.Destroy(true)
if err != nil {
if err := ds.Destroy(true); err != nil {
return tracerr.Wrap(err)
}
}

image/image.go (2)

@@ -9,6 +9,7 @@ import (
"git.wegmueller.it/opencloud/opencloud/config"
"git.wegmueller.it/opencloud/opencloud/image/reference"
"git.wegmueller.it/opencloud/opencloud/volume"
"git.wegmueller.it/opencloud/opencloud/zfs"
"github.com/dustin/go-humanize"
"github.com/opencontainers/go-digest"
@@ -32,6 +33,7 @@ type Image struct {
Reference reference.Reference `json:"reference"`
Name string `json:"name"`
Tags Tags `json:"tags"`
volumes []volume.Config `json:"volumes"`
}
type Tags map[string]specsv1.Manifest

image/unpack.go (82)

@@ -1,11 +1,13 @@
package image
import (
"fmt"
"os"
"path"
"git.wegmueller.it/opencloud/opencloud/config"
"git.wegmueller.it/opencloud/opencloud/image/oci"
"git.wegmueller.it/opencloud/opencloud/volume"
"git.wegmueller.it/opencloud/opencloud/zfs"
specsv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/ztrue/tracerr"
@@ -25,43 +27,101 @@ func (img *Image) UnpackOCI(ociDir string, manifest *specsv1.Manifest, keepLayer
}
}()
for i, layer := range manifest.Layers {
if img.HasLayer(layer.Digest) {
volumes := volume.List{}
for i, layerDescriptor := range manifest.Layers {
if img.HasLayer(layerDescriptor.Digest) {
continue
}
var layerDS *zfs.Dataset
var err error
if i == 0 {
var err error
layerDS, err = img.imagesDS.CreateChildDataset(layer.Digest.Encoded(), zfs.Properties{
layerDS, err = img.imagesDS.CreateChildDataset(layerDescriptor.Digest.Encoded(), zfs.Properties{
zfs.PropertyCompression: zfs.CompressionLZ4,
})
if err != nil {
return tracerr.Wrap(err)
}
for _, vol := range img.volumes {
vol, err := volume.CreateVolume(vol, layerDS)
if err != nil {
return fmt.Errorf("can not create initial layer for %s, volume creation failed: %w", img.Name, err)
}
if err := vol.AssignToImage(layerDescriptor.Digest); err != nil {
return fmt.Errorf("could not assign volume %s to image %s: %w", vol.GetDatasetPath(), img.Name, err)
}
volumes.Add(vol)
}
} else if parentDS := img.GetLayerDS(manifest.Layers[i-1].Digest); parentDS != nil {
snap, err := parentDS.GetSnapshot(config.SealSnapshotName)
if err != nil {
return tracerr.Wrap(err)
}
layerDS, err = snap.Clone(path.Join(img.imagesDS.Path, layer.Digest.Encoded()), zfs.Properties{
zfs.PropertyCompression: zfs.CompressionLZ4,
})
layerDS, err = snap.Clone(path.Join(img.imagesDS.Path, layerDescriptor.Digest.Encoded()), nil)
if err != nil {
return tracerr.Wrap(err)
}
// First we go through all already added volumes and clone them into the new layer dataset new
volumes = img.LoadVolumes(layerDescriptor.Digest)
for _, vol := range volumes {
volClone, err := vol.Clone(layerDS)
if err != nil {
return fmt.Errorf("could not clone volume of %s: %w", layerDS.Path, err)
}
if err := volClone.AssignToImage(layerDescriptor.Digest); err != nil {
return fmt.Errorf("could not assign volume %s to image %s: %w", volClone.GetName(), img.Name, err)
}
}
// Check if we need to create new volumes additionally to the ones created
if len(volumes) > len(img.volumes) {
for _, vol := range img.volumes {
volumeObj, err := volume.CreateVolume(vol, layerDS)
if err != nil {
return fmt.Errorf("can not create volume %s for layer %s: %w", vol.Name, layerDescriptor.Digest, err)
}
if err := volumeObj.AssignToImage(layerDescriptor.Digest); err != nil {
return fmt.Errorf("could not assign volume %s to image %s: %w", volumeObj.GetName(), img.Name, err)
}
}
}
} else {
var err error
layerDS, err = img.imagesDS.CreateChildDataset(layer.Digest.Encoded(), zfs.Properties{
layerDS, err = img.imagesDS.CreateChildDataset(layerDescriptor.Digest.Encoded(), zfs.Properties{
zfs.PropertyCompression: zfs.CompressionLZ4,
})
if err != nil {
return tracerr.Wrap(err)
}
for _, vol := range img.volumes {
vol, err := volume.CreateVolume(vol, layerDS)
if err != nil {
return fmt.Errorf("can not create initial layer for %s, volume creation failed: %w", img.Name, err)
}
if err := vol.AssignToImage(layerDescriptor.Digest); err != nil {
return fmt.Errorf("could not assign volume %s to image %s: %w", vol.GetDatasetPath(), img.Name, err)
}
volumes.Add(vol)
}
}
layerDatasets = append(layerDatasets, layerDS)
layerReader, err := oci.NewLayerReader(ociDir, layer)
layerReader, err := oci.NewLayerReader(ociDir, layerDescriptor)
if err != nil {
return tracerr.Wrap(err)
}
@@ -76,6 +136,12 @@ func (img *Image) UnpackOCI(ociDir string, manifest *specsv1.Manifest, keepLayer
return tracerr.Wrap(err)
}
for _, vol := range volumes {
if err := vol.Seal(); err != nil {
return fmt.Errorf("could not seal volume %s: %w", vol.GetName(), err)
}
}
err = layerReader.Close()
if err != nil {
return tracerr.Wrap(err)
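
The per-layer volume handling above reads most easily as a seal-then-clone chain for a single volume. A simplified sketch against the new volume API; the function name, datasets and digests are placeholders, and the real loop additionally creates volumes that only appear in later layers.

```go
package example

import (
	"git.wegmueller.it/opencloud/opencloud/volume"
	"git.wegmueller.it/opencloud/opencloud/zfs"
	"github.com/opencontainers/go-digest"
)

// chainVolume: create the volume on the first layer dataset, seal it after the
// layer is extracted, then clone the sealed snapshot onto the next layer
// dataset and tag the clone with that layer's digest.
func chainVolume(cfg volume.Config, layer0, layer1 *zfs.Dataset, d0, d1 digest.Digest) error {
	vol, err := volume.CreateVolume(cfg, layer0)
	if err != nil {
		return err
	}
	if err := vol.AssignToImage(d0); err != nil {
		return err
	}
	// ... layer 0 tarball is unpacked here ...
	if err := vol.Seal(); err != nil {
		return err
	}
	clone, err := vol.Clone(layer1)
	if err != nil {
		return err
	}
	return clone.AssignToImage(d1)
}
```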

image/update.go (24)

@@ -1,13 +1,17 @@
package image
import (
"encoding/json"
"fmt"
"os"
"path"
"strings"
"git.wegmueller.it/opencloud/opencloud/config"
"git.wegmueller.it/opencloud/opencloud/image/reference"
"git.wegmueller.it/opencloud/opencloud/image/transport"
"git.wegmueller.it/opencloud/opencloud/volume"
"github.com/opencontainers/go-digest"
specsv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/ztrue/tracerr"
)
@@ -45,6 +49,26 @@ func (img *Image) Update(ref reference.Reference) (err error) {
return tracerr.Wrap(err)
}
for key, value := range manifest.Annotations {
if strings.HasPrefix(key, volume.MetadataKeyPrefix) {
d := digest.Digest(value)
//TODO pull additional metadata blobs from docker
blob, err := img.GetBlob(d)
if err != nil {
return fmt.Errorf("could not load volume configuration for digest %s from disk: %w", d, err)
}
var vol volume.Config
if err := json.Unmarshal(blob, &vol); err != nil {
return fmt.Errorf("could not marshal volume configuration for digest %s: %w", d, err)
}
if err := img.AddVolume(vol); err != nil {
return fmt.Errorf("could not add volume config to image %s: %w", img.Name, err)
}
}
}
if err := img.UnpackOCI(img.VFSPath(), manifest, false); err != nil {
return tracerr.Wrap(err)
}
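
The blobs consumed here are the JSON encodings of volume.Config that ExportToImage stored via AddMetadata, assuming AddMetadata serializes its value as JSON, which is what the json.Unmarshal above expects. A minimal round-trip sketch with illustrative field values:

```go
package example

import (
	"encoding/json"
	"fmt"

	"git.wegmueller.it/opencloud/opencloud/volume"
	"git.wegmueller.it/opencloud/opencloud/zfs"
)

// roundTrip encodes a volume.Config the way the export side stores it and
// decodes it the way Update reads it back.
func roundTrip() error {
	in := volume.Config{
		Name:      "opt",
		MountPath: "/opt",
		Persist:   true,
		Properties: zfs.Properties{
			zfs.PropertyCompression: zfs.CompressionLZ4,
		},
	}
	blob, err := json.Marshal(in) // what the metadata blob is assumed to contain
	if err != nil {
		return err
	}
	var out volume.Config
	if err := json.Unmarshal(blob, &out); err != nil { // what Update does per annotation
		return err
	}
	fmt.Printf("%+v\n", out)
	return nil
}
```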

image/volume.go (59)

@@ -0,0 +1,59 @@
package image
import (
"errors"
"git.wegmueller.it/opencloud/opencloud/volume"
"git.wegmueller.it/opencloud/opencloud/zfs"
"github.com/opencontainers/go-digest"
)
var VolumeAlreadyExistsError = errors.New("volume already exists")
func (img *Image) AddVolume(vol volume.Config) error {
// Default to LZ4 Compression if nothing is set
if vol.Properties == nil {
vol.Properties = zfs.Properties{
zfs.PropertyCompression: zfs.CompressionLZ4,
}
} else {
if _, ok := vol.Properties[zfs.PropertyCompression]; !ok {
vol.Properties[zfs.PropertyCompression] = zfs.CompressionLZ4
}
}
if img.volumes == nil {
img.volumes = make([]volume.Config, 0)
img.volumes = append(img.volumes, vol)
return nil
}
for _, v := range img.volumes {
if v.MountPath == vol.MountPath {
return VolumeAlreadyExistsError
}
}
img.volumes = append(img.volumes, vol)
return nil
}
func (img *Image) LoadVolumes(layerDigest digest.Digest) volume.List {
volumes := volume.List{}
layerDS := img.GetLayerDS(layerDigest)
if layerDS == nil {
return nil
}
for _, child := range layerDS.Children {
vol, err := volume.OpenVolume(&child)
if err != nil {
continue
}
volumes.Add(vol)
}
return volumes
}

pod/container.go (93)

@@ -34,14 +34,14 @@ type Container struct {
status Status
dataset *zfs.Dataset
rootDS *zfs.Dataset
Name string `json:"name"`
UUID uuid.UUID `json:"uuid"`
Manifest *ContainerManifest `json:"manifest"`
Image imageSpec.Image `json:"image"`
ImageReference reference.Reference `json:"image_reference"`
Zone *config.Zone `json:"zone,omitempty"`
ImageLayerDesc imageSpec.Descriptor `json:"image_layer_desc"`
Volumes map[string]volume.Config `json:"volumes"`
Name string `json:"name"`
UUID uuid.UUID `json:"uuid"`
Manifest *ContainerManifest `json:"manifest"`
Image imageSpec.Image `json:"image"`
ImageReference reference.Reference `json:"image_reference"`
Zone *config.Zone `json:"zone,omitempty"`
ImageLayerDesc imageSpec.Descriptor `json:"image_layer_desc"`
volumes map[string]*volume.Volume
lifecycleManager lifecycle.Lifecyclemanager
sealed bool
}
@@ -51,7 +51,7 @@ func newContainer(manifest *ContainerManifest, name string) *Container {
Name: name,
UUID: uuid.NewV4(),
Manifest: manifest,
Volumes: make(map[string]volume.Config),
volumes: make(map[string]*volume.Volume),
}
return c
@@ -68,12 +68,14 @@ func CreateEmptyContainer(parentDataset *zfs.Dataset, m *ContainerManifest, name
return nil, tracerr.Wrap(err)
}
//Todo fix cleanup
// If we haven't finished successfully, clean up the remains
defer func() {
if rErr != nil {
container.cleanup()
}
}()
//defer func() {
// if rErr != nil {
// container.cleanup()
// }
//}()
if err := container.createEmptyRootDataset(); err != nil {
return nil, tracerr.Wrap(err)
@@ -100,19 +102,21 @@ func CreateContainer(parentDataset *zfs.Dataset, m *ContainerManifest, name, tag
UUID: uuid.NewV4(),
Manifest: m,
Name: name,
Volumes: make(map[string]volume.Config),
volumes: make(map[string]*volume.Volume),
}
if err := container.initDataset(parentDataset); err != nil {
return nil, tracerr.Wrap(err)
}
//TODO fix cleanup
// If we haven't finished successfully, clean up the remains
defer func() {
if rErr != nil {
container.cleanup()
}
}()
//defer func() {
// if rErr != nil {
// container.cleanup()
// }
//}()
if err := container.openImage(tag, img); err != nil {
return nil, tracerr.Wrap(err)
@@ -178,6 +182,24 @@ func (c *Container) cloneImageToContainer(img *image.Image) error {
c.rootDS = rootDS
for _, child := range layerDS.Children {
vol, err := volume.OpenVolume(&child)
if err != nil {
return fmt.Errorf("could not open volume %s of layer %s: %w", child.GetName(), c.ImageLayerDesc.Digest, err)
}
volClone, err := vol.Clone(c.rootDS)
if err != nil {
return fmt.Errorf("could not clone volume %s into container %s: %w", vol.GetName(), c.UUID, err)
}
if err := volClone.AssignToContainer(c.UUID); err != nil {
return fmt.Errorf("could not assign volume %s to container %s: %w", volClone.GetName(), c.UUID, err)
}
c.volumes[volClone.GetDatasetMountPoint()] = volClone
}
return nil
}
@@ -237,12 +259,20 @@ func (c *Container) initVolumes() error {
volCfg.Name = fmt.Sprintf("volume.%d", i)
}
vol, err := volume.CreateVolume(volCfg, c.dataset, c.UUID)
if v, ok := c.volumes[volCfg.MountPath]; ok {
return fmt.Errorf("volume %s is already mounted under %s, cannot mount another one", v.GetName(), volCfg.MountPath)
}
vol, err := volume.CreateVolume(volCfg, c.dataset)
if err != nil {
return fmt.Errorf("could not initialize volume for container %s(%s): %w", c.UUID, c.Name, err)
}
c.Volumes[vol.GetDatasetPath()] = volCfg
if err := vol.AssignToContainer(c.UUID); err != nil {
return fmt.Errorf("could not assign volume %s to container %s: %w", vol.GetName(), c.UUID, err)
}
c.volumes[vol.GetDatasetPath()] = vol
}
return nil
@@ -259,7 +289,10 @@ func (c *Container) initDataset(parentDataset *zfs.Dataset) error {
}
func (c *Container) cleanup() error {
return c.dataset.Destroy(true)
if c.dataset != nil {
return c.dataset.Destroy(true)
}
return nil
}
func (c *Container) createEmptyRootDataset() error {
@@ -289,6 +322,7 @@ func LoadContainer(parentDS *zfs.Dataset, id uuid.UUID) (*Container, error) {
container := &Container{
dataset: containerDS,
UUID: id,
volumes: make(map[string]*volume.Volume),
}
if err := container.Load(); err != nil {
@@ -346,6 +380,19 @@ func (c *Container) Load() error {
return err
}
for _, child := range c.dataset.Children {
if child.GetName() == "root" {
continue
}
vol, err := volume.OpenVolume(&child)
if err != nil {
return fmt.Errorf("could not load container %s, volume %s failed to load: %w", c.UUID, child.GetName(), err)
}
c.volumes[vol.GetDatasetMountPoint()] = vol
}
c.sealed = true
return nil
}
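
With this change, every volume dataset found under the image layer is adopted into the container as a cloned volume and tracked in the unexported volumes map. A condensed sketch of the per-volume step from cloneImageToContainer; the parameters stand in for the layer child dataset, the container's root dataset and its UUID.

```go
package example

import (
	"fmt"

	"git.wegmueller.it/opencloud/opencloud/volume"
	"git.wegmueller.it/opencloud/opencloud/zfs"
	uuid "github.com/satori/go.uuid"
)

// adoptVolume opens a layer child dataset as a volume, clones it under the
// container's root dataset and tags the clone with the container UUID.
func adoptVolume(layerChild, rootDS *zfs.Dataset, containerID uuid.UUID) (*volume.Volume, error) {
	vol, err := volume.OpenVolume(layerChild)
	if err != nil {
		return nil, fmt.Errorf("dataset is not a volume: %w", err)
	}
	clone, err := vol.Clone(rootDS)
	if err != nil {
		return nil, err
	}
	if err := clone.AssignToContainer(containerID); err != nil {
		return nil, err
	}
	return clone, nil
}
```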

pod/manifest.go (16)

@@ -1,14 +1,12 @@
package pod
import (
"encoding/json"
"fmt"
"strings"
"git.wegmueller.it/opencloud/opencloud/image"
"git.wegmueller.it/opencloud/opencloud/image/reference"
"git.wegmueller.it/opencloud/opencloud/volume"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/ztrue/tracerr"
)
@@ -61,20 +59,6 @@ func ContainerManifestFromImage(ref reference.Reference, img *image.Image) (*Con
m.Spec.Solaris.LimitPriv += "," + value
}
}
if strings.HasPrefix(key, volume.MetadataKeyPrefix) {
d := digest.Digest(value)
blob, err := img.GetBlob(d)
if err != nil {
return nil, fmt.Errorf("could not load volume configuration for digest %s from disk: %w", d, err)
}
var vol volume.Config
if err := json.Unmarshal(blob, &vol); err != nil {
return nil, fmt.Errorf("could not marshal volume configuration for digest %s: %w", d, err)
}
m.Volumes = append(m.Volumes, vol)
}
}
m.Spec.Solaris.Anet = []specs.SolarisAnet{}

pod/zone.go (4)

@@ -49,9 +49,9 @@ func newZoneFromContainer(container *Container) *config.Zone {
}
}
for dsPath := range container.Volumes {
for _, vol := range container.volumes {
z.Datasets = append(z.Datasets, config.Dataset{
Name: dsPath,
Name: vol.GetDatasetPath(),
})
}

supportfiles/volume.hcl (4)

@@ -19,7 +19,9 @@ image "openindiana/hipster" {
}
cmd = "pkg update -v"
}
volume "/var/adm" {
# Volume must be a empty directory
volume "/opt" {
persist = true
}
}
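
Presumably the block label becomes the volume's mount path and `persist` maps to Config.Persist; when no explicit name is set, CreateVolume (see volume/volume.go below) falls back to the last path segment, i.e. `opt` for `/opt`. A small sketch of that mapping, with the actual HCL decoding left out:

```go
package example

import (
	"strings"

	"git.wegmueller.it/opencloud/opencloud/volume"
)

// configFromHCLBlock sketches the volume.Config this block presumably yields.
// The Name default mirrors CreateVolume's fallback to the last path segment.
func configFromHCLBlock(mountPath string, persist bool) volume.Config {
	return volume.Config{
		Name:      mountPath[strings.LastIndex(mountPath, "/")+1:],
		MountPath: mountPath,
		Persist:   persist,
	}
}
```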

volume/volume.go (167)

@@ -2,18 +2,25 @@ package volume
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"git.wegmueller.it/opencloud/opencloud/config"
"git.wegmueller.it/opencloud/opencloud/zfs"
"github.com/opencontainers/go-digest"
uuid "github.com/satori/go.uuid"
"github.com/spf13/viper"
"github.com/ztrue/tracerr"
)
const (
MetadataKeyPrefix = "org.aurora-opencloud.volume"
MetadataKeyPrefix = "org.aurora-opencloud.volume"
ZFSNameProperty = "volume:name"
ZFSMountPointProperty = "volume:destination"
ZFSPersistentProperty = "volume:persistent"
ZFSBackupProperty = "volume:backup"
)
type Config struct {
@@ -26,9 +33,23 @@ type Config struct {
}
type Volume struct {
datasetName string
config Config
ds *zfs.Dataset
config Config
ds *zfs.Dataset
}
type List []*Volume
func (l *List) Add(volume *Volume) {
*l = append(*l, volume)
}
func (l *List) Find(name string) (*Volume, bool) {
for _, vol := range *l {
if vol.config.Name == name {
return vol, true
}
}
return nil, false
}
func getOrCreatePersistentContainerVolumeDataset(containerID uuid.UUID) (*zfs.Dataset, error) {
@@ -46,7 +67,7 @@ func getOrCreatePersistentContainerVolumeDataset(containerID uuid.UUID) (*zfs.Da
return rDs, nil
}
func CreateVolume(conf Config, containerDS *zfs.Dataset, containerUUID uuid.UUID) (*Volume, error) {
func CreateVolume(conf Config, parentDS *zfs.Dataset) (*Volume, error) {
if conf.Name == "" {
idx := strings.LastIndex(conf.MountPath, "/")
conf.Name = conf.MountPath[idx+1:]
@@ -56,21 +77,24 @@ func CreateVolume(conf Config, containerDS *zfs.Dataset, containerUUID uuid.UUID
conf.Properties = zfs.Properties{}
}
conf.Properties[zfs.PropertyMountpoint] = conf.MountPath
conf.Properties[zfs.PropertyZoned] = "on"
conf.Properties["container:volume:destination"] = conf.MountPath
conf.Properties["container:volume:persistent"] = boolToString(conf.Persist)
conf.Properties["container:volume:backup"] = boolToString(conf.Backup)
conf.Properties["container:uuid"] = containerUUID.String()
conf.Properties[ZFSNameProperty] = conf.Name
conf.Properties[ZFSMountPointProperty] = conf.MountPath
conf.Properties[ZFSPersistentProperty] = boolToString(conf.Persist)
conf.Properties[ZFSBackupProperty] = boolToString(conf.Backup)
conf.Properties[zfs.PropertyMountpoint] = parentDS.VFSPath(conf.MountPath)
vol := &Volume{
config: conf,
}
if err := os.MkdirAll(parentDS.VFSPath(conf.MountPath), 0755); err != nil {
return nil, fmt.Errorf("could not create volument mount for %s: %w", conf.Name, err)
}
var err error
vol.ds, err = containerDS.CreateChildDataset(conf.Name, conf.Properties)
vol.ds, err = parentDS.CreateChildDataset(conf.Name, conf.Properties)
if err != nil {
return nil, tracerr.Wrap(err)
return nil, fmt.Errorf("could not create dataset %s: %w", path.Join(parentDS.Path, conf.Name), err)
}
return vol, nil
@@ -84,13 +108,65 @@ func boolToString(b bool) string {
}
}
func PersistVolume(containerId uuid.UUID, vol *Volume) error {
func stringToBool(str string) bool {
switch str {
case "on", "true":
return true
default:
return false
}
}
func OpenVolume(dataset *zfs.Dataset) (*Volume, error) {
v := &Volume{
config: Config{
Properties: dataset.Properties.ToSimpleProperties(),
},
ds: dataset,
}
if val, ok := dataset.Properties[ZFSNameProperty]; ok {
v.config.Name = val.Value
} else {
return nil, fmt.Errorf("dataset is not a volume property %s is not set", ZFSNameProperty)
}
if val, ok := dataset.Properties[ZFSMountPointProperty]; ok {
v.config.MountPath = val.Value
} else {
return nil, fmt.Errorf("dataset is not a volume property %s is not set", ZFSMountPointProperty)
}
if val, ok := dataset.Properties[ZFSBackupProperty]; ok {
v.config.Backup = stringToBool(val.Value)
} else {
return nil, fmt.Errorf("dataset is not a volume property %s is not set", ZFSBackupProperty)
}
if val, ok := dataset.Properties[ZFSPersistentProperty]; ok {
v.config.Persist = stringToBool(val.Value)
} else {
return nil, fmt.Errorf("dataset is not a volume property %s is not set", ZFSPersistentProperty)
}
return v, nil
}
func (v *Volume) Seal() error {
if _, err := v.ds.Snapshot(config.SealSnapshotName); err != nil {
return fmt.Errorf("could not seal the volume %s: %w", v.ds.Path, err)
}
return nil
}
func (v *Volume) Persist(containerId uuid.UUID) error {
rootDS, err := getOrCreatePersistentContainerVolumeDataset(containerId)
if err != nil {
return tracerr.Wrap(err)
}
return vol.ds.Rename(path.Join(rootDS.Path, vol.config.Name), false)
return v.ds.Rename(path.Join(rootDS.Path, v.config.Name), false)
}
func (v *Volume) Destroy() error {
@@ -104,3 +180,64 @@ func (v *Volume) GetDatasetPath() string {
func (v *Volume) GetDatasetMountPoint() string {
return v.ds.Mountpoint
}
func (v *Volume) AssignToContainer(containerUUID uuid.UUID) error {
v.config.Properties["container:uuid"] = containerUUID.String()
if v.ds.IsMounted() {
if err := v.ds.Unmount(); err != nil {
return fmt.Errorf("volume %s could not be unmounted: %w", v.GetName(), err)
}
}
if err := v.ds.SetManyProperties(zfs.PropertyKeyValuePair{
Name: "container:uuid",
Value: containerUUID.String(),
}, zfs.PropertyKeyValuePair{
Name: zfs.PropertyMountpoint,
Value: v.config.MountPath,
}); err != nil {
return fmt.Errorf("could not assign volume %s to container %s: %w", v.GetName(), containerUUID, err)
}
return nil
}
func (v *Volume) AssignToImage(imageDigest digest.Digest) error {
v.config.Properties["image:digest"] = imageDigest.Encoded()
v.config.Properties["image:algorithm"] = imageDigest.Algorithm().String()
if err := v.ds.SetProperty("image:digest", imageDigest.Encoded()); err != nil {
return fmt.Errorf("could not assign volume to image %s: %w", imageDigest, err)
}
if err := v.ds.SetProperty("image:algorithm", imageDigest.Algorithm().String()); err != nil {
return fmt.Errorf("could not assign volume to image %s: %w", imageDigest, err)
}
return nil
}
func (v *Volume) Clone(newParentDS *zfs.Dataset) (*Volume, error) {
snap, err := v.ds.GetSnapshot(config.SealSnapshotName)
if err != nil {
return nil, fmt.Errorf("could not find seal snapshot of volume %s: %w", v.ds.Path, err)
}
properties := v.ds.Properties.GetAllLocalProperties()
properties[zfs.PropertyMountpoint] = filepath.Join(newParentDS.Mountpoint, v.config.MountPath)
newVolDs, err := snap.Clone(path.Join(newParentDS.Path, v.config.Name), properties)
if err != nil {
return nil, fmt.Errorf("could not clone dataset %s: %w", v.ds.Path, err)
}
return &Volume{
config: v.config,
ds: newVolDs,
}, nil
}
func (v *Volume) GetName() string {
return v.config.Name
}
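
Taken together, the new API gives a volume a clear lifecycle: create under a parent dataset, assign, seal, clone, and optionally persist. A minimal end-to-end sketch; the datasets, layer digest and container UUID are placeholders supplied by the caller.

```go
package example

import (
	"git.wegmueller.it/opencloud/opencloud/volume"
	"git.wegmueller.it/opencloud/opencloud/zfs"
	"github.com/opencontainers/go-digest"
	uuid "github.com/satori/go.uuid"
)

// volumeLifecycle walks one volume through the operations added in this commit.
func volumeLifecycle(layerDS, containerDS *zfs.Dataset, layerDigest digest.Digest, containerID uuid.UUID) error {
	cfg := volume.Config{
		Name:      "opt",
		MountPath: "/opt",
		Persist:   true,
		Properties: zfs.Properties{
			zfs.PropertyCompression: zfs.CompressionLZ4,
		},
	}

	// Create the dataset under an image layer and record the owning layer.
	vol, err := volume.CreateVolume(cfg, layerDS)
	if err != nil {
		return err
	}
	if err := vol.AssignToImage(layerDigest); err != nil {
		return err
	}

	// Seal freezes the content in the snapshot that Clone later branches from.
	if err := vol.Seal(); err != nil {
		return err
	}

	// A container gets its own clone, remounted at the configured mount path.
	clone, err := vol.Clone(containerDS)
	if err != nil {
		return err
	}
	if err := clone.AssignToContainer(containerID); err != nil {
		return err
	}

	// Persist renames the dataset into the per-container persistent volume
	// dataset so the data can outlive the container.
	return clone.Persist(containerID)
}
```

Clone depends on the seal snapshot, which is why UnpackOCI seals every volume once a layer has been extracted.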

zfs/dataset.go (121)

@@ -11,6 +11,34 @@ import (
// DatasetProperties type is map of dataset or volume properties prop -> value
type DatasetProperties map[string]Property
func (d DatasetProperties) ToSimpleProperties() Properties {
p := Properties{}
for key, value := range d {
p[key] = value.Value
}
return p
}
func (d DatasetProperties) GetAllUserProperties() Properties {
p := Properties{}
for key, value := range d {
if strings.Contains(key, ":") {
p[key] = value.Value
}
}
return p
}
func (d DatasetProperties) GetAllLocalProperties() Properties {
p := Properties{}
for key, value := range d {
if value.Source == "local" {
p[key] = value.Value
}
}
return p
}
type Properties map[string]string
// DatasetType defines enum of dataset types
@@ -59,14 +87,39 @@ func OpenDatasetByPath(vfsPath string) (d *Dataset, err error) {
//Read a Dataset and all its Properties from zfs Command
func OpenDataset(zfsName string) (d *Dataset, err error) {
retVal, err := zfsExec([]string{"get", "-Hp", "all", zfsName})
if err != nil {
return nil, err
}
d = &Dataset{
Path: zfsName,
Properties: make(DatasetProperties),
}
if err := d.ReadAllProperties(); err != nil {
return nil, err
}
children, err := List(zfsName)
if err != nil {
return
}
for _, child := range children {
if !(child == zfsName) {
//slash := regexp.MustCompile("/")
//matches := slash.FindAllStringIndex(child, -1)
//zfs command outputs all Children But that is a hassle to parse so ignore children of children here
//TODO Figure out if I want to switch this to nonrecursive. and if So How
if childds, err := OpenDataset(child); err == nil {
d.Children = append(d.Children, *childds)
}
}
}
return
}
func (d *Dataset) ReadAllProperties() error {
retVal, err := zfsExec([]string{"get", "-Hp", "all", d.Path})
if err != nil {
return err
}
for _, line := range retVal {
propLine := strings.Fields(line)
propName := propLine[1]
@@ -106,22 +159,8 @@ func OpenDataset(zfsName string) (d *Dataset, err error) {
}
}
}
children, err := List(zfsName)
if err != nil {
return
}
for _, child := range children {
if !(child == zfsName) {
//slash := regexp.MustCompile("/")
//matches := slash.FindAllStringIndex(child, -1)
//zfs command outputs all Children But that is a hassle to parse so ignore children of children here
//TODO Figure out if I want to switch this to nonrecursive. and if So How
if childds, err := OpenDataset(child); err == nil {
d.Children = append(d.Children, *childds)
}
}
}
return
return nil
}
// SetProperty set ZFS dataset property to value. Not all properties can be set,
@@ -135,6 +174,32 @@ func (d *Dataset) SetProperty(prop string, value string) (err error) {
return
}
type PropertyKeyValuePair struct {
Name string
Value string
}
func (p PropertyKeyValuePair) String() string {
return fmt.Sprintf("%s=%s", p.Name, p.Value)
}
func (d *Dataset) SetManyProperties(properties ...PropertyKeyValuePair) (err error) {
args := []string{"set"}
for _, prop := range properties {
args = append(args, prop.String())
}
args = append(args, d.Path)
if _, err = zfsExec(args); err != nil {
return err
}
if err := d.ReadAllProperties(); err != nil {
return err
}
return
}
// GetProperty reload and return single specified property. This also reloads requested
// property in Properties map.
func (d *Dataset) GetProperty(p string) (prop Property, err error) {
@@ -162,10 +227,9 @@ func (d *Dataset) Rename(newName string, forceUnmount bool) (err error) {
// IsMounted checks to see if the mount is active. If the filesystem is mounted,
// sets in 'where' argument the current mountpoint, and returns true. Otherwise,
// returns false.
func (d *Dataset) IsMounted() (mounted bool, where string) {
func (d *Dataset) IsMounted() (mounted bool) {
if d.Properties["mounted"].Value == "yes" {
mounted = true
where = d.Properties["mountpoint"].Value
} else {
mounted = false
}
@@ -242,13 +306,13 @@ func (d *Dataset) CreateChildDataset(path string, props Properties) (dataset *Da
return
}
func (d *Dataset) GetChildDataset(path string) (*Dataset, error) {
func (d *Dataset) GetChildDataset(name string) (*Dataset, error) {
for _, child := range d.Children {
if child.Path == d.Path+"/"+path {
if child.Path == d.Path+"/"+name {
return &child, nil
}
}
return &Dataset{}, fmt.Errorf("could not find child dataset %s", path)
return &Dataset{}, fmt.Errorf("could not find child dataset %s", name)
}
func (d *Dataset) GetName() string {
@@ -269,10 +333,11 @@ func (d *Dataset) Destroy(recursive bool) error {
}
func (d *Dataset) Clone(dest string, properties Properties) (*Dataset, error) {
if !strings.Contains(d.Path, "@") {
return &Dataset{}, fmt.Errorf("dataset %s is not a snapshot", d.Path)
if strings.Contains(d.Path, "@") {
return Clone(d.Path, dest, properties)
}
return Clone(d.Path, dest, properties)
return nil, fmt.Errorf("dataset %s is not a snapshot", d.Path)
}
func (d *Dataset) Snapshot(name string) (*Dataset, error) {
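
The new SetManyProperties batches several name=value pairs into a single `zfs set` invocation and refreshes the cached properties afterwards. A small usage sketch with illustrative values:

```go
package example

import (
	"git.wegmueller.it/opencloud/opencloud/zfs"
)

// tagDataset sets two properties in one zfs set call.
func tagDataset(ds *zfs.Dataset) error {
	return ds.SetManyProperties(
		zfs.PropertyKeyValuePair{Name: "container:uuid", Value: "00000000-0000-0000-0000-000000000000"},
		zfs.PropertyKeyValuePair{Name: zfs.PropertyMountpoint, Value: "/opt"},
	)
}
```

This is the helper AssignToContainer in volume/volume.go uses to update the container UUID and the mountpoint together.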
