Browse Source

Refactor: Move imaged into own package

Add: more program-like ZFS support.
layerset
Till Wegmüller 6 years ago
parent
commit
0f8b866bca
  1. 71
      Vagrantfile
  2. 118
      common/common.go
  3. 23
      imaged/client.go
  4. 27
      imaged/daemon.go
  5. 23
      imaged/main.go
  6. 3
      installd/daemon.go
  7. 19
      main.go
  8. 2
      model.go
  9. 369
      zfs/common.go
  10. 3
      zfs/create.go
  11. 238
      zfs/dataset.go
  12. 29
      zfs/list.go

71
Vagrantfile vendored

@ -1,71 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
# The most common configuration options are documented and commented below.
# For a complete reference, please see the online documentation at
# https://docs.vagrantup.com.
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
config.vm.box = "openindiana/hipster"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
# `vagrant box outdated`. This is not recommended.
# config.vm.box_check_update = false
# Create a forwarded port mapping which allows access to a specific port
# within the machine from a port on the host machine. In the example below,
# accessing "localhost:8080" will access port 80 on the guest machine.
config.vm.network "forwarded_port", guest: 2345, host: 2345
# Create a private network, which allows host-only access to the machine
# using a specific IP.
# config.vm.network "private_network", ip: "192.168.33.10"
# Create a public network, which generally matched to bridged network.
# Bridged networks make the machine appear as another physical device on
# your network.
# config.vm.network "public_network"
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
# argument is a set of non-required options.
# config.vm.synced_folder "../data", "/vagrant_data"
# Provider-specific configuration so you can fine-tune various
# backing providers for Vagrant. These expose provider-specific options.
# Example for VirtualBox:
#
# config.vm.provider "virtualbox" do |vb|
# # Display the VirtualBox GUI when booting the machine
# vb.gui = true
#
# # Customize the amount of memory on the VM:
# vb.memory = "1024"
# end
#
# View the documentation for the provider you are using for more
# information on available options.
# Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
# such as FTP and Heroku are also available. See the documentation at
# https://docs.vagrantup.com/v2/push/atlas.html for more information.
# config.push.define "atlas" do |push|
# push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
# end
# Enable provisioning with a shell script. Additional provisioners such as
# Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
# documentation for more information about their specific syntax and use.
config.vm.provision "shell", inline: <<-SHELL
pkg install build-essential developer/golang
pkg update
SHELL
end

118
common/common.go

@ -0,0 +1,118 @@
package common
import (
"net"
"fmt"
"net/rpc"
"os"
"log"
"os/signal"
"syscall"
"github.com/takama/daemon"
)
// Stdlog and Errlog are the package-wide loggers for normal and error
// output; both are initialized in init() with date/time stamping.
var Stdlog, Errlog *log.Logger

// Server couples a platform daemon handle with the network identity of
// the service: the TCP address to listen on (":9999" form), the service
// name, and a human-readable description.
type Server struct {
	daemon.Daemon
	Port        string
	Name        string
	Description string
}
// RunService dispatches a daemon-control subcommand (install | remove |
// start | stop | status | client) when one is present on the command
// line, or otherwise runs the service itself: it listens on
// service.Port, serves each accepted connection over net/rpc, and shuts
// down cleanly on SIGINT/SIGTERM.
//
// clientFn is invoked for the "client" subcommand. receivers is an
// optional, backward-compatible list of RPC handler objects; each is
// registered with net/rpc before the listener starts accepting, so the
// service can actually expose methods (registering the Server itself
// never worked: its methods do not match the net/rpc signature rules).
// The returned string is a human-readable status message.
func RunService(service Server, clientFn func() (string, error), receivers ...interface{}) (string, error) {
	// Use the service's own name instead of a hard-coded "imaged" so the
	// usage line is correct for every daemon built on this package.
	usage := "Usage: " + service.Name + " install | remove | start | stop | status"
	// If we received any kind of command, execute it and return.
	if len(os.Args) > 1 {
		command := os.Args[1]
		switch command {
		case "install":
			return service.Install()
		case "remove":
			return service.Remove()
		case "start":
			return service.Start()
		case "stop":
			return service.Stop()
		case "status":
			return service.Status()
		case "client":
			return clientFn()
		default:
			return usage, nil
		}
	}
	// Register RPC receivers before any connection can be served.
	for _, rcvr := range receivers {
		if err := rpc.Register(rcvr); err != nil {
			return "Could not register RPC receiver", err
		}
	}
	// Historical behavior: also register the Server itself. Its methods
	// are not rpc-compatible, so a failure here is only logged.
	if err := rpc.Register(service); err != nil {
		Stdlog.Println("rpc.Register(service):", err)
	}
	// Set up channel on which to send signal notifications.
	// We must use a buffered channel or risk missing the signal
	// if we're not ready to receive when the signal is sent.
	// NOTE: os.Kill (SIGKILL) cannot be trapped, so it is not requested.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
	// Set up listener for the configured address/port.
	listener, err := net.Listen("tcp", service.Port)
	if err != nil {
		return "Possibly was a problem with the port binding", err
	}
	defer func() {
		listener.Close()
		fmt.Println("Listener closed")
	}()
	// Channel on which accepted connections are collected.
	listen := make(chan net.Conn, 100)
	go acceptConnection(listener, listen)
	// Work loop: serve accepted connections until a system signal arrives.
	for {
		select {
		case conn := <-listen:
			go handleClient(conn)
		case killSignal := <-interrupt:
			Stdlog.Println("Got signal:", killSignal)
			Stdlog.Println("Stopping listening on ", listener.Addr())
			listener.Close()
			if killSignal == os.Interrupt {
				return "Daemon was interrupted by system signal", nil
			}
			return "Daemon was killed", nil
		}
	}
}
// acceptConnection forwards every connection accepted on listener into
// the listen channel. Accept errors are simply retried; the goroutine
// ends when the process shuts down.
func acceptConnection(listener net.Listener, listen chan<- net.Conn) {
	for {
		if conn, acceptErr := listener.Accept(); acceptErr == nil {
			listen <- conn
		}
	}
}
// handleClient serves a single net/rpc session on the accepted
// connection and closes the connection when the session ends.
func handleClient(client net.Conn) {
	Stdlog.Println("Got connection from: "+client.RemoteAddr().String())
	// Close connection when this function ends
	defer func() {
		Stdlog.Println("Closing connection...")
		client.Close()
	}()
	rpc.ServeConn(client)
}
// init wires the package loggers: Stdlog to stdout and Errlog to stderr,
// both stamped with date and time.
func init() {
	Stdlog = log.New(os.Stdout, "", log.Ldate|log.Ltime)
	Errlog = log.New(os.Stderr, "", log.Ldate|log.Ltime)
}

23
imaged/client.go

@ -0,0 +1,23 @@
package imaged
import (
"net/rpc"
"fmt"
)
// client connects to the local imaged RPC endpoint and asks it to list
// the datasets of the "rpool" pool, printing the result. It returns a
// short status prefix and any connection or call error.
//
// Fixes over the previous version: the RPC connection is now closed,
// a failed Imaged.List call is propagated instead of swallowed, and the
// "Sucess" typo in the returned status string is corrected.
func client() (string, error) {
	c, err := rpc.Dial("tcp", "127.0.0.1:9999")
	if err != nil {
		fmt.Println(err)
		return "Error:", err
	}
	// Release the connection once the call is done.
	defer c.Close()
	var result []string
	if err = c.Call("Imaged.List", "rpool", &result); err != nil {
		fmt.Println(err)
		return "Error:", err
	}
	fmt.Println("Got Result: ")
	fmt.Println(result)
	return "Success:", nil
}

27
imaged/daemon.go

@ -0,0 +1,27 @@
package imaged
import (
"github.com/toasterson/opencloud/zfs"
)
const (
	// name of the service
	name = "imaged"
	// human-readable description used when registering the service
	description = "OpenCloud Image Service"
	// port on which the daemon listens for RPC connections
	port = ":9999"
)

// Imaged is the net/rpc receiver exposing image-related operations.
type Imaged struct {
}

// List answers an RPC request with the names of all ZFS datasets below
// the given pool, storing them in reply.
// (Receiver renamed from the non-idiomatic "this".)
func (im *Imaged) List(pool string, reply *[]string) (err error) {
	*reply, err = zfs.List(pool)
	return err
}

// dependencies that are NOT required by the service, but might be used
var dependencies = []string{"dummy.service"}

23
imaged/main.go

@ -0,0 +1,23 @@
package imaged
import (
"github.com/takama/daemon"
"os"
"fmt"
"github.com/toasterson/opencloud/common"
)
// main wires the imaged service together: it creates the platform
// daemon handle, wraps it in a common.Server, and hands control to
// common.RunService, which either executes a control subcommand or runs
// the RPC service loop. Any error terminates the process with exit
// status 1.
func main() {
	srv, err := daemon.New(name, description, dependencies...)
	if err != nil {
		common.Errlog.Println("Error: ", err)
		os.Exit(1)
	}
	// Keyed fields so the literal survives field additions/reordering in
	// common.Server (the previous positional literal would break).
	service := common.Server{
		Daemon:      srv,
		Port:        port,
		Name:        name,
		Description: description,
	}
	status, err := common.RunService(service, client)
	if err != nil {
		common.Errlog.Println(status, "\nError: ", err)
		os.Exit(1)
	}
	fmt.Println(status)
}

3
installd/daemon.go

@ -0,0 +1,3 @@
package installd

19
main.go

@ -1,22 +1,9 @@
package main
import (
"github.com/toasterson/opencloud/zfs"
"log"
)
func main() {
datasets , err:= zfs.List("rpool")
if err != nil{
log.Fatal(err)
}
log.Println(datasets)
for _, dataset := range(datasets){
size, serr := zfs.UsedIncludingChildren(dataset)
if serr != nil {
log.Fatal(serr)
}
log.Println(size)
}
log.Println("Test")
}
}

2
controller/base.go → model.go

@ -1,4 +1,4 @@
package controller
package main
import (
"net"

369
zfs/common.go

@ -0,0 +1,369 @@
package zfs
import (
"github.com/c2h5oh/datasize"
"strings"
"strconv"
)
// VDevType type of device in the pool
type VDevType string
// Types of Virtual Devices
const (
VDevTypeRoot VDevType = "root" // VDevTypeRoot root device in ZFS pool
VDevTypeMirror = "mirror" // VDevTypeMirror mirror device in ZFS pool
VDevTypeReplacing = "replacing" // VDevTypeReplacing replacing
VDevTypeRaidz = "raidz" // VDevTypeRaidz RAIDZ device
VDevTypeDisk = "disk" // VDevTypeDisk device is disk
VDevTypeFile = "file" // VDevTypeFile device is file
VDevTypeMissing = "missing" // VDevTypeMissing missing device
VDevTypeHole = "hole" // VDevTypeHole hole
VDevTypeSpare = "spare" // VDevTypeSpare spare device
VDevTypeLog = "log" // VDevTypeLog ZIL device
VDevTypeL2cache = "l2cache" // VDevTypeL2cache cache device (disk)
)
// PoolStatus type representing status of the pool
type PoolStatus int
// PoolState type representing pool state
type PoolState uint64
// VDevState - vdev states tye
type VDevState uint64
// VDevAux - vdev aux states
type VDevAux uint64
// Property ZFS pool or dataset property value
type Property struct {
Value string
Source string
}
// Pool status
const (
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
*/
PoolStatusCorruptCache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */
PoolStatusMissingDevR /* missing device with replicas */
PoolStatusMissingDevNr /* missing device with no replicas */
PoolStatusCorruptLabelR /* bad device label with replicas */
PoolStatusCorruptLabelNr /* bad device label with no replicas */
PoolStatusBadGUIDSum /* sum of device guids didn't match */
PoolStatusCorruptPool /* pool metadata is corrupted */
PoolStatusCorruptData /* data errors in user (meta)data */
PoolStatusFailingDev /* device experiencing errors */
PoolStatusVersionNewer /* newer on-disk version */
PoolStatusHostidMismatch /* last accessed by another system */
PoolStatusIoFailureWait /* failed I/O, failmode 'wait' */
PoolStatusIoFailureContinue /* failed I/O, failmode 'continue' */
PoolStatusBadLog /* cannot read log chain(s) */
PoolStatusErrata /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
PoolStatusUnsupFeatRead /* unsupported features for read */
PoolStatusUnsupFeatWrite /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
PoolStatusFaultedDevR /* faulted device with replicas */
PoolStatusFaultedDevNr /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
PoolStatusVersionOlder /* older legacy on-disk version */
PoolStatusFeatDisabled /* supported features are disabled */
PoolStatusResilvering /* device being resilvered */
PoolStatusOfflineDev /* device online */
PoolStatusRemovedDev /* removed device */
/*
* Finally, the following indicates a healthy pool.
*/
PoolStatusOk
)
// Possible ZFS pool states
const (
PoolStateActive PoolState = iota /* In active use */
PoolStateExported /* Explicitly exported */
PoolStateDestroyed /* Explicitly destroyed */
PoolStateSpare /* Reserved for hot spare use */
PoolStateL2cache /* Level 2 ARC device */
PoolStateUninitialized /* Internal spa_t state */
PoolStateUnavail /* Internal libzfs state */
PoolStatePotentiallyActive /* Internal libzfs state */
)
// Pool properties. Enumerates available ZFS pool properties. Use it to access
// pool properties either to read or set soecific property.
const (
PoolPropName int = iota
PoolPropSize
PoolPropCapacity
PoolPropAltroot
PoolPropHealth
PoolPropGUID
PoolPropVersion
PoolPropBootfs
PoolPropDelegation
PoolPropAutoreplace
PoolPropCachefile
PoolPropFailuremode
PoolPropListsnaps
PoolPropAutoexpand
PoolPropDedupditto
PoolPropDedupratio
PoolPropFree
PoolPropAllocated
PoolPropReadonly
PoolPropAshift
PoolPropComment
PoolPropExpandsz
PoolPropFreeing
PoolPropFragmentaion
PoolPropLeaked
PoolPropMaxBlockSize
PoolPropTName
PoolNumProps
)
/*
* Dataset properties are identified by these constants and must be added to
* the end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in module/zcommon/zfs_prop.c.
*/
const (
DatasetPropType int = iota
DatasetPropCreation
DatasetPropUsed
DatasetPropAvailable
DatasetPropReferenced
DatasetPropCompressratio
DatasetPropMounted
DatasetPropOrigin
DatasetPropQuota
DatasetPropReservation
DatasetPropVolsize
DatasetPropVolblocksize
DatasetPropRecordsize
DatasetPropMountpoint
DatasetPropSharenfs
DatasetPropChecksum
DatasetPropCompression
DatasetPropAtime
DatasetPropDevices
DatasetPropExec
DatasetPropSetuid
DatasetPropReadonly
DatasetPropZoned
DatasetPropSnapdir
DatasetPropPrivate /* not exposed to user, temporary */
DatasetPropAclinherit
DatasetPropCreatetxg /* not exposed to the user */
DatasetPropName /* not exposed to the user */
DatasetPropCanmount
DatasetPropIscsioptions /* not exposed to the user */
DatasetPropXattr
DatasetPropNumclones /* not exposed to the user */
DatasetPropCopies
DatasetPropVersion
DatasetPropUtf8only
DatasetPropNormalize
DatasetPropCase
DatasetPropVscan
DatasetPropNbmand
DatasetPropSharesmb
DatasetPropRefquota
DatasetPropRefreservation
DatasetPropGUID
DatasetPropPrimarycache
DatasetPropSecondarycache
DatasetPropUsedsnap
DatasetPropUsedds
DatasetPropUsedchild
DatasetPropUsedrefreserv
DatasetPropUseraccounting /* not exposed to the user */
DatasetPropStmfShareinfo /* not exposed to the user */
DatasetPropDeferDestroy
DatasetPropUserrefs
DatasetPropLogbias
DatasetPropUnique /* not exposed to the user */
DatasetPropObjsetid /* not exposed to the user */
DatasetPropDedup
DatasetPropMlslabel
DatasetPropSync
DatasetPropRefratio
DatasetPropWritten
DatasetPropClones
DatasetPropLogicalused
DatasetPropLogicalreferenced
DatasetPropInconsistent /* not exposed to the user */
DatasetPropSnapdev
DatasetPropAcltype
DatasetPropSelinuxContext
DatasetPropSelinuxFsContext
DatasetPropSelinuxDefContext
DatasetPropSelinuxRootContext
DatasetPropRelatime
DatasetPropRedundantMetadata
DatasetPropOverlay
DatasetNumProps
)
// ZFS errors
const (
ESuccess = 0 /* no error -- success */
ENomem = 2000 << iota /* out of memory */
EBadprop /* invalid property value */
EPropreadonly /* cannot set readonly property */
EProptype /* property does not apply to dataset type */
EPropnoninherit /* property is not inheritable */
EPropspace /* bad quota or reservation */
EBadtype /* dataset is not of appropriate type */
EBusy /* pool or dataset is busy */
EExists /* pool or dataset already exists */
ENoent /* no such pool or dataset */
EBadstream /* bad backup stream */
EDsreadonly /* dataset is readonly */
EVoltoobig /* volume is too large for 32-bit system */
EInvalidname /* invalid dataset name */
EBadrestore /* unable to restore to destination */
EBadbackup /* backup failed */
EBadtarget /* bad attach/detach/replace target */
ENodevice /* no such device in pool */
EBaddev /* invalid device to add */
ENoreplicas /* no valid replicas */
EResilvering /* currently resilvering */
EBadversion /* unsupported version */
EPoolunavail /* pool is currently unavailable */
EDevoverflow /* too many devices in one vdev */
EBadpath /* must be an absolute path */
ECrosstarget /* rename or clone across pool or dataset */
EZoned /* used improperly in local zone */
EMountfailed /* failed to mount dataset */
EUmountfailed /* failed to unmount dataset */
EUnsharenfsfailed /* unshare(1M) failed */
ESharenfsfailed /* share(1M) failed */
EPerm /* permission denied */
ENospc /* out of space */
EFault /* bad address */
EIo /* I/O error */
EIntr /* signal received */
EIsspare /* device is a hot spare */
EInvalconfig /* invalid vdev configuration */
ERecursive /* recursive dependency */
ENohistory /* no history object */
EPoolprops /* couldn't retrieve pool props */
EPoolNotsup /* ops not supported for this type of pool */
EPoolInvalarg /* invalid argument for this pool operation */
ENametoolong /* dataset name is too long */
EOpenfailed /* open of device failed */
ENocap /* couldn't get capacity */
ELabelfailed /* write of label failed */
EBadwho /* invalid permission who */
EBadperm /* invalid permission */
EBadpermset /* invalid permission set name */
ENodelegation /* delegated administration is disabled */
EUnsharesmbfailed /* failed to unshare over smb */
ESharesmbfailed /* failed to share over smb */
EBadcache /* bad cache file */
EIsl2CACHE /* device is for the level 2 ARC */
EVdevnotsup /* unsupported vdev type */
ENotsup /* ops not supported on this dataset */
EActiveSpare /* pool has active shared spare devices */
EUnplayedLogs /* log device has unplayed logs */
EReftagRele /* snapshot release: tag not found */
EReftagHold /* snapshot hold: tag already exists */
ETagtoolong /* snapshot hold/rele: tag too long */
EPipefailed /* pipe create failed */
EThreadcreatefailed /* thread create failed */
EPostsplitOnline /* onlining a disk after splitting it */
EScrubbing /* currently scrubbing */
ENoScrub /* no active scrub */
EDiff /* general failure of zfs diff */
EDiffdata /* bad zfs diff data */
EPoolreadonly /* pool is in read-only mode */
EUnknown
)
// vdev states are ordered from least to most healthy.
// A vdev that's VDevStateCantOpen or below is considered unusable.
const (
VDevStateUnknown VDevState = iota // Uninitialized vdev
VDevStateClosed // Not currently open
VDevStateOffline // Not allowed to open
VDevStateRemoved // Explicitly removed from system
VDevStateCantOpen // Tried to open, but failed
VDevStateFaulted // External request to fault device
VDevStateDegraded // Replicated vdev with unhealthy kids
VDevStateHealthy // Presumed good
)
// vdev aux states. When a vdev is in the VDevStateCantOpen state, the aux field
// of the vdev stats structure uses these constants to distinguish why.
const (
VDevAuxNone VDevAux = iota // no error
VDevAuxOpenFailed // ldi_open_*() or vn_open() failed
VDevAuxCorruptData // bad label or disk contents
VDevAuxNoReplicas // insufficient number of replicas
VDevAuxBadGUIDSum // vdev guid sum doesn't match
VDevAuxTooSmall // vdev size is too small
VDevAuxBadLabel // the label is OK but invalid
VDevAuxVersionNewer // on-disk version is too new
VDevAuxVersionOlder // on-disk version is too old
VDevAuxUnsupFeat // unsupported features
VDevAuxSpared // hot spare used in another pool
VDevAuxErrExceeded // too many errors
VDevAuxIOFailure // experienced I/O failure
VDevAuxBadLog // cannot read log chain(s)
VDevAuxExternal // external diagnosis
VDevAuxSplitPool // vdev was split off into another pool
)
// convertToSize parses a zfs(1M) size string (e.g. "1.50G", "512M",
// "1024") into a datasize.ByteSize.
//
// datasize cannot parse fractional values, so a fractional input is
// rescaled: the number is multiplied by 1024 and the unit shifted one
// step down (E->P, ..., T->G, K->bytes) before parsing.
//
// Fixes over the previous version: a unit-less fractional value such as
// "1.5" no longer has its last digit stripped as a "unit" (it is simply
// rounded to whole bytes), and P/E units are shifted correctly instead
// of being dropped.
func convertToSize(s string) (size datasize.ByteSize, err error) {
	sizeText := strings.TrimSpace(s)
	if strings.Contains(sizeText, ".") {
		// Split off a trailing unit letter, if any.
		unit := ""
		if last := sizeText[len(sizeText)-1]; last < '0' || last > '9' {
			unit = string(last)
			sizeText = sizeText[:len(sizeText)-1]
		}
		f, perr := strconv.ParseFloat(sizeText, 64)
		if perr != nil {
			return size, perr
		}
		if unit == "" {
			// No unit suffix: round fractional bytes to whole bytes.
			sizeText = strconv.FormatFloat(f, 'f', 0, 64)
		} else {
			// One multiplication by 1024 moves the value one unit down,
			// producing an integral value datasize can parse.
			switch unit {
			case "E":
				unit = "P"
			case "P":
				unit = "T"
			case "T":
				unit = "G"
			case "G":
				unit = "M"
			case "M":
				unit = "K"
			default:
				unit = "" // "K" (and anything else) drops to plain bytes
			}
			sizeText = strconv.FormatFloat(f*1024, 'f', 0, 64) + unit
		}
	}
	err = size.UnmarshalText([]byte(sizeText))
	return
}

3
zfs/create.go

@ -0,0 +1,3 @@
package zfs

238
zfs/dataset.go

@ -0,0 +1,238 @@
package zfs
import (
"os/exec"
"bytes"
"strings"
"fmt"
"regexp"
"github.com/c2h5oh/datasize"
"errors"
)
const (
	// msgDatasetIsNil is the error text used when an operation is
	// attempted on an uninitialized or closed dataset handle.
	msgDatasetIsNil = "Dataset handle not initialized or its closed"
)

// DatasetProperties type is map of dataset or volume properties prop -> value
type DatasetProperties map[string]Property

// DatasetType defines enum of dataset types
type DatasetType int32

const (
	// DatasetTypeFilesystem - file system dataset
	DatasetTypeFilesystem DatasetType = (1 << 0)
	// DatasetTypeSnapshot - snapshot of dataset
	DatasetTypeSnapshot = (1 << 1)
	// DatasetTypeVolume - volume (virtual block device) dataset
	DatasetTypeVolume = (1 << 2)
	// DatasetTypePool - pool dataset
	DatasetTypePool = (1 << 3)
	// DatasetTypeBookmark - bookmark dataset
	DatasetTypeBookmark = (1 << 4)
)

// Dataset - ZFS dataset object. Path is the full zfs name
// ("pool/parent/child"), Properties mirrors `zfs get all` output, and
// Children holds child datasets discovered by OpenDataset.
type Dataset struct {
	Path       string
	Type       DatasetType
	Properties DatasetProperties
	Children   []Dataset
}
// zfsExec runs the zfs(1M) command with the given arguments and returns
// its stdout split into lines. The first output line (the column
// header) is dropped, as are blank lines. On failure the command's
// stderr text is returned as the error; if the command produced no
// stderr (e.g. the binary is missing), the exec error itself is
// returned instead of an empty error message.
func zfsExec(args []string) (retVal []string, err error) {
	cmd := exec.Command("zfs", args...)
	var out, serr bytes.Buffer
	cmd.Stdout = &out
	cmd.Stderr = &serr
	if err = cmd.Run(); err != nil {
		if msg := strings.TrimSpace(serr.String()); msg != "" {
			return nil, errors.New(msg)
		}
		return nil, err
	}
	if out.Len() == 0 {
		return
	}
	lines := strings.Split(out.String(), "\n")[1:]
	// Filter into a fresh slice: the previous version deleted elements
	// from the slice while ranging over it, which skips adjacent blank
	// lines and can slice out of range.
	for _, line := range lines {
		if strings.TrimSpace(line) != "" {
			retVal = append(retVal, line)
		}
	}
	return
}
// datasetPropertyListToCMD renders a property map as zfs(1M)
// command-line arguments: each entry becomes a "-o key=value" pair.
// A nil or empty map yields a nil slice.
func datasetPropertyListToCMD(props map[string]string) (retVal []string) {
	for name, value := range props {
		retVal = append(retVal, "-o", name+"="+value)
	}
	return
}
// OpenDataset reads a Dataset and all its properties by shelling out to
// the zfs command (`zfs get all <path>`), then loads its direct
// children. NOTE(review): every error is silently swallowed — on any
// failure the Dataset accumulated so far (possibly zero-valued) is
// returned, so callers cannot distinguish a missing dataset from an
// empty one; consider returning the error.
func OpenDataset(path string) (d Dataset) {
	retVal, err := zfsExec([]string{"get", "all", path})
	if err != nil {
		return
	}
	d.Path = path
	d.Properties = make(DatasetProperties)
	// Each remaining output line is "<name> <property> <value> <source>"
	// (zfsExec already stripped the header row).
	for _, line := range retVal {
		propLine := strings.Fields(line)
		propName := propLine[1]
		if propName == "type" {
			// "type" selects the DatasetType instead of being stored in
			// the Properties map; anything unrecognized is treated as a
			// filesystem.
			switch propLine[2] {
			case "filesystem":
				d.Type = DatasetTypeFilesystem
			case "volume":
				d.Type = DatasetTypeVolume
			default:
				d.Type = DatasetTypeFilesystem
			}
		} else {
			prop := Property{
				propLine[2],
				propLine[3],
			}
			d.Properties[propName] = prop
		}
	}
	children, err := List(path)
	if err != nil {
		return
	}
	for _, child := range children {
		if !(child == path) {
			// Count path separators to keep only direct children.
			// NOTE(review): this keeps names with at most one "/", which
			// assumes path is a top-level pool name — confirm for nested
			// paths like "pool/a/b".
			slash := regexp.MustCompile("/")
			matches := slash.FindAllStringIndex(child, -1)
			//zfs command outputs all Children But that is a hassle to parse so ignore children of children here
			//TODO Figure out if I want to switch this to nonrecursive. and if So How
			if !(len(matches) > 1) {
				d.Children = append(d.Children, OpenDataset(child))
			}
		}
	}
	return
}
// DatasetCreate create a new filesystem or volume on path representing
// pool/dataset or pool/parent/dataset. The props map is rendered as
// "-o key=value" options; on success the freshly created dataset is
// re-read via OpenDataset.
func DatasetCreate(path string, dtype DatasetType, props map[string]string) (d Dataset, err error) {
	args := append([]string{"create"}, datasetPropertyListToCMD(props)...)
	args = append(args, path)
	if _, err = zfsExec(args); err != nil {
		return
	}
	d = OpenDataset(path)
	return
}
// SetProperty set ZFS dataset property to value. Not all properties can be set,
// some can be set only at creation time and some are read only.
// Always check if returned error and its description.
// On success the property is re-read so the Properties map stays fresh.
func (d *Dataset) SetProperty(prop string, value string) (err error) {
	assignment := fmt.Sprintf("%s=%s", prop, value)
	if _, err = zfsExec([]string{"set", assignment, d.Path}); err != nil {
		return
	}
	d.Properties[prop], err = d.GetProperty(prop)
	return
}
// GetProperty reload and return single specified property. This also
// reloads the requested property in the Properties map.
//
// `zfs get` rows have four columns — NAME PROPERTY VALUE SOURCE — and
// zfsExec already strips the header, so value and source live in fields
// 2 and 3. The previous indices (1 and 2) returned the property name as
// the value and the value as the source (OpenDataset in this file
// parses the same output with fields 1/2/3). Bounds are now checked so
// a short line cannot panic.
func (d *Dataset) GetProperty(p string) (prop Property, err error) {
	var retVal []string
	if retVal, err = zfsExec([]string{"get", p, d.Path}); err != nil {
		return
	}
	if len(retVal) == 0 {
		err = errors.New("zfs get returned no output")
		return
	}
	propLine := strings.Fields(retVal[0])
	if len(propLine) < 4 {
		err = fmt.Errorf("unexpected zfs get output: %q", retVal[0])
		return
	}
	prop.Value = propLine[2]
	prop.Source = propLine[3]
	return
}
// Rename dataset to newName; forceUnmount adds the -f flag so a mounted
// filesystem is unmounted first.
func (d *Dataset) Rename(newName string, forceUnmount bool) (err error) {
	args := []string{"rename"}
	if forceUnmount {
		args = append(args, "-f")
	}
	_, err = zfsExec(append(args, d.Path, newName))
	return
}
// IsMounted checks to see if the mount is active. If the filesystem is mounted,
// sets in 'where' argument the current mountpoint, and returns true. Otherwise,
// returns false. The answer comes from the cached "mounted"/"mountpoint"
// properties, not from a live query.
func (d *Dataset) IsMounted() (mounted bool, where string) {
	mounted = d.Properties["mounted"].Value == "yes"
	if mounted {
		where = d.Properties["mountpoint"].Value
	}
	return
}
// Mount the given filesystem. A non-empty options string is passed
// through as "-o options".
func (d *Dataset) Mount(options string) (err error) {
	var args []string
	if options == "" {
		args = []string{"mount", d.Path}
	} else {
		args = []string{"mount", "-o", options, d.Path}
	}
	_, err = zfsExec(args)
	return
}
// Unmount the given filesystem via `zfs unmount`; fails if the dataset
// is not currently mounted.
func (d *Dataset) Unmount() (err error) {
	_, err = zfsExec([]string{"unmount", d.Path})
	return
}
// UnmountAll unmount this filesystem and any children inheriting the
// mountpoint property.
//
// NOTE(review): despite the sentence above, this never unmounts d
// itself — only (recursively) children whose mountpoint source contains
// "inherited". Confirm whether the receiver should also be unmounted.
func (d *Dataset) UnmountAll() (err error) {
	for _, child := range d.Children {
		// Recurse first so the deepest datasets are unmounted before
		// their parents.
		if err = child.UnmountAll(); err != nil {
			return
		}
		// Children with an explicitly-set (non-inherited) mountpoint are
		// left mounted.
		if strings.Contains(child.Properties["mountpoint"].Source, "inherited") {
			if err = child.Unmount(); err != nil {
				return
			}
		}
	}
	return
}
// propSize parses the named size property of the dataset, returning 0
// on any parse error. Shared by Size/Avail/Used, which previously
// triplicated this parse-and-swallow logic.
func (d *Dataset) propSize(prop string) datasize.ByteSize {
	size, err := convertToSize(d.Properties[prop].Value)
	if err != nil {
		return 0
	}
	return size
}

// Size returns the space referenced by this dataset ("referenced").
func (d *Dataset) Size() (size datasize.ByteSize) {
	return d.propSize("referenced")
}

// Avail returns the space available to this dataset ("available").
func (d *Dataset) Avail() (size datasize.ByteSize) {
	return d.propSize("available")
}

// Used returns the space consumed by this dataset itself
// ("usedbydataset").
func (d *Dataset) Used() (size datasize.ByteSize) {
	return d.propSize("usedbydataset")
}

29
zfs/main.go → zfs/list.go

@ -5,7 +5,6 @@ import (
"bytes"
"strings"
"github.com/c2h5oh/datasize"
"strconv"
"errors"
)
@ -45,33 +44,9 @@ func zfsListSomeSize(dataset string, parameters ...string) (size datasize.ByteSi
zfs_args = append(zfs_args, dataset)
datasetSize, err := zfsList(zfs_args)
if err != nil {
return size, err
return
}
sizeText := strings.TrimSpace(datasetSize[0])
if strings.Contains(sizeText, "."){
unit := sizeText[len(sizeText)-1:]
sizeText = sizeText[0:len(sizeText)-1]
switch unit {
case "T":
unit = "G"
case "G":
unit = "M"
case "M":
unit = "K"
default:
unit = ""
}
f, ferr := strconv.ParseFloat(sizeText, 64)
if ferr != nil {
return size, ferr
}
f = f * 1024
sizeText = strconv.FormatFloat(f, 'f', 0, 64) + unit
}
if uerr := size.UnmarshalText([]byte(sizeText)); uerr != nil{
return size, uerr
}
return size, nil
return convertToSize(datasetSize[0])
}
func zfsList(args []string) (retVal []string, err error){
Loading…
Cancel
Save