diff --git a/.gitignore b/.gitignore
index 3c02a9a96..21ca5065f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -78,3 +78,7 @@ nodes/
# IDE-related stuff
.idea
+
+# pfs-jrpc cache
+
+pfs-jrpc.env
diff --git a/CONFIGURING.md b/CONFIGURING.md
index 5bf01e2cf..03f5f7472 100644
--- a/CONFIGURING.md
+++ b/CONFIGURING.md
@@ -93,6 +93,12 @@ For each of the keys supported, the following table will list whether or not its
| | ChunkedConnectionPoolSize | Yes | | Yes | No |
| | NonChunkedConnectionPoolSize | Yes | | Yes | No |
| | ChecksumChunkedPutChunks | No | false | Yes | Yes |
+| | SwiftReconNoWriteThreshold | No | 80 | Yes | Yes |
+| | SwiftReconNoWriteErrno | No | ENOSPC | Yes | Yes |
+| | SwiftReconReadOnlyThreshold | No | 90 | Yes | Yes |
+| | SwiftReconReadOnlyErrno | No | EROFS | Yes | Yes |
+| | SwiftConfDir | No | /etc/swift | Yes | Yes |
+| | SwiftReconChecksPerConfCheck | No | 10 | Yes | Yes |
| FSGlobals | VolumeGroupList | Yes | | Yes | Yes |
| | CheckpointHeaderConsensusAttempts | No | 5 | Yes | No |
| | MountRetryLimit | No | 6 | Yes | No |
@@ -191,7 +197,10 @@ For each of the keys supported, the following table will list whether or not its
| | RetryRPCTTLCompleted | No | 10m | Yes | No |
| | RetryRPCAckTrim | No | 100ms | Yes | No |
| | RetryRPCDeadlineIO | No | 60s | Yes | No |
-| | RetryRPCKEEPALIVEPeriod | No | 60s | Yes | No |
+| | RetryRPCKeepAlivePeriod | No | 60s | Yes | No |
+| | MinLeaseDuration | No | 250ms | Yes | No |
+| | LeaseInterruptInterval | No | 250ms | Yes | No |
+| | LeaseInterruptLimit | No | 20 | Yes | No |
| Logging | LogFilePath | No | None | Yes | No |
| | LogToConsole | No | false | Yes | No |
| | TraceLevelLogging | No | None | Yes | No |
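The three new lease keys land alongside the renamed RetryRPCKeepAlivePeriod. A minimal sketch of the arithmetic their defaults imply, assuming (this diff does not state it) that an unresponsive client is interrupted once per LeaseInterruptInterval, at most LeaseInterruptLimit times, before its lease is reclaimed:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults from the table above.
	minLeaseDuration, _ := time.ParseDuration("250ms")
	leaseInterruptInterval, _ := time.ParseDuration("250ms")
	leaseInterruptLimit := 20

	// Assumed worst-case interrupt window before a lease is reclaimed:
	// 250ms * 20 = 5s.
	fmt.Println(minLeaseDuration, time.Duration(leaseInterruptLimit)*leaseInterruptInterval)
}
```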
diff --git a/Makefile b/Makefile
index 762cee1ed..0172e36ea 100644
--- a/Makefile
+++ b/Makefile
@@ -36,6 +36,7 @@ gobinsubdirs = \
inodeworkout \
pfs-crash \
pfs-fsck \
+ pfs-jrpc \
pfs-restart-test \
pfs-stress \
pfs-swift-load \
@@ -91,7 +92,9 @@ else
minimal: pre-generate generate install
endif
-.PHONY: all all-deb-builder bench c-build c-clean c-install c-install-deb-builder c-test ci clean cover fmt generate install pre-generate python-test test version
+pfsagent: pre-generate generate pfsagent-install
+
+.PHONY: all all-deb-builder bench c-build c-clean c-install c-install-deb-builder c-test ci clean cover fmt generate install pfsagent pfsagent-install pre-generate python-test test version
bench:
@set -e; \
@@ -177,6 +180,10 @@ install:
$(MAKE) --no-print-directory -C $$gosubdir install; \
done
+pfsagent-install:
+ $(MAKE) --no-print-directory -C pfsagentd install
+ $(MAKE) --no-print-directory -C pfsagentd/pfsagentd-swift-auth-plugin install
+
pre-generate:
@set -e; \
go install github.com/swiftstack/ProxyFS/vendor/golang.org/x/tools/cmd/stringer; \
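With the pfsagent and pfsagent-install targets above, `make pfsagent` would run pre-generate and generate and then install pfsagentd and its swift-auth plugin without walking the full gobinsubdirs list; that narrower scope is inferred from the recipe, not stated elsewhere in this patch.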
diff --git a/bin/autodoc b/bin/autodoc
index 2eaaf3b57..e516981fb 100755
--- a/bin/autodoc
+++ b/bin/autodoc
@@ -26,11 +26,6 @@ PROJECTS = {
'dir': ['../swift', 'saio'],
'container': 'doc',
},
- 'swift-specs': {
- 'root': '/vagrant/swift-specs/doc/',
- 'dir': ['../'],
- 'container': 'specs',
- },
'swiftclient': {
'root': '/vagrant/python-swiftclient/doc/',
'dir': [],
diff --git a/blunder/api.go b/blunder/api.go
index a8b1da7d0..85d4b386b 100644
--- a/blunder/api.go
+++ b/blunder/api.go
@@ -63,6 +63,7 @@ const (
NotPermError FsError = FsError(int(unix.EPERM)) // Operation not permitted
NotFoundError FsError = FsError(int(unix.ENOENT)) // No such file or directory
IOError FsError = FsError(int(unix.EIO)) // I/O error
+ ReadOnlyError FsError = FsError(int(unix.EROFS)) // Read-only file system
TooBigError FsError = FsError(int(unix.E2BIG)) // Argument list too long
TooManyArgsError FsError = FsError(int(unix.E2BIG)) // Arg list too long
BadFileError FsError = FsError(int(unix.EBADF)) // Bad file number
@@ -81,7 +82,6 @@ const (
FileTooLargeError FsError = FsError(int(unix.EFBIG)) // File too large
NoSpaceError FsError = FsError(int(unix.ENOSPC)) // No space left on device
BadSeekError FsError = FsError(int(unix.ESPIPE)) // Illegal seek
- ReadOnlyError FsError = FsError(int(unix.EROFS)) // Read-only file system
TooManyLinksError FsError = FsError(int(unix.EMLINK)) // Too many links
OutOfRangeError FsError = FsError(int(unix.ERANGE)) // Math result not representable
NameTooLongError FsError = FsError(int(unix.ENAMETOOLONG)) // File name too long
@@ -97,6 +97,7 @@ const (
// Errors that map to constants already defined above
const (
NotActiveError FsError = NotFoundError
+ BadLeaseRequest FsError = InvalidArgError
BadMountIDError FsError = InvalidArgError
BadMountVolumeError FsError = InvalidArgError
NotFileError FsError = IsDirError
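ReadOnlyError moves up next to IOError, and BadLeaseRequest joins the InvalidArgError aliases. A minimal sketch, not taken from this patch, of how callers could surface the two using the same blunder.NewError pattern seen in fs/api_internal.go below; the trigger conditions are hypothetical placeholders:

```go
package example

import "github.com/swiftstack/ProxyFS/blunder"

// denyIfReadOnly is a hypothetical guard returning EROFS via the relocated
// ReadOnlyError constant.
func denyIfReadOnly(volumeIsReadOnly bool) (err error) {
	if volumeIsReadOnly {
		return blunder.NewError(blunder.ReadOnlyError, "EROFS")
	}
	return nil
}

// rejectUnknownLeaseRequest is a hypothetical guard returning EINVAL via the
// new BadLeaseRequest alias of InvalidArgError.
func rejectUnknownLeaseRequest(leaseRequestTypeKnown bool) (err error) {
	if !leaseRequestTypeKnown {
		return blunder.NewError(blunder.BadLeaseRequest, "EINVAL")
	}
	return nil
}
```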
diff --git a/ci/ansible/chef_files/ci.attribs.json b/ci/ansible/chef_files/ci.attribs.json
index b8c6dd90b..9260af429 100644
--- a/ci/ansible/chef_files/ci.attribs.json
+++ b/ci/ansible/chef_files/ci.attribs.json
@@ -28,8 +28,6 @@
"swiftclient_repo_branch": "master",
"swift_bench_repo": "git://github.com/openstack/swift-bench.git",
"swift_bench_repo_branch": "master",
- "swift_specs_repo": "git://github.com/openstack/swift-specs.git",
- "swift_specs_repo_branch": "master",
"extra_key": "",
"source_root": "/home/swiftstack/provisioning",
"use_swiftstack_packages": false
diff --git a/ci/ansible/chef_files/runway.attribs.json b/ci/ansible/chef_files/runway.attribs.json
index 3a896801f..d18d0490b 100644
--- a/ci/ansible/chef_files/runway.attribs.json
+++ b/ci/ansible/chef_files/runway.attribs.json
@@ -28,8 +28,6 @@
"swiftclient_repo_branch": "master",
"swift_bench_repo": "git://github.com/openstack/swift-bench.git",
"swift_bench_repo_branch": "master",
- "swift_specs_repo": "git://github.com/openstack/swift-specs.git",
- "swift_specs_repo_branch": "master",
"extra_key": "",
"source_root": "/home/swift/code/ProxyFS",
"use_swiftstack_packages": false,
diff --git a/ci/ansible/chef_files/runway_ss.attribs.json b/ci/ansible/chef_files/runway_ss.attribs.json
index 73d8f5114..cdf8c6b65 100644
--- a/ci/ansible/chef_files/runway_ss.attribs.json
+++ b/ci/ansible/chef_files/runway_ss.attribs.json
@@ -28,8 +28,6 @@
"swiftclient_repo_branch": "master",
"swift_bench_repo": "git://github.com/openstack/swift-bench.git",
"swift_bench_repo_branch": "master",
- "swift_specs_repo": "git://github.com/openstack/swift-specs.git",
- "swift_specs_repo_branch": "master",
"extra_key": "",
"source_root": "/home/swift/code/ProxyFS",
"use_swiftstack_packages": true,
diff --git a/ci/ansible/chef_files/vagrant.attribs.json b/ci/ansible/chef_files/vagrant.attribs.json
index 69692bf08..4b17de57d 100644
--- a/ci/ansible/chef_files/vagrant.attribs.json
+++ b/ci/ansible/chef_files/vagrant.attribs.json
@@ -28,8 +28,6 @@
"swiftclient_repo_branch": "master",
"swift_bench_repo": "git://github.com/openstack/swift-bench.git",
"swift_bench_repo_branch": "master",
- "swift_specs_repo": "git://github.com/openstack/swift-specs.git",
- "swift_specs_repo_branch": "master",
"extra_key": "",
"source_root": "/vagrant",
"use_swiftstack_packages": false
diff --git a/ci/chef.attribs.json b/ci/chef.attribs.json
index c2b2b24bb..0c83b6f36 100644
--- a/ci/chef.attribs.json
+++ b/ci/chef.attribs.json
@@ -26,8 +26,6 @@
"swiftclient_repo_branch": "master",
"swift_bench_repo": "git://github.com/openstack/swift-bench.git",
"swift_bench_repo_branch": "master",
- "swift_specs_repo": "git://github.com/openstack/swift-specs.git",
- "swift_specs_repo_branch": "master",
"extra_key": "",
"source_root": "/home/swiftstack/provisioning"
}
diff --git a/confgen/sample-proxyfs-configuration/proxyfs.conf b/confgen/sample-proxyfs-configuration/proxyfs.conf
index c3bb93322..3d7651b20 100644
--- a/confgen/sample-proxyfs-configuration/proxyfs.conf
+++ b/confgen/sample-proxyfs-configuration/proxyfs.conf
@@ -17,7 +17,6 @@ PublicIPAddr: 192.168.18.222
PrivateIPAddr: 192.168.18.222
ReadCacheQuotaFraction: 0.20
-
# Identifies what "peers" make up the cluster and which one "we" are
[Cluster]
WhoAmI: c67631b5-cb88-11e9-99da-0248604d6797
@@ -25,20 +24,41 @@ Peers: c63edaae-cb88-11e9-b49f-020e05f0ad07 c67631b5-cb88-11e9
Arbiters: c63edaae-cb88-11e9-b49f-020e05f0ad07 c67631b5-cb88-11e9-99da-0248604d6797 c6e8e18f-cb88-11e9-92db-02a727b377d1
ServerGuid: c67631b5-cb88-11e9-99da-0248604d6797
AcctHash: de374096638e77912ff8ebf617b98fc7
+PrivateClusterUDPPort: 8123
+UDPPacketSendSize: 1400
+UDPPacketRecvSize: 1500
+UDPPacketCapPerMessage: 5
+HeartBeatDuration: 1s
+HeartBeatMissLimit: 3
+MessageQueueDepthPerPeer: 4
+MaxRequestDuration: 1s
+LivenessCheckRedundancy: 2
+LogLevel: 0
ConfigVersion: 1570666863
# Specifies the path particulars to the "NoAuth" WSGI pipeline
[SwiftClient]
+NoAuthIPAddr: 127.0.0.1
NoAuthTCPPort: 8090
-Timeout: 10000ms
-ChunkedConnectionPoolSize: 1000
-NonChunkedConnectionPoolSize: 100
-RetryLimit: 11
-RetryDelay: 1000ms
+
+RetryDelay: 1s
RetryExpBackoff: 1.5
-RetryLimitObject: 8
-RetryDelayObject: 1000ms
+RetryLimit: 11
+
+RetryDelayObject: 1s
RetryExpBackoffObject: 1.95
+RetryLimitObject: 8
+
+ChunkedConnectionPoolSize: 512
+NonChunkedConnectionPoolSize: 128
+
+SwiftReconNoWriteThreshold: 80
+SwiftReconNoWriteErrno: ENOSPC
+SwiftReconReadOnlyThreshold: 90
+SwiftReconReadOnlyErrno: EROFS
+SwiftConfDir: /etc/swift
+SwiftReconChecksPerConfCheck: 10
+
# A set of storage policies into which the chunks of files and directories will go
@@ -50,17 +70,20 @@ ContainerNamePrefix: Standard-Replica_
ContainersPerPeer: 1000
MaxObjectsPerContainer: 1000000
-
-
-
-
-
# RPC path from file system clients (both Samba and "normal" WSGI stack)... needs to be shared with them
[JSONRPCServer]
-TCPPort: 12345
-FastTCPPort: 32345
-Debug: False
-DataPathLogging: False
+TCPPort: 12345
+FastTCPPort: 32345
+DataPathLogging: false
+Debug: false
+RetryRPCPort: 32356
+RetryRPCTTLCompleted: 10m
+RetryRPCAckTrim: 100ms
+RetryRPCDeadlineIO: 60s
+RetryRPCKeepAlivePeriod: 60s
+MinLeaseDuration: 250ms
+LeaseInterruptInterval: 250ms
+LeaseInterruptLimit: 20
[RPC] # Note: This is very soon to be deprecated... so just hard-code these values until then
NoAuthTCPSocket=true
@@ -105,8 +128,6 @@ EtcdEnabled: true
EtcdAutoSyncInterval: 60000ms
EtcdDialTimeout: 10000ms
EtcdOpTimeout: 20000ms
-.include ./proxyfs-etcd-endpoints.conf
-
-
+.include ./proxyfs-etcd-endpoints.conf
.include ./proxyfs-shares.conf
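SwiftReconNoWriteErrno and SwiftReconReadOnlyErrno name their errnos symbolically (ENOSPC, EROFS). A hypothetical helper, not part of this patch, showing one way those names could map onto blunder's errno-backed constants, including the ReadOnlyError this patch relocates:

```go
package example

import (
	"fmt"

	"github.com/swiftstack/ProxyFS/blunder"
)

// reconErrnoFromName maps the symbolic errno names used by the
// SwiftRecon*Errno keys above onto blunder FsError values. Only the two
// names that appear in this configuration are handled.
func reconErrnoFromName(errnoName string) (fsErr blunder.FsError, err error) {
	switch errnoName {
	case "ENOSPC":
		return blunder.NoSpaceError, nil
	case "EROFS":
		return blunder.ReadOnlyError, nil
	default:
		return 0, fmt.Errorf("unsupported recon errno name: %q", errnoName)
	}
}
```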
diff --git a/cookbooks/swift/recipes/source.rb b/cookbooks/swift/recipes/source.rb
index 0117e2bb6..e3bb544dd 100644
--- a/cookbooks/swift/recipes/source.rb
+++ b/cookbooks/swift/recipes/source.rb
@@ -45,13 +45,6 @@
action :run
end
-execute "git swift-specs" do
- cwd "#{node['source_root']}"
- command "sudo -u #{node['swift_user']} git clone -b #{node['swift_specs_repo_branch']} #{node['swift_specs_repo']}"
- creates "#{node['source_root']}/swift-specs"
- action :run
-end
-
execute "fix semantic_version error from testtools" do
command "pip install --upgrade testtools"
end
@@ -86,12 +79,6 @@
action :run
end
-execute "swift-specs-install" do
- cwd "#{node['source_root']}/swift-specs"
- command "pip install -r requirements.txt"
- action :run
-end
-
execute "install tox" do
command "pip install tox==3.5.3"
if not node['full_reprovision']
diff --git a/dlm/config.go b/dlm/config.go
index ed63792d8..ebbba8974 100644
--- a/dlm/config.go
+++ b/dlm/config.go
@@ -58,6 +58,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
diff --git a/evtlog/config.go b/evtlog/config.go
index c9aaab213..6c747f1f8 100644
--- a/evtlog/config.go
+++ b/evtlog/config.go
@@ -152,6 +152,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
diff --git a/fs/api.go b/fs/api.go
index a51f5793f..38fb7f750 100644
--- a/fs/api.go
+++ b/fs/api.go
@@ -11,8 +11,6 @@ import (
"github.com/swiftstack/ProxyFS/utils"
)
-type MountID uint64
-
// ReadRangeIn is the ReadPlan range requested
//
// Either Offset or Len can be omitted, but not both. Those correspond
@@ -97,12 +95,6 @@ type FlockStruct struct {
Pid uint64
}
-type MountOptions uint64
-
-const (
- MountReadOnly MountOptions = 1 << iota
-)
-
type StatKey uint64
const (
@@ -153,19 +145,19 @@ type JobHandle interface {
Info() (info []string)
}
-// Mount handle interface
+// Volume handle interface
-func MountByAccountName(accountName string, mountOptions MountOptions) (mountHandle MountHandle, err error) {
- mountHandle, err = mountByAccountName(accountName, mountOptions)
+func FetchVolumeHandleByAccountName(accountName string) (volumeHandle VolumeHandle, err error) {
+ volumeHandle, err = fetchVolumeHandleByAccountName(accountName)
return
}
-func MountByVolumeName(volumeName string, mountOptions MountOptions) (mountHandle MountHandle, err error) {
- mountHandle, err = mountByVolumeName(volumeName, mountOptions)
+func FetchVolumeHandleByVolumeName(volumeName string) (volumeHandle VolumeHandle, err error) {
+ volumeHandle, err = fetchVolumeHandleByVolumeName(volumeName)
return
}
-type MountHandle interface {
+type VolumeHandle interface {
Access(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, accessMode inode.InodeMode) (accessReturn bool)
CallInodeToProvisionObject() (pPath string, err error)
Create(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string, filePerm inode.InodeMode) (fileInodeNumber inode.InodeNumber, err error)
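fs drops MountID, MountOptions, and the mount-by-name entry points in favor of volume handles; much of the rest of this patch is the mechanical mS.volStruct → vS receiver rename that follows from it. An illustrative caller migration, not taken from this patch (the volume name is a placeholder):

```go
package example

import "github.com/swiftstack/ProxyFS/fs"

// Before this change a caller obtained a MountHandle:
//
//	mountHandle, err := fs.MountByVolumeName("CommonVolume", 0)
//
// Afterwards the same caller fetches a VolumeHandle directly; MountOptions
// and MountReadOnly are gone, and the methods on the handle are unchanged.
func lookupHandle() (volumeHandle fs.VolumeHandle, err error) {
	volumeHandle, err = fs.FetchVolumeHandleByVolumeName("CommonVolume")
	return
}
```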
diff --git a/fs/api_internal.go b/fs/api_internal.go
index 852909396..f8f5df31b 100644
--- a/fs/api_internal.go
+++ b/fs/api_internal.go
@@ -194,19 +194,18 @@ func (inFlightFileInodeData *inFlightFileInodeDataStruct) inFlightFileInodeDataT
inFlightFileInodeData.wg.Done()
}
-func mountByAccountName(accountName string, mountOptions MountOptions) (mountHandle MountHandle, err error) {
+func fetchVolumeHandleByAccountName(accountName string) (volumeHandle VolumeHandle, err error) {
var (
- mS *mountStruct
ok bool
- volStruct *volumeStruct
+ vS *volumeStruct
volumeName string
)
startTime := time.Now()
defer func() {
- globals.MountUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
+ globals.FetchVolumeHandleUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
if err != nil {
- globals.MountErrors.Add(1)
+ globals.FetchVolumeHandleErrors.Add(1)
}
}()
@@ -220,7 +219,7 @@ func mountByAccountName(accountName string, mountOptions MountOptions) (mountHan
return
}
- volStruct, ok = globals.volumeMap[volumeName]
+ vS, ok = globals.volumeMap[volumeName]
if !ok {
err = fmt.Errorf("Unknown volumeName computed by mountByAccountName(): \"%s\"", volumeName)
err = blunder.AddError(err, blunder.NotFoundError)
@@ -228,46 +227,31 @@ func mountByAccountName(accountName string, mountOptions MountOptions) (mountHan
return
}
- globals.lastMountID++
-
- mS = &mountStruct{
- id: globals.lastMountID,
- options: mountOptions,
- volStruct: volStruct,
- }
-
- globals.mountMap[mS.id] = mS
-
- volStruct.dataMutex.Lock()
- volStruct.mountList = append(volStruct.mountList, mS.id)
- volStruct.dataMutex.Unlock()
-
globals.Unlock()
- mountHandle = mS
+ volumeHandle = vS
err = nil
return
}
-func mountByVolumeName(volumeName string, mountOptions MountOptions) (mountHandle MountHandle, err error) {
+func fetchVolumeHandleByVolumeName(volumeName string) (volumeHandle VolumeHandle, err error) {
var (
- mS *mountStruct
- ok bool
- volStruct *volumeStruct
+ ok bool
+ vS *volumeStruct
)
startTime := time.Now()
defer func() {
- globals.MountUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
+ globals.FetchVolumeHandleUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
if err != nil {
- globals.MountErrors.Add(1)
+ globals.FetchVolumeHandleErrors.Add(1)
}
}()
globals.Lock()
- volStruct, ok = globals.volumeMap[volumeName]
+ vS, ok = globals.volumeMap[volumeName]
if !ok {
err = fmt.Errorf("Unknown volumeName passed to mountByVolumeName(): \"%s\"", volumeName)
err = blunder.AddError(err, blunder.NotFoundError)
@@ -275,43 +259,29 @@ func mountByVolumeName(volumeName string, mountOptions MountOptions) (mountHandl
return
}
- globals.lastMountID++
-
- mS = &mountStruct{
- id: globals.lastMountID,
- options: mountOptions,
- volStruct: volStruct,
- }
-
- globals.mountMap[mS.id] = mS
-
- volStruct.dataMutex.Lock()
- volStruct.mountList = append(volStruct.mountList, mS.id)
- volStruct.dataMutex.Unlock()
-
globals.Unlock()
- mountHandle = mS
+ volumeHandle = vS
err = nil
return
}
-func (mS *mountStruct) Access(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, accessMode inode.InodeMode) (accessReturn bool) {
+func (vS *volumeStruct) Access(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, accessMode inode.InodeMode) (accessReturn bool) {
startTime := time.Now()
defer func() {
globals.AccessUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- accessReturn = mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, accessMode,
+ accessReturn = vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, accessMode,
inode.NoOverride)
return
}
-func (mS *mountStruct) CallInodeToProvisionObject() (pPath string, err error) {
+func (vS *volumeStruct) CallInodeToProvisionObject() (pPath string, err error) {
startTime := time.Now()
defer func() {
globals.CallInodeToProvisionObjectUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -320,14 +290,14 @@ func (mS *mountStruct) CallInodeToProvisionObject() (pPath string, err error) {
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- pPath, err = mS.volStruct.inodeVolumeHandle.ProvisionObject()
+ pPath, err = vS.inodeVolumeHandle.ProvisionObject()
return
}
-func (mS *mountStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string, filePerm inode.InodeMode) (fileInodeNumber inode.InodeNumber, err error) {
+func (vS *volumeStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string, filePerm inode.InodeMode) (fileInodeNumber inode.InodeNumber, err error) {
startTime := time.Now()
defer func() {
globals.CreateUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -336,8 +306,8 @@ func (mS *mountStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
err = validateBaseName(basename)
if err != nil {
@@ -345,7 +315,7 @@ func (mS *mountStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroup
}
// Lock the directory inode before doing the link
- dirInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(dirInodeNumber, nil)
+ dirInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(dirInodeNumber, nil)
if err != nil {
return 0, err
}
@@ -355,24 +325,24 @@ func (mS *mountStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroup
}
defer dirInodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
return 0, blunder.NewError(blunder.NotFoundError, "ENOENT")
}
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
return 0, blunder.NewError(blunder.PermDeniedError, "EACCES")
}
// create the file and add it to the directory
- fileInodeNumber, err = mS.volStruct.inodeVolumeHandle.CreateFile(filePerm, userID, groupID)
+ fileInodeNumber, err = vS.inodeVolumeHandle.CreateFile(filePerm, userID, groupID)
if err != nil {
return 0, err
}
- err = mS.volStruct.inodeVolumeHandle.Link(dirInodeNumber, basename, fileInodeNumber, false)
+ err = vS.inodeVolumeHandle.Link(dirInodeNumber, basename, fileInodeNumber, false)
if err != nil {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(fileInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(fileInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Link() in fs.Create", fileInodeNumber)
}
@@ -382,7 +352,7 @@ func (mS *mountStruct) Create(userID inode.InodeUserID, groupID inode.InodeGroup
return fileInodeNumber, nil
}
-func (mS *mountStruct) DefragmentFile(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fileInodeNumber inode.InodeNumber) (err error) {
+func (vS *volumeStruct) DefragmentFile(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fileInodeNumber inode.InodeNumber) (err error) {
var (
eofReached bool
fileOffset uint64
@@ -398,45 +368,45 @@ func (mS *mountStruct) DefragmentFile(userID inode.InodeUserID, groupID inode.In
}
}()
- mS.volStruct.jobRWMutex.RLock()
+ vS.jobRWMutex.RLock()
- inodeLock, err = mS.volStruct.inodeVolumeHandle.InitInodeLock(fileInodeNumber, nil)
+ inodeLock, err = vS.inodeVolumeHandle.InitInodeLock(fileInodeNumber, nil)
if nil != err {
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
return
}
err = inodeLock.WriteLock()
if nil != err {
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
_ = inodeLock.Unlock()
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
_ = inodeLock.Unlock()
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- inodeType, err = mS.volStruct.inodeVolumeHandle.GetType(fileInodeNumber)
+ inodeType, err = vS.inodeVolumeHandle.GetType(fileInodeNumber)
if nil != err {
_ = inodeLock.Unlock()
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
logger.ErrorfWithError(err, "couldn't get type for inode %v", fileInodeNumber)
return
}
// Make sure the inode number is for a file inode
if inodeType != inode.FileType {
_ = inodeLock.Unlock()
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
err = fmt.Errorf("%s: expected inode %v to be a file inode, got %v", utils.GetFnName(), fileInodeNumber, inodeType)
logger.ErrorWithError(err)
err = blunder.AddError(err, blunder.NotFileError)
@@ -446,26 +416,26 @@ func (mS *mountStruct) DefragmentFile(userID inode.InodeUserID, groupID inode.In
fileOffset = 0
for {
- fileOffset, eofReached, err = mS.volStruct.inodeVolumeHandle.DefragmentFile(fileInodeNumber, fileOffset, mS.volStruct.fileDefragmentChunkSize)
+ fileOffset, eofReached, err = vS.inodeVolumeHandle.DefragmentFile(fileInodeNumber, fileOffset, vS.fileDefragmentChunkSize)
_ = inodeLock.Unlock()
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
if nil != err {
return
}
if eofReached {
return
}
- time.Sleep(mS.volStruct.fileDefragmentChunkDelay)
- mS.volStruct.jobRWMutex.RLock()
+ time.Sleep(vS.fileDefragmentChunkDelay)
+ vS.jobRWMutex.RLock()
err = inodeLock.WriteLock()
if nil != err {
- mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RUnlock()
return
}
}
}
-func (mS *mountStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fileInodeNumber inode.InodeNumber, fileOffset uint64, maxEntriesFromFileOffset int64, maxEntriesBeforeFileOffset int64) (extentMapChunk *inode.ExtentMapChunkStruct, err error) {
+func (vS *volumeStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fileInodeNumber inode.InodeNumber, fileOffset uint64, maxEntriesFromFileOffset int64, maxEntriesBeforeFileOffset int64) (extentMapChunk *inode.ExtentMapChunkStruct, err error) {
var (
inodeLock *dlm.RWLockStruct
inodeType inode.InodeType
@@ -479,10 +449,10 @@ func (mS *mountStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID ino
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err = mS.volStruct.inodeVolumeHandle.InitInodeLock(fileInodeNumber, nil)
+ inodeLock, err = vS.inodeVolumeHandle.InitInodeLock(fileInodeNumber, nil)
if nil != err {
return
}
@@ -492,18 +462,18 @@ func (mS *mountStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID ino
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
+ if !vS.inodeVolumeHandle.Access(fileInodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- inodeType, err = mS.volStruct.inodeVolumeHandle.GetType(fileInodeNumber)
+ inodeType, err = vS.inodeVolumeHandle.GetType(fileInodeNumber)
if nil != err {
logger.ErrorfWithError(err, "couldn't get type for inode %v", fileInodeNumber)
return
@@ -516,7 +486,7 @@ func (mS *mountStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID ino
return
}
- extentMapChunk, err = mS.volStruct.inodeVolumeHandle.FetchExtentMapChunk(fileInodeNumber, fileOffset, maxEntriesFromFileOffset, maxEntriesBeforeFileOffset)
+ extentMapChunk, err = vS.inodeVolumeHandle.FetchExtentMapChunk(fileInodeNumber, fileOffset, maxEntriesFromFileOffset, maxEntriesBeforeFileOffset)
return
}
@@ -530,22 +500,22 @@ func (mS *mountStruct) FetchExtentMapChunk(userID inode.InodeUserID, groupID ino
//
// TODO is to determine where else a call to this func should also be made.
//
-func (mS *mountStruct) doInlineCheckpointIfEnabled() {
+func (vS *volumeStruct) doInlineCheckpointIfEnabled() {
var (
err error
)
- if !mS.volStruct.doCheckpointPerFlush {
+ if !vS.doCheckpointPerFlush {
return
}
- err = mS.volStruct.headhunterVolumeHandle.DoCheckpoint()
+ err = vS.headhunterVolumeHandle.DoCheckpoint()
if nil != err {
logger.Fatalf("fs.doInlineCheckpoint() call to headhunter.DoCheckpoint() failed: %v", err)
}
}
-func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (err error) {
+func (vS *volumeStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (err error) {
startTime := time.Now()
defer func() {
globals.FlushUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -554,10 +524,10 @@ func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupI
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -567,7 +537,7 @@ func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
return blunder.NewError(blunder.NotFoundError, "ENOENT")
}
@@ -578,22 +548,22 @@ func (mS *mountStruct) Flush(userID inode.InodeUserID, groupID inode.InodeGroupI
// check would fail if the caller actually only had ReadOnly access to the Inode, so
// we won't be doing the check here.
- err = mS.volStruct.inodeVolumeHandle.Flush(inodeNumber, false)
- mS.volStruct.untrackInFlightFileInodeData(inodeNumber, false)
+ err = vS.inodeVolumeHandle.Flush(inodeNumber, false)
+ vS.untrackInFlightFileInodeData(inodeNumber, false)
- mS.doInlineCheckpointIfEnabled()
+ vS.doInlineCheckpointIfEnabled()
return
}
-func (mS *mountStruct) getFileLockList(inodeNumber inode.InodeNumber) (flockList *list.List) {
- mS.volStruct.dataMutex.Lock()
- defer mS.volStruct.dataMutex.Unlock()
+func (vS *volumeStruct) getFileLockList(inodeNumber inode.InodeNumber) (flockList *list.List) {
+ vS.dataMutex.Lock()
+ defer vS.dataMutex.Unlock()
- flockList, ok := mS.volStruct.FLockMap[inodeNumber]
+ flockList, ok := vS.FLockMap[inodeNumber]
if !ok {
flockList = new(list.List)
- mS.volStruct.FLockMap[inodeNumber] = flockList
+ vS.FLockMap[inodeNumber] = flockList
}
return
@@ -617,8 +587,8 @@ func checkConflict(elm *FlockStruct, flock *FlockStruct) bool {
return false
}
-func (mS *mountStruct) verifyLock(inodeNumber inode.InodeNumber, flock *FlockStruct) (conflictLock *FlockStruct) {
- flockList := mS.getFileLockList(inodeNumber)
+func (vS *volumeStruct) verifyLock(inodeNumber inode.InodeNumber, flock *FlockStruct) (conflictLock *FlockStruct) {
+ flockList := vS.getFileLockList(inodeNumber)
for e := flockList.Front(); e != nil; e = e.Next() {
elm := e.Value.(*FlockStruct)
@@ -633,9 +603,9 @@ func (mS *mountStruct) verifyLock(inodeNumber inode.InodeNumber, flock *FlockStr
// Insert a file lock range to corresponding lock list for the pid.
// Assumption: There is no lock conflict and the range that is being inserted has no conflict and is free.
-func (mS *mountStruct) fileLockInsert(inodeNumber inode.InodeNumber, inFlock *FlockStruct) (err error) {
+func (vS *volumeStruct) fileLockInsert(inodeNumber inode.InodeNumber, inFlock *FlockStruct) (err error) {
err = nil
- flockList := mS.getFileLockList(inodeNumber)
+ flockList := vS.getFileLockList(inodeNumber)
overlapList := new(list.List)
var beforeElm *list.Element // Refers to the immediate element that starts before the start of the range.
@@ -747,9 +717,9 @@ func (mS *mountStruct) fileLockInsert(inodeNumber inode.InodeNumber, inFlock *Fl
}
// Unlock a given range. All locks held in this range by the process (identified by Pid) are removed.
-func (mS *mountStruct) fileUnlock(inodeNumber inode.InodeNumber, inFlock *FlockStruct) (err error) {
+func (vS *volumeStruct) fileUnlock(inodeNumber inode.InodeNumber, inFlock *FlockStruct) (err error) {
- flockList := mS.getFileLockList(inodeNumber)
+ flockList := vS.getFileLockList(inodeNumber)
if flockList == nil {
logger.Warnf("Unlock of a region not already locked - %+v", inFlock)
return
@@ -819,7 +789,7 @@ func (mS *mountStruct) fileUnlock(inodeNumber inode.InodeNumber, inFlock *FlockS
// Implements file locking conforming to fcntl(2) locking description. F_SETLKW is not implemented. Supports F_SETLW and F_GETLW.
// whence: FS supports only SEEK_SET - starting from 0, since it does not manage file handles, caller is expected to supply the start and length relative to offset ZERO.
-func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, lockCmd int32, inFlock *FlockStruct) (outFlock *FlockStruct, err error) {
+func (vS *volumeStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, lockCmd int32, inFlock *FlockStruct) (outFlock *FlockStruct, err error) {
startTime := time.Now()
defer func() {
switch lockCmd {
@@ -852,8 +822,8 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
outFlock = inFlock
@@ -863,7 +833,7 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
}
// Make sure the inode does not go away, while we are applying the flock.
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -873,11 +843,11 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK, inode.NoOverride) {
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK, inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK, inode.OwnerOverride) {
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK, inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
@@ -888,7 +858,7 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
switch lockCmd {
case syscall.F_GETLK:
- conflictLock := mS.verifyLock(inodeNumber, inFlock)
+ conflictLock := vS.verifyLock(inodeNumber, inFlock)
if conflictLock != nil {
outFlock = conflictLock
err = blunder.AddError(nil, blunder.TryAgainError)
@@ -900,10 +870,10 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
case syscall.F_SETLK:
if inFlock.Type == syscall.F_UNLCK {
- err = mS.fileUnlock(inodeNumber, inFlock)
+ err = vS.fileUnlock(inodeNumber, inFlock)
} else if inFlock.Type == syscall.F_WRLCK || inFlock.Type == syscall.F_RDLCK {
- err = mS.fileLockInsert(inodeNumber, inFlock)
+ err = vS.fileLockInsert(inodeNumber, inFlock)
} else {
err = blunder.NewError(blunder.InvalidArgError, "EINVAL")
@@ -919,9 +889,9 @@ func (mS *mountStruct) Flock(userID inode.InodeUserID, groupID inode.InodeGroupI
return
}
-func (mS *mountStruct) getstatHelper(inodeNumber inode.InodeNumber, callerID dlm.CallerID) (stat Stat, err error) {
+func (vS *volumeStruct) getstatHelper(inodeNumber inode.InodeNumber, callerID dlm.CallerID) (stat Stat, err error) {
- lockID, err := mS.volStruct.inodeVolumeHandle.MakeLockID(inodeNumber)
+ lockID, err := vS.inodeVolumeHandle.MakeLockID(inodeNumber)
if err != nil {
return
}
@@ -930,17 +900,17 @@ func (mS *mountStruct) getstatHelper(inodeNumber inode.InodeNumber, callerID dlm
return nil, blunder.AddError(err, blunder.NotFoundError)
}
- stat, err = mS.getstatHelperWhileLocked(inodeNumber)
+ stat, err = vS.getstatHelperWhileLocked(inodeNumber)
return
}
-func (mS *mountStruct) getstatHelperWhileLocked(inodeNumber inode.InodeNumber) (stat Stat, err error) {
+func (vS *volumeStruct) getstatHelperWhileLocked(inodeNumber inode.InodeNumber) (stat Stat, err error) {
var (
metadata *inode.MetadataStruct
)
- metadata, err = mS.volStruct.inodeVolumeHandle.GetMetadata(inodeNumber)
+ metadata, err = vS.inodeVolumeHandle.GetMetadata(inodeNumber)
if nil != err {
return
}
@@ -963,7 +933,7 @@ func (mS *mountStruct) getstatHelperWhileLocked(inodeNumber inode.InodeNumber) (
return
}
-func (mS *mountStruct) Getstat(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (stat Stat, err error) {
+func (vS *volumeStruct) Getstat(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (stat Stat, err error) {
startTime := time.Now()
defer func() {
globals.GetstatUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -972,10 +942,10 @@ func (mS *mountStruct) Getstat(userID inode.InodeUserID, groupID inode.InodeGrou
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -986,12 +956,12 @@ func (mS *mountStruct) Getstat(userID inode.InodeUserID, groupID inode.InodeGrou
defer inodeLock.Unlock()
// Call getstat helper function to do the work
- return mS.getstatHelper(inodeNumber, inodeLock.GetCallerID())
+ return vS.getstatHelper(inodeNumber, inodeLock.GetCallerID())
}
-func (mS *mountStruct) getTypeHelper(inodeNumber inode.InodeNumber, callerID dlm.CallerID) (inodeType inode.InodeType, err error) {
+func (vS *volumeStruct) getTypeHelper(inodeNumber inode.InodeNumber, callerID dlm.CallerID) (inodeType inode.InodeType, err error) {
- lockID, err := mS.volStruct.inodeVolumeHandle.MakeLockID(inodeNumber)
+ lockID, err := vS.inodeVolumeHandle.MakeLockID(inodeNumber)
if err != nil {
return
}
@@ -1001,7 +971,7 @@ func (mS *mountStruct) getTypeHelper(inodeNumber inode.InodeNumber, callerID dlm
return
}
- inodeType, err = mS.volStruct.inodeVolumeHandle.GetType(inodeNumber)
+ inodeType, err = vS.inodeVolumeHandle.GetType(inodeNumber)
if err != nil {
logger.ErrorWithError(err, "couldn't get inode type")
return inodeType, err
@@ -1009,7 +979,7 @@ func (mS *mountStruct) getTypeHelper(inodeNumber inode.InodeNumber, callerID dlm
return inodeType, nil
}
-func (mS *mountStruct) GetType(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeType inode.InodeType, err error) {
+func (vS *volumeStruct) GetType(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeType inode.InodeType, err error) {
startTime := time.Now()
defer func() {
globals.GetTypeUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1018,10 +988,10 @@ func (mS *mountStruct) GetType(userID inode.InodeUserID, groupID inode.InodeGrou
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1031,10 +1001,10 @@ func (mS *mountStruct) GetType(userID inode.InodeUserID, groupID inode.InodeGrou
}
defer inodeLock.Unlock()
- return mS.getTypeHelper(inodeNumber, inodeLock.GetCallerID())
+ return vS.getTypeHelper(inodeNumber, inodeLock.GetCallerID())
}
-func (mS *mountStruct) GetXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string) (value []byte, err error) {
+func (vS *volumeStruct) GetXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string) (value []byte, err error) {
startTime := time.Now()
defer func() {
globals.GetXAttrUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1043,10 +1013,10 @@ func (mS *mountStruct) GetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1056,18 +1026,18 @@ func (mS *mountStruct) GetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- value, err = mS.volStruct.inodeVolumeHandle.GetStream(inodeNumber, streamName)
+ value, err = vS.inodeVolumeHandle.GetStream(inodeNumber, streamName)
if err != nil {
// Did not find the requested stream. However this isn't really an error since
// samba will ask for acl-related streams and is fine with not finding them.
@@ -1077,7 +1047,7 @@ func (mS *mountStruct) GetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
return
}
-func (mS *mountStruct) IsDir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsDir bool, err error) {
+func (vS *volumeStruct) IsDir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsDir bool, err error) {
startTime := time.Now()
defer func() {
globals.IsDirUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1086,10 +1056,10 @@ func (mS *mountStruct) IsDir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1099,7 +1069,7 @@ func (mS *mountStruct) IsDir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- lockID, err := mS.volStruct.inodeVolumeHandle.MakeLockID(inodeNumber)
+ lockID, err := vS.inodeVolumeHandle.MakeLockID(inodeNumber)
if err != nil {
return
}
@@ -1108,14 +1078,14 @@ func (mS *mountStruct) IsDir(userID inode.InodeUserID, groupID inode.InodeGroupI
return false, blunder.AddError(err, blunder.NotFoundError)
}
- inodeType, err := mS.volStruct.inodeVolumeHandle.GetType(inodeNumber)
+ inodeType, err := vS.inodeVolumeHandle.GetType(inodeNumber)
if err != nil {
return false, err
}
return inodeType == inode.DirType, nil
}
-func (mS *mountStruct) IsFile(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsFile bool, err error) {
+func (vS *volumeStruct) IsFile(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsFile bool, err error) {
startTime := time.Now()
defer func() {
globals.IsFileUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1124,10 +1094,10 @@ func (mS *mountStruct) IsFile(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1137,7 +1107,7 @@ func (mS *mountStruct) IsFile(userID inode.InodeUserID, groupID inode.InodeGroup
}
defer inodeLock.Unlock()
- inodeType, err := mS.volStruct.inodeVolumeHandle.GetType(inodeNumber)
+ inodeType, err := vS.inodeVolumeHandle.GetType(inodeNumber)
if err != nil {
return false, err
}
@@ -1145,7 +1115,7 @@ func (mS *mountStruct) IsFile(userID inode.InodeUserID, groupID inode.InodeGroup
return inodeType == inode.FileType, nil
}
-func (mS *mountStruct) IsSymlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsSymlink bool, err error) {
+func (vS *volumeStruct) IsSymlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (inodeIsSymlink bool, err error) {
startTime := time.Now()
defer func() {
globals.IsSymlinkUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1154,10 +1124,10 @@ func (mS *mountStruct) IsSymlink(userID inode.InodeUserID, groupID inode.InodeGr
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1167,7 +1137,7 @@ func (mS *mountStruct) IsSymlink(userID inode.InodeUserID, groupID inode.InodeGr
}
defer inodeLock.Unlock()
- inodeType, err := mS.volStruct.inodeVolumeHandle.GetType(inodeNumber)
+ inodeType, err := vS.inodeVolumeHandle.GetType(inodeNumber)
if err != nil {
return false, err
}
@@ -1175,7 +1145,7 @@ func (mS *mountStruct) IsSymlink(userID inode.InodeUserID, groupID inode.InodeGr
return inodeType == inode.SymlinkType, nil
}
-func (mS *mountStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string, targetInodeNumber inode.InodeNumber) (err error) {
+func (vS *volumeStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string, targetInodeNumber inode.InodeNumber) (err error) {
startTime := time.Now()
defer func() {
globals.LinkUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1184,8 +1154,8 @@ func (mS *mountStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
var (
inodeType inode.InodeType
@@ -1199,12 +1169,12 @@ func (mS *mountStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID
// We need both dirInodelock and the targetInode lock to make sure they
// don't go away and linkCount is updated correctly.
callerID := dlm.GenerateCallerID()
- dirInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(dirInodeNumber, callerID)
+ dirInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(dirInodeNumber, callerID)
if err != nil {
return
}
- targetInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(targetInodeNumber, callerID)
+ targetInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(targetInodeNumber, callerID)
if err != nil {
return
}
@@ -1217,7 +1187,7 @@ func (mS *mountStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID
}
// make sure target inode is not a directory
- inodeType, err = mS.volStruct.inodeVolumeHandle.GetType(targetInodeNumber)
+ inodeType, err = vS.inodeVolumeHandle.GetType(targetInodeNumber)
if err != nil {
targetInodeLock.Unlock()
// Because we know that GetType() has already "blunderized" the error, we just pass it on
@@ -1247,34 +1217,34 @@ func (mS *mountStruct) Link(userID inode.InodeUserID, groupID inode.InodeGroupID
}
defer targetInodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(targetInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(targetInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- err = mS.volStruct.inodeVolumeHandle.Link(dirInodeNumber, basename, targetInodeNumber, false)
+ err = vS.inodeVolumeHandle.Link(dirInodeNumber, basename, targetInodeNumber, false)
// if the link was successful and this is a regular file then any
// pending data was flushed
if err == nil && inodeType == inode.FileType {
- mS.volStruct.untrackInFlightFileInodeData(targetInodeNumber, false)
+ vS.untrackInFlightFileInodeData(targetInodeNumber, false)
}
return err
}
-func (mS *mountStruct) ListXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (streamNames []string, err error) {
+func (vS *volumeStruct) ListXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (streamNames []string, err error) {
startTime := time.Now()
defer func() {
globals.ListXAttrUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1283,10 +1253,10 @@ func (mS *mountStruct) ListXAttr(userID inode.InodeUserID, groupID inode.InodeGr
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -1296,18 +1266,18 @@ func (mS *mountStruct) ListXAttr(userID inode.InodeUserID, groupID inode.InodeGr
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- metadata, err := mS.volStruct.inodeVolumeHandle.GetMetadata(inodeNumber)
+ metadata, err := vS.inodeVolumeHandle.GetMetadata(inodeNumber)
if err != nil {
// Did not find the requested stream. However this isn't really an error since
// samba will ask for acl-related streams and is fine with not finding them.
@@ -1320,7 +1290,7 @@ func (mS *mountStruct) ListXAttr(userID inode.InodeUserID, groupID inode.InodeGr
return
}
-func (mS *mountStruct) Lookup(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string) (inodeNumber inode.InodeNumber, err error) {
+func (vS *volumeStruct) Lookup(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, dirInodeNumber inode.InodeNumber, basename string) (inodeNumber inode.InodeNumber, err error) {
startTime := time.Now()
defer func() {
globals.LookupUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1329,32 +1299,32 @@ func (mS *mountStruct) Lookup(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- dirInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(dirInodeNumber, nil)
+ dirInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(dirInodeNumber, nil)
if err != nil {
return
}
dirInodeLock.ReadLock()
defer dirInodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(dirInodeNumber, userID, groupID, otherGroupIDs, inode.X_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- inodeNumber, err = mS.volStruct.inodeVolumeHandle.Lookup(dirInodeNumber, basename)
+ inodeNumber, err = vS.inodeVolumeHandle.Lookup(dirInodeNumber, basename)
return inodeNumber, err
}
-func (mS *mountStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fullpath string) (inodeNumber inode.InodeNumber, err error) {
+func (vS *volumeStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, fullpath string) (inodeNumber inode.InodeNumber, err error) {
startTime := time.Now()
defer func() {
globals.LookupPathUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -1363,8 +1333,8 @@ func (mS *mountStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeG
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
// In the special case of a fullpath starting with "/", the path segment splitting above
// results in a first segment that still begins with "/". Because this is not recognized
@@ -1380,7 +1350,7 @@ func (mS *mountStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeG
cursorInodeNumber := inode.RootDirInodeNumber
for _, segment := range pathSegments {
- cursorInodeLock, err1 := mS.volStruct.inodeVolumeHandle.InitInodeLock(cursorInodeNumber, nil)
+ cursorInodeLock, err1 := vS.inodeVolumeHandle.InitInodeLock(cursorInodeNumber, nil)
if err = err1; err != nil {
return
}
@@ -1389,14 +1359,14 @@ func (mS *mountStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeG
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(cursorInodeNumber, userID, groupID, otherGroupIDs, inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(cursorInodeNumber, userID, groupID, otherGroupIDs, inode.X_OK,
inode.NoOverride) {
cursorInodeLock.Unlock()
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- cursorInodeNumber, err = mS.volStruct.inodeVolumeHandle.Lookup(cursorInodeNumber, segment)
+ cursorInodeNumber, err = vS.inodeVolumeHandle.Lookup(cursorInodeNumber, segment)
cursorInodeLock.Unlock()
if err != nil {
@@ -1407,7 +1377,7 @@ func (mS *mountStruct) LookupPath(userID inode.InodeUserID, groupID inode.InodeG
return cursorInodeNumber, nil
}
-func (mS *mountStruct) MiddlewareCoalesce(destPath string, metaData []byte, elementPaths []string) (
+func (vS *volumeStruct) MiddlewareCoalesce(destPath string, metaData []byte, elementPaths []string) (
ino uint64, numWrites uint64, attrChangeTime uint64, modificationTime uint64, err error) {
var (
@@ -1435,8 +1405,8 @@ func (mS *mountStruct) MiddlewareCoalesce(destPath string, metaData []byte, elem
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
// Retry until done or failure (starting with ZERO backoff)
@@ -1458,7 +1428,7 @@ Restart:
for coalesceElementListIndex, elementPath = range elementPaths {
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
elementPath,
heldLocks,
@@ -1486,7 +1456,7 @@ Restart:
}
_, dirEntryInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
destPath,
heldLocks,
@@ -1508,7 +1478,7 @@ Restart:
// Invoke package inode to actually perform the Coalesce operation
destFileInodeNumber = dirEntryInodeNumber
- ctime, mtime, numWrites, _, err = mS.volStruct.inodeVolumeHandle.Coalesce(
+ ctime, mtime, numWrites, _, err = vS.inodeVolumeHandle.Coalesce(
destFileInodeNumber, MiddlewareStream, metaData, coalesceElementList)
// We can now release all the WriteLocks we are currently holding
@@ -1524,7 +1494,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewareDelete(parentDir string, basename string) (err error) {
+func (vS *volumeStruct) MiddlewareDelete(parentDir string, basename string) (err error) {
var (
dirEntryBasename string
dirEntryInodeNumber inode.InodeNumber
@@ -1562,7 +1532,7 @@ Restart:
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
parentDir+"/"+basename,
heldLocks,
@@ -1582,7 +1552,7 @@ Restart:
// Check if Unlink() and Destroy() are doable
- inodeVolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle = vS.inodeVolumeHandle
inodeType, err = inodeVolumeHandle.GetType(dirEntryInodeNumber)
if nil != err {
@@ -1637,7 +1607,7 @@ Restart:
return
}
-func (mS *mountStruct) middlewareReadDirHelper(path string, maxEntries uint64, prevBasename string) (pathDirInodeNumber inode.InodeNumber, dirEntrySlice []inode.DirEntry, moreEntries bool, err error) {
+func (vS *volumeStruct) middlewareReadDirHelper(path string, maxEntries uint64, prevBasename string) (pathDirInodeNumber inode.InodeNumber, dirEntrySlice []inode.DirEntry, moreEntries bool, err error) {
var (
dirEntrySliceElement inode.DirEntry
heldLocks *heldLocksStruct
@@ -1661,7 +1631,7 @@ Restart:
heldLocks = newHeldLocks()
_, pathDirInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
path,
heldLocks,
@@ -1679,7 +1649,7 @@ Restart:
// Now assemble response
- internalDirEntrySlice, moreEntries, err = mS.volStruct.inodeVolumeHandle.ReadDir(pathDirInodeNumber, maxEntries, 0, prevBasename)
+ internalDirEntrySlice, moreEntries, err = vS.inodeVolumeHandle.ReadDir(pathDirInodeNumber, maxEntries, 0, prevBasename)
if nil != err {
heldLocks.free()
return
@@ -1694,7 +1664,7 @@ Restart:
if ("." == dirEntrySliceElement.Basename) || (".." == dirEntrySliceElement.Basename) {
dirEntrySliceElement.Type = inode.DirType
} else {
- dirEntrySliceElement.Type, err = mS.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntrySliceElement.InodeNumber)
+ dirEntrySliceElement.Type, err = vS.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntrySliceElement.InodeNumber)
if nil != err {
// It's ok to have an error here... it just means the directory we are iterating is changing
continue
@@ -1709,7 +1679,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewareGetAccount(maxEntries uint64, marker string, endmarker string) (accountEnts []AccountEntry, mtime uint64, ctime uint64, err error) {
+func (vS *volumeStruct) MiddlewareGetAccount(maxEntries uint64, marker string, endmarker string) (accountEnts []AccountEntry, mtime uint64, ctime uint64, err error) {
var (
dirEntrySlice []inode.DirEntry
dirEntrySliceElement inode.DirEntry
@@ -1718,7 +1688,7 @@ func (mS *mountStruct) MiddlewareGetAccount(maxEntries uint64, marker string, en
statResult Stat
)
- statResult, err = mS.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber)
+ statResult, err = vS.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber)
if nil != err {
return
}
@@ -1735,7 +1705,7 @@ func (mS *mountStruct) MiddlewareGetAccount(maxEntries uint64, marker string, en
moreEntries = true
for moreEntries {
- _, dirEntrySlice, moreEntries, err = mS.middlewareReadDirHelper("/", remainingMaxEntries, marker)
+ _, dirEntrySlice, moreEntries, err = vS.middlewareReadDirHelper("/", remainingMaxEntries, marker)
if nil != err {
return
}
@@ -1754,7 +1724,7 @@ func (mS *mountStruct) MiddlewareGetAccount(maxEntries uint64, marker string, en
if ("." != dirEntrySliceElement.Basename) && (".." != dirEntrySliceElement.Basename) {
// So we've skipped "." & ".." - now also skip non-DirInodes
if inode.DirType == dirEntrySliceElement.Type {
- statResult, err = mS.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntrySliceElement.InodeNumber)
+ statResult, err = vS.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntrySliceElement.InodeNumber)
if nil != err {
return
}
@@ -1792,7 +1762,7 @@ type dirEntrySliceStackElementStruct struct {
moreEntries bool
}
-func (mS *mountStruct) MiddlewareGetContainer(vContainerName string, maxEntries uint64, marker string, endmarker string, prefix string, delimiter string) (containerEnts []ContainerEntry, err error) {
+func (vS *volumeStruct) MiddlewareGetContainer(vContainerName string, maxEntries uint64, marker string, endmarker string, prefix string, delimiter string) (containerEnts []ContainerEntry, err error) {
var (
containerEntry ContainerEntry
containerEntryBasename string // Misnamed... this is actually everything after ContainerName
@@ -1841,7 +1811,7 @@ func (mS *mountStruct) MiddlewareGetContainer(vContainerName string, maxEntries
markerPathDirInodeIndex = -1 // Must be special cased below to ensure we don't look in markerPath
markerCanonicalized = "" // Actually never accessed
} else {
- markerPath, markerPathDirInodeIndex, err = mS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + marker)
+ markerPath, markerPathDirInodeIndex, err = vS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + marker)
if nil != err {
err = blunder.AddError(err, blunder.InvalidArgError)
return
@@ -1862,7 +1832,7 @@ func (mS *mountStruct) MiddlewareGetContainer(vContainerName string, maxEntries
endmarkerPath = []string{}
endmarkerCanonicalized = "" // Actually never accessed
} else {
- endmarkerPath, _, err = mS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + endmarker)
+ endmarkerPath, _, err = vS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + endmarker)
if nil != err {
err = blunder.AddError(err, blunder.InvalidArgError)
return
@@ -1879,7 +1849,7 @@ func (mS *mountStruct) MiddlewareGetContainer(vContainerName string, maxEntries
}
}
- prefixPath, prefixPathDirInodeIndex, err = mS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + prefix)
+ prefixPath, prefixPathDirInodeIndex, err = vS.canonicalizePathAndLocateLeafDirInode(vContainerName + "/" + prefix)
if nil != err {
err = blunder.AddError(err, blunder.InvalidArgError)
return
@@ -2033,7 +2003,7 @@ func (mS *mountStruct) MiddlewareGetContainer(vContainerName string, maxEntries
// Setup shortcuts/constants
dlmCallerID = dlm.GenerateCallerID()
- inodeVolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle = vS.inodeVolumeHandle
// Compute initial response
@@ -2046,7 +2016,7 @@ Restart:
heldLocks = newHeldLocks()
_, dirInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
dirPath,
heldLocks,
@@ -2129,7 +2099,7 @@ Restart:
// Perform initial ReadDir and place in dirEntrySliceStack
if nil == dirEntrySliceElementToPrepend {
- _, dirEntrySlice, moreEntries, err = mS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
+ _, dirEntrySlice, moreEntries, err = vS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
if nil != err {
return
}
@@ -2138,7 +2108,7 @@ Restart:
dirEntrySlice = []inode.DirEntry{*dirEntrySliceElementToPrepend}
moreEntries = false
} else {
- _, dirEntrySliceToAppend, moreEntries, err = mS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
+ _, dirEntrySliceToAppend, moreEntries, err = vS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
if nil == err {
dirEntrySlice = make([]inode.DirEntry, 1, 1+len(dirEntrySliceToAppend))
dirEntrySlice[0] = *dirEntrySliceElementToPrepend
@@ -2173,7 +2143,7 @@ Restart:
dirEntrySliceElement = dirEntrySlice[dirEntrySliceElementIndex]
prevReturned = dirEntrySliceElement.Basename
- _, dirEntrySlice, moreEntries, err = mS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
+ _, dirEntrySlice, moreEntries, err = vS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
if (nil != err) || (0 == len(dirEntrySlice)) {
// Even though we thought there were moreEntries, there now are not for some reason
@@ -2233,7 +2203,7 @@ Restart:
prevReturned = dirPathSplit[len(dirPathSplit)-1]
- _, dirEntrySlice, moreEntries, err = mS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
+ _, dirEntrySlice, moreEntries, err = vS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
if nil != err {
return
}
@@ -2367,7 +2337,7 @@ Restart:
prevReturned = ""
- _, dirEntrySlice, moreEntries, err = mS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
+ _, dirEntrySlice, moreEntries, err = vS.middlewareReadDirHelper(dirPath, remainingMaxEntries, prevReturned)
if nil != err {
return
}
@@ -2389,7 +2359,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewareGetObject(containerObjectPath string,
+func (vS *volumeStruct) MiddlewareGetObject(containerObjectPath string,
readRangeIn []ReadRangeIn, readRangeOut *[]inode.ReadPlanStep) (
response HeadResponse, err error) {
@@ -2434,7 +2404,7 @@ Restart:
heldLocks = newHeldLocks()
_, dirEntryInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
containerObjectPath,
heldLocks,
@@ -2453,7 +2423,7 @@ Restart:
// Now assemble response
- stat, err = mS.getstatHelperWhileLocked(dirEntryInodeNumber)
+ stat, err = vS.getstatHelperWhileLocked(dirEntryInodeNumber)
if nil != err {
heldLocks.free()
return
@@ -2471,7 +2441,7 @@ Restart:
response.FileSize = 0
}
- response.Metadata, err = mS.volStruct.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
+ response.Metadata, err = vS.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
if nil != err {
if blunder.Is(err, blunder.StreamNotFound) {
response.Metadata = []byte{}
@@ -2490,7 +2460,7 @@ Restart:
return
}
- inodeVolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle = vS.inodeVolumeHandle
if len(readRangeIn) == 0 {
// Get ReadPlan for entire file
@@ -2523,7 +2493,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewareHeadResponse(entityPath string) (response HeadResponse, err error) {
+func (vS *volumeStruct) MiddlewareHeadResponse(entityPath string) (response HeadResponse, err error) {
var (
dirEntryInodeNumber inode.InodeNumber
heldLocks *heldLocksStruct
@@ -2555,7 +2525,7 @@ Restart:
heldLocks = newHeldLocks()
_, dirEntryInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
entityPath,
heldLocks,
@@ -2574,7 +2544,7 @@ Restart:
// Now assemble response
- stat, err = mS.getstatHelperWhileLocked(dirEntryInodeNumber)
+ stat, err = vS.getstatHelperWhileLocked(dirEntryInodeNumber)
if nil != err {
heldLocks.free()
return
@@ -2594,7 +2564,7 @@ Restart:
response.FileSize = 0
}
- response.Metadata, err = mS.volStruct.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
+ response.Metadata, err = vS.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
if nil != err {
heldLocks.free()
response.Metadata = []byte{}
@@ -2612,7 +2582,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewarePost(parentDir string, baseName string, newMetaData []byte, oldMetaData []byte) (err error) {
+func (vS *volumeStruct) MiddlewarePost(parentDir string, baseName string, newMetaData []byte, oldMetaData []byte) (err error) {
var (
dirEntryInodeNumber inode.InodeNumber
existingStreamData []byte
@@ -2645,7 +2615,7 @@ Restart:
heldLocks = newHeldLocks()
_, dirEntryInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
parentDir+"/"+baseName,
heldLocks,
@@ -2669,7 +2639,7 @@ Restart:
// Compare oldMetaData to existingStreamData to make sure that the HTTP metadata has not changed.
// If it has changed, then return an error since middleware has to handle it.
- existingStreamData, err = mS.volStruct.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
+ existingStreamData, err = vS.inodeVolumeHandle.GetStream(dirEntryInodeNumber, MiddlewareStream)
if nil != err {
if blunder.Is(err, blunder.StreamNotFound) {
err = nil
@@ -2690,7 +2660,7 @@ Restart:
// Change looks okay so make it.
- err = mS.volStruct.inodeVolumeHandle.PutStream(dirEntryInodeNumber, MiddlewareStream, newMetaData)
+ err = vS.inodeVolumeHandle.PutStream(dirEntryInodeNumber, MiddlewareStream, newMetaData)
if nil != err {
heldLocks.free()
return
@@ -2698,13 +2668,13 @@ Restart:
// PutStream() implicitly flushed... so, if it was a FileInode, we don't need to track it anymore
- mS.volStruct.untrackInFlightFileInodeData(dirEntryInodeNumber, false)
+ vS.untrackInFlightFileInodeData(dirEntryInodeNumber, false)
heldLocks.free()
return
}
-func (mS *mountStruct) MiddlewarePutComplete(vContainerName string, vObjectPath string, pObjectPaths []string, pObjectLengths []uint64, pObjectMetadata []byte) (mtime uint64, ctime uint64, fileInodeNumber inode.InodeNumber, numWrites uint64, err error) {
+func (vS *volumeStruct) MiddlewarePutComplete(vContainerName string, vObjectPath string, pObjectPaths []string, pObjectLengths []uint64, pObjectMetadata []byte) (mtime uint64, ctime uint64, fileInodeNumber inode.InodeNumber, numWrites uint64, err error) {
var (
containerName string
dirInodeNumber inode.InodeNumber
@@ -2713,7 +2683,7 @@ func (mS *mountStruct) MiddlewarePutComplete(vContainerName string, vObjectPath
dirEntryInodeType inode.InodeType
fileOffset uint64
heldLocks *heldLocksStruct
- inodeVolumeHandle inode.VolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle inode.VolumeHandle = vS.inodeVolumeHandle
numPObjects int
objectName string
pObjectIndex int
@@ -2754,7 +2724,7 @@ Restart:
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
vContainerName+"/"+vObjectPath,
heldLocks,
@@ -2780,7 +2750,7 @@ Restart:
if dirEntryInodeType == inode.DirType {
// try to unlink the directory (rmdir flushes the inodes)
- err = mS.rmdirActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
+ err = vS.rmdirActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
if err != nil {
// the directory was probably not empty
heldLocks.free()
@@ -2790,13 +2760,13 @@ Restart:
} else {
// unlink the symlink (unlink flushes the inodes)
- err = mS.unlinkActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
+ err = vS.unlinkActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
if err != nil {
// ReadOnlyError is my best guess for the failure
err = blunder.NewError(blunder.ReadOnlyError,
"MiddlewareMkdir(): vol '%s' failed to unlink '%s': %v",
- mS.volStruct.volumeName, vContainerName+"/"+vObjectPath, err)
+ vS.volumeName, vContainerName+"/"+vObjectPath, err)
heldLocks.free()
return
}
@@ -2804,7 +2774,7 @@ Restart:
// let resolvePath() create the file
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
vContainerName+"/"+vObjectPath,
heldLocks,
@@ -2861,7 +2831,7 @@ Restart:
return
}
- stat, err = mS.getstatHelperWhileLocked(dirEntryInodeNumber)
+ stat, err = vS.getstatHelperWhileLocked(dirEntryInodeNumber)
if nil != err {
heldLocks.free()
return
@@ -2876,7 +2846,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewareMkdir(vContainerName string, vObjectPath string, metadata []byte) (mtime uint64, ctime uint64, inodeNumber inode.InodeNumber, numWrites uint64, err error) {
+func (vS *volumeStruct) MiddlewareMkdir(vContainerName string, vObjectPath string, metadata []byte) (mtime uint64, ctime uint64, inodeNumber inode.InodeNumber, numWrites uint64, err error) {
var (
dirInodeNumber inode.InodeNumber
dirEntryInodeNumber inode.InodeNumber
@@ -2912,7 +2882,7 @@ Restart:
// Resolve the object, locking it and its parent directory exclusive
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
vContainerName+"/"+vObjectPath,
heldLocks,
@@ -2935,20 +2905,20 @@ Restart:
if dirEntryInodeType != inode.DirType {
// unlink the file or symlink (unlink flushes the inodes)
- err = mS.unlinkActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
+ err = vS.unlinkActual(dirInodeNumber, dirEntryBasename, dirEntryInodeNumber)
if err != nil {
// ReadOnlyError is my best guess for the failure
err = blunder.NewError(blunder.ReadOnlyError,
"MiddlewareMkdir(): vol '%s' failed to unlink '%s': %v",
- mS.volStruct.volumeName, vContainerName+"/"+vObjectPath, err)
+ vS.volumeName, vContainerName+"/"+vObjectPath, err)
heldLocks.free()
return
}
// let resolvePath() make the directory
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
vContainerName+"/"+vObjectPath,
heldLocks,
@@ -2967,14 +2937,14 @@ Restart:
}
}
- err = mS.volStruct.inodeVolumeHandle.PutStream(dirEntryInodeNumber, MiddlewareStream, metadata)
+ err = vS.inodeVolumeHandle.PutStream(dirEntryInodeNumber, MiddlewareStream, metadata)
if err != nil {
heldLocks.free()
logger.DebugfIDWithError(internalDebug, err, "MiddlewareMkdir(): failed PutStream() for dirEntryInodeNumber 0x%016X (metadata: %v)", dirEntryInodeNumber, metadata)
return
}
- stat, err = mS.getstatHelperWhileLocked(dirEntryInodeNumber)
+ stat, err = vS.getstatHelperWhileLocked(dirEntryInodeNumber)
if nil != err {
heldLocks.free()
return
@@ -2989,7 +2959,7 @@ Restart:
return
}
-func (mS *mountStruct) MiddlewarePutContainer(containerName string, oldMetadata []byte, newMetadata []byte) (err error) {
+func (vS *volumeStruct) MiddlewarePutContainer(containerName string, oldMetadata []byte, newMetadata []byte) (err error) {
var (
containerInodeLock *dlm.RWLockStruct
containerInodeNumber inode.InodeNumber
@@ -3007,19 +2977,19 @@ func (mS *mountStruct) MiddlewarePutContainer(containerName string, oldMetadata
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
// Yes, it's a heavy lock to hold on the root inode. However, we
// might need to add a new directory entry there, so there's not
// much else we can do.
- rootInodeLock, err := mS.volStruct.inodeVolumeHandle.GetWriteLock(inode.RootDirInodeNumber, nil)
+ rootInodeLock, err := vS.inodeVolumeHandle.GetWriteLock(inode.RootDirInodeNumber, nil)
if nil != err {
return
}
defer rootInodeLock.Unlock()
- containerInodeNumber, err = mS.volStruct.inodeVolumeHandle.Lookup(inode.RootDirInodeNumber, containerName)
+ containerInodeNumber, err = vS.inodeVolumeHandle.Lookup(inode.RootDirInodeNumber, containerName)
if err != nil && blunder.IsNot(err, blunder.NotFoundError) {
return
} else if err != nil {
@@ -3029,34 +2999,34 @@ func (mS *mountStruct) MiddlewarePutContainer(containerName string, oldMetadata
return
}
- newDirInodeNumber, err = mS.volStruct.inodeVolumeHandle.CreateDir(inode.PosixModePerm, 0, 0)
+ newDirInodeNumber, err = vS.inodeVolumeHandle.CreateDir(inode.PosixModePerm, 0, 0)
if err != nil {
logger.ErrorWithError(err)
return
}
- newDirInodeLock, err = mS.volStruct.inodeVolumeHandle.GetWriteLock(newDirInodeNumber, nil)
+ newDirInodeLock, err = vS.inodeVolumeHandle.GetWriteLock(newDirInodeNumber, nil)
defer newDirInodeLock.Unlock()
- err = mS.volStruct.inodeVolumeHandle.PutStream(newDirInodeNumber, MiddlewareStream, newMetadata)
+ err = vS.inodeVolumeHandle.PutStream(newDirInodeNumber, MiddlewareStream, newMetadata)
if err != nil {
logger.ErrorWithError(err)
return
}
- err = mS.volStruct.inodeVolumeHandle.Link(inode.RootDirInodeNumber, containerName, newDirInodeNumber, false)
+ err = vS.inodeVolumeHandle.Link(inode.RootDirInodeNumber, containerName, newDirInodeNumber, false)
return
}
- containerInodeLock, err = mS.volStruct.inodeVolumeHandle.GetWriteLock(containerInodeNumber, nil)
+ containerInodeLock, err = vS.inodeVolumeHandle.GetWriteLock(containerInodeNumber, nil)
if err != nil {
return
}
defer containerInodeLock.Unlock()
// Existing container: just update the metadata
- existingMetadata, err = mS.volStruct.inodeVolumeHandle.GetStream(containerInodeNumber, MiddlewareStream)
+ existingMetadata, err = vS.inodeVolumeHandle.GetStream(containerInodeNumber, MiddlewareStream)
// GetStream() will return an error if there is no "middleware" stream
if err != nil && blunder.IsNot(err, blunder.StreamNotFound) {
@@ -3070,12 +3040,12 @@ func (mS *mountStruct) MiddlewarePutContainer(containerName string, oldMetadata
err = blunder.NewError(blunder.TryAgainError, "Metadata differs - actual: %v request: %v", existingMetadata, oldMetadata)
return
}
- err = mS.volStruct.inodeVolumeHandle.PutStream(containerInodeNumber, MiddlewareStream, newMetadata)
+ err = vS.inodeVolumeHandle.PutStream(containerInodeNumber, MiddlewareStream, newMetadata)
return
}
-func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string, filePerm inode.InodeMode) (newDirInodeNumber inode.InodeNumber, err error) {
+func (vS *volumeStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string, filePerm inode.InodeMode) (newDirInodeNumber inode.InodeNumber, err error) {
startTime := time.Now()
defer func() {
globals.MkdirUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3084,8 +3054,8 @@ func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
// Make sure the file basename is not too long
err = validateBaseName(basename)
@@ -3093,13 +3063,13 @@ func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupI
return 0, err
}
- newDirInodeNumber, err = mS.volStruct.inodeVolumeHandle.CreateDir(filePerm, userID, groupID)
+ newDirInodeNumber, err = vS.inodeVolumeHandle.CreateDir(filePerm, userID, groupID)
if err != nil {
logger.ErrorWithError(err)
return 0, err
}
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3109,20 +3079,20 @@ func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(newDirInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(newDirInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Access(F_OK) in fs.Mkdir", newDirInodeNumber)
}
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return 0, err
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(newDirInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(newDirInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Access(W_OK|X_OK) in fs.Mkdir", newDirInodeNumber)
}
@@ -3130,9 +3100,9 @@ func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupI
return 0, err
}
- err = mS.volStruct.inodeVolumeHandle.Link(inodeNumber, basename, newDirInodeNumber, false)
+ err = vS.inodeVolumeHandle.Link(inodeNumber, basename, newDirInodeNumber, false)
if err != nil {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(newDirInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(newDirInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Link() in fs.Mkdir", newDirInodeNumber)
}
@@ -3142,7 +3112,7 @@ func (mS *mountStruct) Mkdir(userID inode.InodeUserID, groupID inode.InodeGroupI
return newDirInodeNumber, nil
}
-func (mS *mountStruct) RemoveXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string) (err error) {
+func (vS *volumeStruct) RemoveXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string) (err error) {
startTime := time.Now()
defer func() {
globals.RemoveXAttrUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3151,10 +3121,10 @@ func (mS *mountStruct) RemoveXAttr(userID inode.InodeUserID, groupID inode.Inode
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3164,28 +3134,28 @@ func (mS *mountStruct) RemoveXAttr(userID inode.InodeUserID, groupID inode.Inode
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- err = mS.volStruct.inodeVolumeHandle.DeleteStream(inodeNumber, streamName)
+ err = vS.inodeVolumeHandle.DeleteStream(inodeNumber, streamName)
if err != nil {
logger.ErrorfWithError(err, "Failed to delete XAttr %v of inode %v", streamName, inodeNumber)
}
- mS.volStruct.untrackInFlightFileInodeData(inodeNumber, false)
+ vS.untrackInFlightFileInodeData(inodeNumber, false)
return
}
-func (mS *mountStruct) Rename(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, srcDirInodeNumber inode.InodeNumber, srcBasename string, dstDirInodeNumber inode.InodeNumber, dstBasename string) (err error) {
+func (vS *volumeStruct) Rename(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, srcDirInodeNumber inode.InodeNumber, srcBasename string, dstDirInodeNumber inode.InodeNumber, dstBasename string) (err error) {
var (
dirEntryBasename string
dirEntryInodeNumber inode.InodeNumber
@@ -3203,8 +3173,8 @@ func (mS *mountStruct) Rename(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
err = validateBaseName(srcBasename)
if nil != err {
@@ -3233,7 +3203,7 @@ Restart:
// Acquire WriteLock on {srcDirInodeNumber,srcBasename} & perform Access Check
dirInodeNumber, _, dirEntryBasename, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
srcDirInodeNumber,
srcBasename,
heldLocks,
@@ -3257,7 +3227,7 @@ Restart:
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(srcDirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK, inode.NoOverride) {
+ if !vS.inodeVolumeHandle.Access(srcDirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK, inode.NoOverride) {
heldLocks.free()
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
@@ -3265,7 +3235,7 @@ Restart:
// Acquire WriteLock on dstDirInodeNumber & perform Access Check
_, dirEntryInodeNumber, _, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
dstDirInodeNumber,
".",
heldLocks,
@@ -3289,7 +3259,7 @@ Restart:
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(dstDirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK, inode.NoOverride) {
+ if !vS.inodeVolumeHandle.Access(dstDirInodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK, inode.NoOverride) {
heldLocks.free()
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
@@ -3298,7 +3268,7 @@ Restart:
// Acquire WriteLock on dstBasename if it exists
dirInodeNumber, _, dirEntryBasename, _, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
dstDirInodeNumber,
dstBasename,
heldLocks,
@@ -3321,14 +3291,14 @@ Restart:
// Locks held & Access Checks succeeded... time to do the Move
- err = mS.volStruct.inodeVolumeHandle.Move(srcDirInodeNumber, srcBasename, dstDirInodeNumber, dstBasename)
+ err = vS.inodeVolumeHandle.Move(srcDirInodeNumber, srcBasename, dstDirInodeNumber, dstBasename)
heldLocks.free()
return // err returned from inode.Move() suffices here
}
-func (mS *mountStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, length uint64, profiler *utils.Profiler) (buf []byte, err error) {
+func (vS *volumeStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, length uint64, profiler *utils.Profiler) (buf []byte, err error) {
startTime := time.Now()
defer func() {
globals.ReadUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3338,10 +3308,10 @@ func (mS *mountStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3351,18 +3321,18 @@ func (mS *mountStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- inodeType, err := mS.volStruct.inodeVolumeHandle.GetType(inodeNumber)
+ inodeType, err := vS.inodeVolumeHandle.GetType(inodeNumber)
if err != nil {
logger.ErrorfWithError(err, "couldn't get type for inode %v", inodeNumber)
return buf, err
@@ -3375,7 +3345,7 @@ func (mS *mountStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID
}
profiler.AddEventNow("before inode.Read()")
- buf, err = mS.volStruct.inodeVolumeHandle.Read(inodeNumber, offset, length, profiler)
+ buf, err = vS.inodeVolumeHandle.Read(inodeNumber, offset, length, profiler)
profiler.AddEventNow("after inode.Read()")
if uint64(len(buf)) > length {
err = fmt.Errorf("%s: Buf length %v is greater than supplied length %v", utils.GetFnName(), uint64(len(buf)), length)
@@ -3386,7 +3356,7 @@ func (mS *mountStruct) Read(userID inode.InodeUserID, groupID inode.InodeGroupID
return buf, err
}
-func (mS *mountStruct) readdirHelper(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (dirEntries []inode.DirEntry, statEntries []Stat, numEntries uint64, areMoreEntries bool, err error) {
+func (vS *volumeStruct) readdirHelper(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (dirEntries []inode.DirEntry, statEntries []Stat, numEntries uint64, areMoreEntries bool, err error) {
var (
dirEntryIndex uint64
dlmCallerID dlm.CallerID
@@ -3396,11 +3366,11 @@ func (mS *mountStruct) readdirHelper(userID inode.InodeUserID, groupID inode.Ino
tryLockBackoffContext *tryLockBackoffContextStruct
)
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
dlmCallerID = dlm.GenerateCallerID()
- inodeVolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle = vS.inodeVolumeHandle
tryLockBackoffContext = &tryLockBackoffContextStruct{}
@@ -3453,7 +3423,7 @@ Restart:
goto Restart
}
- statEntries[dirEntryIndex], err = mS.getstatHelperWhileLocked(dirEntries[dirEntryIndex].InodeNumber)
+ statEntries[dirEntryIndex], err = vS.getstatHelperWhileLocked(dirEntries[dirEntryIndex].InodeNumber)
if nil != err {
internalErr = inodeLock.Unlock()
if nil != internalErr {
@@ -3473,7 +3443,7 @@ Restart:
return
}
-func (mS *mountStruct) Readdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (entries []inode.DirEntry, numEntries uint64, areMoreEntries bool, err error) {
+func (vS *volumeStruct) Readdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (entries []inode.DirEntry, numEntries uint64, areMoreEntries bool, err error) {
startTime := time.Now()
defer func() {
globals.ReaddirUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3483,12 +3453,12 @@ func (mS *mountStruct) Readdir(userID inode.InodeUserID, groupID inode.InodeGrou
}
}()
- entries, _, numEntries, areMoreEntries, err = mS.readdirHelper(userID, groupID, otherGroupIDs, inodeNumber, maxEntries, prevReturned...)
+ entries, _, numEntries, areMoreEntries, err = vS.readdirHelper(userID, groupID, otherGroupIDs, inodeNumber, maxEntries, prevReturned...)
return
}
-func (mS *mountStruct) ReaddirPlus(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (dirEntries []inode.DirEntry, statEntries []Stat, numEntries uint64, areMoreEntries bool, err error) {
+func (vS *volumeStruct) ReaddirPlus(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, maxEntries uint64, prevReturned ...interface{}) (dirEntries []inode.DirEntry, statEntries []Stat, numEntries uint64, areMoreEntries bool, err error) {
startTime := time.Now()
defer func() {
globals.ReaddirPlusUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3498,12 +3468,12 @@ func (mS *mountStruct) ReaddirPlus(userID inode.InodeUserID, groupID inode.Inode
}
}()
- dirEntries, statEntries, numEntries, areMoreEntries, err = mS.readdirHelper(userID, groupID, otherGroupIDs, inodeNumber, maxEntries, prevReturned...)
+ dirEntries, statEntries, numEntries, areMoreEntries, err = vS.readdirHelper(userID, groupID, otherGroupIDs, inodeNumber, maxEntries, prevReturned...)
return
}
-func (mS *mountStruct) Readsymlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (target string, err error) {
+func (vS *volumeStruct) Readsymlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber) (target string, err error) {
startTime := time.Now()
defer func() {
globals.ReadsymlinkUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3512,10 +3482,10 @@ func (mS *mountStruct) Readsymlink(userID inode.InodeUserID, groupID inode.Inode
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3525,25 +3495,25 @@ func (mS *mountStruct) Readsymlink(userID inode.InodeUserID, groupID inode.Inode
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.R_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- target, err = mS.volStruct.inodeVolumeHandle.GetSymlink(inodeNumber)
+ target, err = vS.inodeVolumeHandle.GetSymlink(inodeNumber)
return target, err
}
-func (mS *mountStruct) Resize(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, newSize uint64) (err error) {
+func (vS *volumeStruct) Resize(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, newSize uint64) (err error) {
startTime := time.Now()
defer func() {
globals.ResizeUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3552,10 +3522,10 @@ func (mS *mountStruct) Resize(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3565,26 +3535,26 @@ func (mS *mountStruct) Resize(userID inode.InodeUserID, groupID inode.InodeGroup
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- err = mS.volStruct.inodeVolumeHandle.SetSize(inodeNumber, newSize)
- mS.volStruct.untrackInFlightFileInodeData(inodeNumber, false)
+ err = vS.inodeVolumeHandle.SetSize(inodeNumber, newSize)
+ vS.untrackInFlightFileInodeData(inodeNumber, false)
return err
}
-func (mS *mountStruct) Rmdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string) (err error) {
+func (vS *volumeStruct) Rmdir(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string) (err error) {
startTime := time.Now()
defer func() {
globals.RmdirUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3593,11 +3563,11 @@ func (mS *mountStruct) Rmdir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
callerID := dlm.GenerateCallerID()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, callerID)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, callerID)
if err != nil {
return
}
@@ -3607,23 +3577,23 @@ func (mS *mountStruct) Rmdir(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- basenameInodeNumber, err := mS.volStruct.inodeVolumeHandle.Lookup(inodeNumber, basename)
+ basenameInodeNumber, err := vS.inodeVolumeHandle.Lookup(inodeNumber, basename)
if nil != err {
return
}
- basenameInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(basenameInodeNumber, callerID)
+ basenameInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(basenameInodeNumber, callerID)
if err != nil {
return
}
@@ -3635,14 +3605,14 @@ func (mS *mountStruct) Rmdir(userID inode.InodeUserID, groupID inode.InodeGroupI
// no permissions are required on the target directory
- err = mS.rmdirActual(inodeNumber, basename, basenameInodeNumber)
+ err = vS.rmdirActual(inodeNumber, basename, basenameInodeNumber)
return
}
-func (mS *mountStruct) rmdirActual(inodeNumber inode.InodeNumber,
+func (vS *volumeStruct) rmdirActual(inodeNumber inode.InodeNumber,
basename string, basenameInodeNumber inode.InodeNumber) (err error) {
- basenameInodeType, err := mS.volStruct.inodeVolumeHandle.GetType(basenameInodeNumber)
+ basenameInodeType, err := vS.inodeVolumeHandle.GetType(basenameInodeNumber)
if nil != err {
return
}
@@ -3653,7 +3623,7 @@ func (mS *mountStruct) rmdirActual(inodeNumber inode.InodeNumber,
return
}
- dirEntries, err := mS.volStruct.inodeVolumeHandle.NumDirEntries(basenameInodeNumber)
+ dirEntries, err := vS.inodeVolumeHandle.NumDirEntries(basenameInodeNumber)
if nil != err {
return
}
@@ -3664,12 +3634,12 @@ func (mS *mountStruct) rmdirActual(inodeNumber inode.InodeNumber,
return
}
- err = mS.volStruct.inodeVolumeHandle.Unlink(inodeNumber, basename, false)
+ err = vS.inodeVolumeHandle.Unlink(inodeNumber, basename, false)
if nil != err {
return
}
- err = mS.volStruct.inodeVolumeHandle.Destroy(basenameInodeNumber)
+ err = vS.inodeVolumeHandle.Destroy(basenameInodeNumber)
if nil != err {
return
}
@@ -3677,7 +3647,7 @@ func (mS *mountStruct) rmdirActual(inodeNumber inode.InodeNumber,
return
}
-func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, stat Stat) (err error) {
+func (vS *volumeStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, stat Stat) (err error) {
startTime := time.Now()
defer func() {
globals.SetstatUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3686,10 +3656,10 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3699,7 +3669,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.P_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.P_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotPermError, "EPERM")
return
@@ -3710,7 +3680,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
// changing the filesize requires write permission
_, ok := stat[StatSize]
if ok {
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.NotPermError, "EPERM")
return
@@ -3722,7 +3692,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
for _, key := range ownerOnly {
_, ok := stat[key]
if ok {
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.P_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.P_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotPermError, "EPERM")
return
@@ -3793,7 +3763,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
//
// Set permissions, if present in the map
if settingFilePerm {
- err = mS.volStruct.inodeVolumeHandle.SetPermMode(inodeNumber, inode.InodeMode(filePerm))
+ err = vS.inodeVolumeHandle.SetPermMode(inodeNumber, inode.InodeMode(filePerm))
if err != nil {
logger.ErrorWithError(err)
return err
@@ -3803,12 +3773,12 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
// set owner and/or group owner, if present in the map
err = nil
if settingUserID && settingGroupID {
- err = mS.volStruct.inodeVolumeHandle.SetOwnerUserIDGroupID(inodeNumber, inode.InodeUserID(newUserID),
+ err = vS.inodeVolumeHandle.SetOwnerUserIDGroupID(inodeNumber, inode.InodeUserID(newUserID),
inode.InodeGroupID(newGroupID))
} else if settingUserID {
- err = mS.volStruct.inodeVolumeHandle.SetOwnerUserID(inodeNumber, inode.InodeUserID(newUserID))
+ err = vS.inodeVolumeHandle.SetOwnerUserID(inodeNumber, inode.InodeUserID(newUserID))
} else if settingGroupID {
- err = mS.volStruct.inodeVolumeHandle.SetOwnerGroupID(inodeNumber, inode.InodeGroupID(newGroupID))
+ err = vS.inodeVolumeHandle.SetOwnerGroupID(inodeNumber, inode.InodeGroupID(newGroupID))
}
if err != nil {
logger.ErrorWithError(err)
@@ -3819,7 +3789,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
crtime, ok := stat[StatCRTime]
if ok {
newCreationTime := time.Unix(0, int64(crtime))
- err = mS.volStruct.inodeVolumeHandle.SetCreationTime(inodeNumber, newCreationTime)
+ err = vS.inodeVolumeHandle.SetCreationTime(inodeNumber, newCreationTime)
if err != nil {
logger.ErrorWithError(err)
return err
@@ -3830,7 +3800,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
mtime, ok := stat[StatMTime]
if ok {
newModificationTime := time.Unix(0, int64(mtime))
- err = mS.volStruct.inodeVolumeHandle.SetModificationTime(inodeNumber, newModificationTime)
+ err = vS.inodeVolumeHandle.SetModificationTime(inodeNumber, newModificationTime)
if err != nil {
logger.ErrorWithError(err)
return err
@@ -3841,7 +3811,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
atime, ok := stat[StatATime]
if ok {
newAccessTime := time.Unix(0, int64(atime))
- err = mS.volStruct.inodeVolumeHandle.SetAccessTime(inodeNumber, newAccessTime)
+ err = vS.inodeVolumeHandle.SetAccessTime(inodeNumber, newAccessTime)
if err != nil {
logger.ErrorWithError(err)
return err
@@ -3855,13 +3825,13 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
if ok {
newAccessTime := time.Unix(0, int64(ctime))
logger.Infof("%s: ignoring attempt to change ctime to %v on volume '%s' inode %v",
- utils.GetFnName(), newAccessTime, mS.volStruct.volumeName, inodeNumber)
+ utils.GetFnName(), newAccessTime, vS.volumeName, inodeNumber)
}
// Set size, if present in the map
size, ok := stat[StatSize]
if ok {
- err = mS.volStruct.inodeVolumeHandle.SetSize(inodeNumber, size)
+ err = vS.inodeVolumeHandle.SetSize(inodeNumber, size)
if err != nil {
logger.ErrorWithError(err)
return err
@@ -3871,7 +3841,7 @@ func (mS *mountStruct) Setstat(userID inode.InodeUserID, groupID inode.InodeGrou
return
}
-func (mS *mountStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string, value []byte, flags int) (err error) {
+func (vS *volumeStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, streamName string, value []byte, flags int) (err error) {
startTime := time.Now()
defer func() {
globals.SetXAttrUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3880,10 +3850,10 @@ func (mS *mountStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3893,12 +3863,12 @@ func (mS *mountStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
@@ -3908,12 +3878,12 @@ func (mS *mountStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
case SetXAttrCreateOrReplace:
break
case SetXAttrCreate:
- _, err = mS.GetXAttr(userID, groupID, otherGroupIDs, inodeNumber, streamName)
+ _, err = vS.GetXAttr(userID, groupID, otherGroupIDs, inodeNumber, streamName)
if err == nil {
return blunder.AddError(err, blunder.FileExistsError)
}
case SetXAttrReplace:
- _, err = mS.GetXAttr(userID, groupID, otherGroupIDs, inodeNumber, streamName)
+ _, err = vS.GetXAttr(userID, groupID, otherGroupIDs, inodeNumber, streamName)
if err != nil {
return blunder.AddError(err, blunder.StreamNotFound)
}
@@ -3921,17 +3891,17 @@ func (mS *mountStruct) SetXAttr(userID inode.InodeUserID, groupID inode.InodeGro
return blunder.AddError(err, blunder.InvalidArgError)
}
- err = mS.volStruct.inodeVolumeHandle.PutStream(inodeNumber, streamName, value)
+ err = vS.inodeVolumeHandle.PutStream(inodeNumber, streamName, value)
if err != nil {
logger.ErrorfWithError(err, "Failed to set XAttr %v to inode %v", streamName, inodeNumber)
}
- mS.volStruct.untrackInFlightFileInodeData(inodeNumber, false)
+ vS.untrackInFlightFileInodeData(inodeNumber, false)
return
}
-func (mS *mountStruct) StatVfs() (statVFS StatVFS, err error) {
+func (vS *volumeStruct) StatVfs() (statVFS StatVFS, err error) {
startTime := time.Now()
defer func() {
globals.StatVfsUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3940,27 +3910,27 @@ func (mS *mountStruct) StatVfs() (statVFS StatVFS, err error) {
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
statVFS = make(map[StatVFSKey]uint64)
- statVFS[StatVFSFilesystemID] = mS.volStruct.inodeVolumeHandle.GetFSID()
- statVFS[StatVFSBlockSize] = mS.volStruct.reportedBlockSize
- statVFS[StatVFSFragmentSize] = mS.volStruct.reportedFragmentSize
- statVFS[StatVFSTotalBlocks] = mS.volStruct.reportedNumBlocks
- statVFS[StatVFSFreeBlocks] = mS.volStruct.reportedNumBlocks
- statVFS[StatVFSAvailBlocks] = mS.volStruct.reportedNumBlocks
- statVFS[StatVFSTotalInodes] = mS.volStruct.reportedNumInodes
- statVFS[StatVFSFreeInodes] = mS.volStruct.reportedNumInodes
- statVFS[StatVFSAvailInodes] = mS.volStruct.reportedNumInodes
+ statVFS[StatVFSFilesystemID] = vS.inodeVolumeHandle.GetFSID()
+ statVFS[StatVFSBlockSize] = vS.reportedBlockSize
+ statVFS[StatVFSFragmentSize] = vS.reportedFragmentSize
+ statVFS[StatVFSTotalBlocks] = vS.reportedNumBlocks
+ statVFS[StatVFSFreeBlocks] = vS.reportedNumBlocks
+ statVFS[StatVFSAvailBlocks] = vS.reportedNumBlocks
+ statVFS[StatVFSTotalInodes] = vS.reportedNumInodes
+ statVFS[StatVFSFreeInodes] = vS.reportedNumInodes
+ statVFS[StatVFSAvailInodes] = vS.reportedNumInodes
statVFS[StatVFSMountFlags] = 0
statVFS[StatVFSMaxFilenameLen] = FileNameMax
return statVFS, nil
}
-func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string, target string) (symlinkInodeNumber inode.InodeNumber, err error) {
+func (vS *volumeStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string, target string) (symlinkInodeNumber inode.InodeNumber, err error) {
startTime := time.Now()
defer func() {
globals.SymlinkUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -3969,8 +3939,8 @@ func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGrou
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
err = validateBaseName(basename)
if err != nil {
@@ -3983,12 +3953,12 @@ func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGrou
}
// Mode for symlinks defaults to rwxrwxrwx, i.e. inode.PosixModePerm
- symlinkInodeNumber, err = mS.volStruct.inodeVolumeHandle.CreateSymlink(target, inode.PosixModePerm, userID, groupID)
+ symlinkInodeNumber, err = vS.inodeVolumeHandle.CreateSymlink(target, inode.PosixModePerm, userID, groupID)
if err != nil {
return
}
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -3998,20 +3968,20 @@ func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGrou
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(symlinkInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(symlinkInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Access(F_OK) in fs.Symlink", symlinkInodeNumber)
}
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(symlinkInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(symlinkInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Access(W_OK|X_OK) in fs.Symlink", symlinkInodeNumber)
}
@@ -4019,9 +3989,9 @@ func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGrou
return
}
- err = mS.volStruct.inodeVolumeHandle.Link(inodeNumber, basename, symlinkInodeNumber, false)
+ err = vS.inodeVolumeHandle.Link(inodeNumber, basename, symlinkInodeNumber, false)
if err != nil {
- destroyErr := mS.volStruct.inodeVolumeHandle.Destroy(symlinkInodeNumber)
+ destroyErr := vS.inodeVolumeHandle.Destroy(symlinkInodeNumber)
if destroyErr != nil {
logger.WarnfWithError(destroyErr, "couldn't destroy inode %v after failed Link() in fs.Symlink", symlinkInodeNumber)
}
@@ -4031,7 +4001,7 @@ func (mS *mountStruct) Symlink(userID inode.InodeUserID, groupID inode.InodeGrou
return
}
-func (mS *mountStruct) Unlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string) (err error) {
+func (vS *volumeStruct) Unlink(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, basename string) (err error) {
startTime := time.Now()
defer func() {
globals.UnlinkUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -4040,11 +4010,11 @@ func (mS *mountStruct) Unlink(userID inode.InodeUserID, groupID inode.InodeGroup
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
callerID := dlm.GenerateCallerID()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, callerID)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, callerID)
if err != nil {
return
}
@@ -4054,23 +4024,23 @@ func (mS *mountStruct) Unlink(userID inode.InodeUserID, groupID inode.InodeGroup
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK|inode.X_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- basenameInodeNumber, err := mS.volStruct.inodeVolumeHandle.Lookup(inodeNumber, basename)
+ basenameInodeNumber, err := vS.inodeVolumeHandle.Lookup(inodeNumber, basename)
if nil != err {
return
}
- basenameInodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(basenameInodeNumber, callerID)
+ basenameInodeLock, err := vS.inodeVolumeHandle.InitInodeLock(basenameInodeNumber, callerID)
if err != nil {
return
}
@@ -4080,14 +4050,14 @@ func (mS *mountStruct) Unlink(userID inode.InodeUserID, groupID inode.InodeGroup
}
defer basenameInodeLock.Unlock()
- err = mS.unlinkActual(inodeNumber, basename, basenameInodeNumber)
+ err = vS.unlinkActual(inodeNumber, basename, basenameInodeNumber)
return
}
-func (mS *mountStruct) unlinkActual(inodeNumber inode.InodeNumber,
+func (vS *volumeStruct) unlinkActual(inodeNumber inode.InodeNumber,
basename string, basenameInodeNumber inode.InodeNumber) (err error) {
- basenameInodeType, err := mS.volStruct.inodeVolumeHandle.GetType(basenameInodeNumber)
+ basenameInodeType, err := vS.inodeVolumeHandle.GetType(basenameInodeNumber)
if nil != err {
return
}
@@ -4098,19 +4068,19 @@ func (mS *mountStruct) unlinkActual(inodeNumber inode.InodeNumber,
return
}
- err = mS.volStruct.inodeVolumeHandle.Unlink(inodeNumber, basename, false)
+ err = vS.inodeVolumeHandle.Unlink(inodeNumber, basename, false)
if nil != err {
return
}
- basenameLinkCount, err := mS.volStruct.inodeVolumeHandle.GetLinkCount(basenameInodeNumber)
+ basenameLinkCount, err := vS.inodeVolumeHandle.GetLinkCount(basenameInodeNumber)
if nil != err {
return
}
if 0 == basenameLinkCount {
- mS.volStruct.untrackInFlightFileInodeData(basenameInodeNumber, false)
- err = mS.volStruct.inodeVolumeHandle.Destroy(basenameInodeNumber)
+ vS.untrackInFlightFileInodeData(basenameInodeNumber, false)
+ err = vS.inodeVolumeHandle.Destroy(basenameInodeNumber)
if nil != err {
return
}
@@ -4119,15 +4089,15 @@ func (mS *mountStruct) unlinkActual(inodeNumber inode.InodeNumber,
return
}
-func (mS *mountStruct) VolumeName() (volumeName string) {
+func (vS *volumeStruct) VolumeName() (volumeName string) {
startTime := time.Now()
- volumeName = mS.volStruct.volumeName
+ volumeName = vS.volumeName
globals.VolumeNameUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
return
}
-func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, buf []byte, profiler *utils.Profiler) (size uint64, err error) {
+func (vS *volumeStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, offset uint64, buf []byte, profiler *utils.Profiler) (size uint64, err error) {
startTime := time.Now()
defer func() {
globals.WriteUsec.Add(uint64(time.Since(startTime) / time.Microsecond))
@@ -4137,13 +4107,13 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI
}
}()
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
logger.Tracef("fs.Write(): starting volume '%s' inode %v offset %v len %v",
- mS.volStruct.volumeName, inodeNumber, offset, len(buf))
+ vS.volumeName, inodeNumber, offset, len(buf))
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -4153,37 +4123,37 @@ func (mS *mountStruct) Write(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
profiler.AddEventNow("before inode.Write()")
- err = mS.volStruct.inodeVolumeHandle.Write(inodeNumber, offset, buf, profiler)
+ err = vS.inodeVolumeHandle.Write(inodeNumber, offset, buf, profiler)
profiler.AddEventNow("after inode.Write()")
// write to Swift presumably succeeds or fails as a whole
if err != nil {
return 0, err
}
- logger.Tracef("fs.Write(): tracking write volume '%s' inode %v", mS.volStruct.volumeName, inodeNumber)
- mS.volStruct.trackInFlightFileInodeData(inodeNumber)
+ logger.Tracef("fs.Write(): tracking write volume '%s' inode %v", vS.volumeName, inodeNumber)
+ vS.trackInFlightFileInodeData(inodeNumber)
size = uint64(len(buf))
return
}
-func (mS *mountStruct) Wrote(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, containerName string, objectName string, fileOffset []uint64, objectOffset []uint64, length []uint64) (err error) {
- mS.volStruct.jobRWMutex.RLock()
- defer mS.volStruct.jobRWMutex.RUnlock()
+func (vS *volumeStruct) Wrote(userID inode.InodeUserID, groupID inode.InodeGroupID, otherGroupIDs []inode.InodeGroupID, inodeNumber inode.InodeNumber, containerName string, objectName string, fileOffset []uint64, objectOffset []uint64, length []uint64) (err error) {
+ vS.jobRWMutex.RLock()
+ defer vS.jobRWMutex.RUnlock()
- inodeLock, err := mS.volStruct.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
+ inodeLock, err := vS.inodeVolumeHandle.InitInodeLock(inodeNumber, nil)
if err != nil {
return
}
@@ -4193,21 +4163,21 @@ func (mS *mountStruct) Wrote(userID inode.InodeUserID, groupID inode.InodeGroupI
}
defer inodeLock.Unlock()
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.F_OK,
inode.NoOverride) {
err = blunder.NewError(blunder.NotFoundError, "ENOENT")
return
}
- if !mS.volStruct.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
+ if !vS.inodeVolumeHandle.Access(inodeNumber, userID, groupID, otherGroupIDs, inode.W_OK,
inode.OwnerOverride) {
err = blunder.NewError(blunder.PermDeniedError, "EACCES")
return
}
- err = mS.volStruct.inodeVolumeHandle.Flush(inodeNumber, false)
- mS.volStruct.untrackInFlightFileInodeData(inodeNumber, false)
+ err = vS.inodeVolumeHandle.Flush(inodeNumber, false)
+ vS.untrackInFlightFileInodeData(inodeNumber, false)
- err = mS.volStruct.inodeVolumeHandle.Wrote(inodeNumber, containerName, objectName, fileOffset, objectOffset, length, true)
+ err = vS.inodeVolumeHandle.Wrote(inodeNumber, containerName, objectName, fileOffset, objectOffset, length, true)
return // err, as set by inode.Wrote(), is sufficient
}
@@ -4258,8 +4228,8 @@ func revSplitPath(fullpath string) []string {
// obstacleInodeNumber must refer to an existing file or directory
// that is (a) already part of the directory tree and (b) not the root
// directory.
-func (mS *mountStruct) removeObstacleToObjectPut(callerID dlm.CallerID, dirInodeNumber inode.InodeNumber, obstacleName string, obstacleInodeNumber inode.InodeNumber) error {
- statResult, err := mS.getstatHelper(obstacleInodeNumber, callerID)
+func (vS *volumeStruct) removeObstacleToObjectPut(callerID dlm.CallerID, dirInodeNumber inode.InodeNumber, obstacleName string, obstacleInodeNumber inode.InodeNumber) error {
+ statResult, err := vS.getstatHelper(obstacleInodeNumber, callerID)
if err != nil {
return err
}
@@ -4267,12 +4237,12 @@ func (mS *mountStruct) removeObstacleToObjectPut(callerID dlm.CallerID, dirInode
fileType := inode.InodeType(statResult[StatFType])
if fileType == inode.FileType || fileType == inode.SymlinkType {
// Files and symlinks can always, barring errors, be unlinked
- err = mS.volStruct.inodeVolumeHandle.Unlink(dirInodeNumber, obstacleName, false)
+ err = vS.inodeVolumeHandle.Unlink(dirInodeNumber, obstacleName, false)
if err != nil {
return err
}
} else if fileType == inode.DirType {
- numEntries, err := mS.volStruct.inodeVolumeHandle.NumDirEntries(obstacleInodeNumber)
+ numEntries, err := vS.inodeVolumeHandle.NumDirEntries(obstacleInodeNumber)
if err != nil {
return err
}
@@ -4293,7 +4263,7 @@ func (mS *mountStruct) removeObstacleToObjectPut(callerID dlm.CallerID, dirInode
// We already have the locks and we've already
// checked that it's empty, so let's just get
// down to it.
- err = mS.volStruct.inodeVolumeHandle.Unlink(dirInodeNumber, obstacleName, false)
+ err = vS.inodeVolumeHandle.Unlink(dirInodeNumber, obstacleName, false)
if err != nil {
return err
}
diff --git a/fs/api_test.go b/fs/api_test.go
index 9d7ecc8e8..31641e65d 100644
--- a/fs/api_test.go
+++ b/fs/api_test.go
@@ -16,7 +16,7 @@ import (
// TODO: Enhance this to do a stat() as well and check number of files
func expectDirectory(t *testing.T, userID inode.InodeUserID, groupID inode.InodeGroupID, inodeNum inode.InodeNumber, expectedEntries []string) {
- readdirEntries, numEntries, moreEntries, err := testMountStruct.Readdir(userID, groupID, nil, inodeNum, 0, "")
+ readdirEntries, numEntries, moreEntries, err := testVolumeStruct.Readdir(userID, groupID, nil, inodeNum, 0, "")
if nil != err {
t.Fatalf("Readdir() [#1] returned error: %v", err)
}
@@ -47,7 +47,7 @@ func createTestDirectory(t *testing.T, dirname string) (dirInode inode.InodeNumb
// Get root dir inode number
rootDirInodeNumber := inode.RootDirInodeNumber
- dirInode, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, dirname, inode.PosixModePerm)
+ dirInode, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, dirname, inode.PosixModePerm)
if nil != err {
t.Fatalf("Mkdir() returned error: %v", err)
}
@@ -64,12 +64,12 @@ func TestCreateAndLookup(t *testing.T) {
rootDirInodeNumber := inode.RootDirInodeNumber
basename := "create_lookup.test"
- createdFileInodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
+ createdFileInodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
if err != nil {
t.Fatalf("Unexpectedly couldn't create file: %v", err)
}
- foundFileInodeNumber, err := testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
+ foundFileInodeNumber, err := testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
if err != nil {
t.Fatalf("Unexpectedly failed to look up %v", basename)
}
@@ -78,7 +78,7 @@ func TestCreateAndLookup(t *testing.T) {
t.Fatalf("Expected created inode number %v to equal found inode number %v", createdFileInodeNumber, foundFileInodeNumber)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
if nil != err {
t.Fatalf("Unlink() returned error: %v", err)
}
@@ -93,12 +93,12 @@ func TestGetstat(t *testing.T) {
basename := "getstat.test"
timeBeforeCreation := uint64(time.Now().UnixNano())
- inodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
+ inodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
if err != nil {
t.Fatalf("couldn't create file: %v", err)
}
- stat, err := testMountStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber)
+ stat, err := testVolumeStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber)
if err != nil {
t.Fatalf("couldn't stat inode %v: %v", inodeNumber, err)
}
@@ -119,7 +119,7 @@ func TestGetstat(t *testing.T) {
// TODO: perform a write, check that size has changed accordingly
// TODO: make and delete hardlinks, check that link count has changed accordingly
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
if nil != err {
t.Fatalf("Unlink() returned error: %v", err)
}
@@ -169,21 +169,21 @@ func TestAllAPIPositiveCases(t *testing.T) {
rootDirInodeNumber := inode.RootDirInodeNumber
// Mkdir A/B/ : create a subdirectory within Volume directory
- _, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory", inode.PosixModePerm)
- // newDirInodeNum, err := testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
+ _, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory", inode.PosixModePerm)
+ // newDirInodeNum, err := testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
if nil != err {
t.Fatalf("Mkdir() returned error: %v", err)
}
// Create #1 A/C : create and open a normal file within Volume directory
basename := "TestNormalFile"
- createdFileInodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
+ createdFileInodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() [#1] returned error: %v", err)
}
// Lookup #1 A/C : fetch the inode name of the just created normal file
- foundFileInodeNumber, err := testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
+ foundFileInodeNumber, err := testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
if err != nil {
t.Fatalf("Lookup() [#1] returned error: %v", err)
}
@@ -193,7 +193,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
// Write A/C : write something to normal file
bufToWrite := []byte{0x41, 0x42, 0x43}
- write_rspSize, err := testMountStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, 0, bufToWrite, nil)
+ write_rspSize, err := testVolumeStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, 0, bufToWrite, nil)
if nil != err {
t.Fatalf("Write() returned error: %v", err)
}
@@ -202,13 +202,13 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// don't forget to flush
- err = testMountStruct.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber)
+ err = testVolumeStruct.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber)
if err != nil {
t.Fatalf("Flush() returned error: %v", err)
}
// Read A/C : read back what was just written to normal file
- read_buf, err := testMountStruct.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, 0, uint64(len(bufToWrite)), nil)
+ read_buf, err := testVolumeStruct.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, 0, uint64(len(bufToWrite)), nil)
if nil != err {
t.Fatalf("Read() returned error: %v", err)
}
@@ -219,7 +219,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
t.Fatalf("Read() returned data different from what was written")
}
- extent_map_chunk, err := testMountStruct.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, uint64(0), int64(1), int64(0))
+ extent_map_chunk, err := testVolumeStruct.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, uint64(0), int64(1), int64(0))
if nil != err {
t.Fatalf("FetchExtentMapChunk() returned error: %v", err)
}
@@ -246,7 +246,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// Getstat #1 A/C : check the current size of the normal file
- getstat_1_rspStat, err := testMountStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber)
+ getstat_1_rspStat, err := testVolumeStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber)
if nil != err {
t.Fatalf("Getstat() returned error: %v", err)
}
@@ -259,13 +259,13 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// Resize A/C : truncate the file
- err = testMountStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber, 0)
+ err = testVolumeStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber, 0)
if nil != err {
t.Fatalf("Resize() returned error: %v", err)
}
// Getstat #2 A/C : verify the size of the normal file is now zero
- getstat_2_rspStat, err := testMountStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber)
+ getstat_2_rspStat, err := testVolumeStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, foundFileInodeNumber)
if nil != err {
t.Fatalf("Getstat() [#2] returned error: %v", err)
}
@@ -278,13 +278,13 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// Symlink A/D->A/C : create a symlink to the normal file
- createdSymlinkInodeNumber, err := testMountStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink", "TestNormalFile")
+ createdSymlinkInodeNumber, err := testVolumeStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink", "TestNormalFile")
if nil != err {
t.Fatalf("Symlink() returned error: %v", err)
}
// Lookup #2 A/D : fetch the inode name of the just created symlink
- lookup_2_inodeHandle, err := testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink")
+ lookup_2_inodeHandle, err := testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink")
if nil != err {
t.Fatalf("Lookup() [#2] returned error: %v", err)
}
@@ -293,7 +293,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// Readsymlink A/D : read the symlink to ensure it points to the normal file
- readsymlink_target, err := testMountStruct.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_2_inodeHandle)
+ readsymlink_target, err := testVolumeStruct.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_2_inodeHandle)
if nil != err {
t.Fatalf("Readsymlink() returned error: %v", err)
}
@@ -302,13 +302,13 @@ func TestAllAPIPositiveCases(t *testing.T) {
}
// Lookup #3 A/B/ : fetch the inode name of the subdirectory
- lookup_3_inodeHandle, err := testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
+ lookup_3_inodeHandle, err := testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
if nil != err {
t.Fatalf("Lookup() [#3] returned error: %v", err)
}
// Create #2 A/B/E : create a normal file within subdirectory
- testSubDirectoryFileInode, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFile", inode.PosixModePerm)
+ testSubDirectoryFileInode, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFile", inode.PosixModePerm)
if nil != err {
t.Fatalf("Create() [#2] returned error: %v", err)
}
@@ -318,7 +318,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), lookup_3_inodeHandle, entriesExpected)
// Link A/B/E
- err = testMountStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFileHardLink", testSubDirectoryFileInode)
+ err = testVolumeStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFileHardLink", testSubDirectoryFileInode)
if nil != err {
t.Fatalf("Link() returned error: %v", err)
}
@@ -327,7 +327,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), lookup_3_inodeHandle, entriesExpected)
// Unlink #1 A/B/E : delete the normal file within the subdirectory
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFile")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFile")
if nil != err {
t.Fatalf("Unlink() [#1] returned error: %v", err)
}
@@ -336,7 +336,7 @@ func TestAllAPIPositiveCases(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), lookup_3_inodeHandle, entriesExpected)
// Unlink #1.5 A/B/E : delete the normal file within the subdirectory
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFileHardLink")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lookup_3_inodeHandle, "TestSubDirectoryFileHardLink")
if nil != err {
t.Fatalf("Unlink() [#1.5] returned error: %v", err)
}
@@ -345,20 +345,20 @@ func TestAllAPIPositiveCases(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), rootDirInodeNumber, entriesExpected)
// Unlink #2 A/D : delete the symlink
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSymlink")
if nil != err {
t.Fatalf("Unlink() [#2] returned error: %v", err)
}
// Unlink #3 A/C : delete the normal file
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestNormalFile")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestNormalFile")
if nil != err {
t.Fatalf("Unlink() [#3] returned error: %v", err)
}
// Rmdir #4 A/B : delete the subdirectory
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, "TestSubDirectory")
if nil != err {
t.Fatalf("Unlink() [#4] returned error: %v", err)
}
@@ -376,13 +376,13 @@ func TestBadLinks(t *testing.T) {
testDirInode := createTestDirectory(t, "BadLinks")
validFile := "PerfectlyValidFile"
- validFileInodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, inode.PosixModePerm)
+ validFileInodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() returned error: %v", err)
}
nameTooLong := strings.Repeat("x", FileNameMax+1)
- err = testMountStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, validFileInodeNumber)
+ err = testVolumeStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, validFileInodeNumber)
if nil != err {
if blunder.IsNot(err, blunder.NameTooLongError) {
t.Fatalf("Link() returned error %v, expected %v(%d).", blunder.Errno(err), blunder.NameTooLongError, blunder.NameTooLongError.Value())
@@ -404,7 +404,7 @@ func TestMkdir(t *testing.T) {
longButLegalFilename := strings.Repeat("x", FileNameMax)
nameTooLong := strings.Repeat("x", FileNameMax+1)
- _, err := testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, inode.PosixModePerm)
+ _, err := testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, inode.PosixModePerm)
if nil != err {
if blunder.IsNot(err, blunder.NameTooLongError) {
t.Fatalf("Mkdir() returned error %v, expected %v(%d).", blunder.Errno(err), blunder.NameTooLongError, blunder.NameTooLongError.Value())
@@ -413,7 +413,7 @@ func TestMkdir(t *testing.T) {
t.Fatal("Mkdir() unexpectedly succeeded on too-long filename!")
}
- _, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, longButLegalFilename, inode.PosixModePerm)
+ _, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, longButLegalFilename, inode.PosixModePerm)
if err != nil {
t.Fatalf("Mkdir() returned error: %v", err)
}
@@ -422,18 +422,18 @@ func TestMkdir(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), testDirInode, entriesExpected)
longButLegalFullPath := "/Mkdir/" + longButLegalFilename
- ino, err := testMountStruct.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, longButLegalFullPath)
+ ino, err := testVolumeStruct.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, longButLegalFullPath)
if err != nil {
t.Fatalf("LookupPath() returned error: %v", err)
}
- _, err = testMountStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
+ _, err = testVolumeStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
if err != nil {
t.Fatalf("GetStat() returned error: %v", err)
}
// trying to make the directory a second time should fail with EEXIST
- _, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInode, longButLegalFilename, inode.PosixModePerm)
if err == nil {
t.Fatalf("Mkdir() of existing entry returned success")
@@ -451,14 +451,14 @@ func TestRmdir(t *testing.T) {
testDirInode := createTestDirectory(t, "Rmdir")
- _, err := testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err := testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInode, "test1", inode.PosixModePerm)
if err != nil {
t.Fatalf("Mkdir(\"test1\") returned error: %v", err)
}
// the test directory can't be removed until it's empty
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
inode.RootDirInodeNumber, "Rmdir")
if err == nil {
t.Fatalf("Rmdir() [#0] should have failed")
@@ -468,13 +468,13 @@ func TestRmdir(t *testing.T) {
}
// empty the test directory
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInode, "test1")
if err != nil {
t.Fatalf("Rmdir() [#1] returned error: %v", err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
inode.RootDirInodeNumber, "Rmdir")
if err != nil {
t.Fatalf("Rmdir() [#2] returned error: %v", err)
@@ -489,13 +489,13 @@ func TestBadRename(t *testing.T) {
nameTooLong := strings.Repeat("x", FileNameMax+1)
validFile := "PerfectlyValidFile"
- _, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, inode.PosixModePerm)
+ _, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, inode.PosixModePerm)
if nil != err {
t.Fatalf("Create() returned error: %v", err)
}
// Try to rename a valid file to a name that is too long
- err = testMountStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, testDirInode, nameTooLong)
+ err = testVolumeStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, validFile, testDirInode, nameTooLong)
if nil != err {
if blunder.IsNot(err, blunder.NameTooLongError) {
t.Fatalf("Link() returned error %v, expected %v(%d).", blunder.Errno(err), blunder.NameTooLongError, blunder.NameTooLongError.Value())
@@ -508,7 +508,7 @@ func TestBadRename(t *testing.T) {
expectDirectory(t, inode.InodeRootUserID, inode.InodeGroupID(0), testDirInode, entriesExpected)
// Try to rename a nonexistent file with a name that is too long
- err = testMountStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, testDirInode, "AlsoAGoodFilename")
+ err = testVolumeStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, nameTooLong, testDirInode, "AlsoAGoodFilename")
if nil != err {
if blunder.IsNot(err, blunder.NameTooLongError) {
t.Fatalf("Link() returned error %v, expected %v(%d).", blunder.Errno(err), blunder.NameTooLongError, blunder.NameTooLongError.Value())
@@ -535,7 +535,7 @@ func TestBadChownChmod(t *testing.T) {
// Create file to play with
basename := "TestFile"
- createdFileInodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
+ createdFileInodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() %v returned error: %v", basename, err)
}
@@ -547,7 +547,7 @@ func TestBadChownChmod(t *testing.T) {
// Validate too-big Mode
stat := make(Stat)
stat[StatMode] = tooBigForUint32
- err = testMountStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
+ err = testVolumeStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
if blunder.IsNot(err, blunder.InvalidFileModeError) {
t.Fatalf("Setstat() %v returned error %v, expected %v(%d).", basename, blunder.Errno(err), blunder.InvalidFileModeError, blunder.InvalidFileModeError.Value())
}
@@ -555,7 +555,7 @@ func TestBadChownChmod(t *testing.T) {
// Validate too-big UserID
stat[StatUserID] = tooBigForUint32
- err = testMountStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
+ err = testVolumeStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
if blunder.Errno(err) != int(blunder.InvalidFileModeError) {
t.Fatalf("Setstat() %v returned error %v, expected %v(%d).", basename, blunder.Errno(err), blunder.InvalidFileModeError, blunder.InvalidFileModeError.Value())
}
@@ -563,7 +563,7 @@ func TestBadChownChmod(t *testing.T) {
// Validate too-big GroupID
stat[StatGroupID] = tooBigForUint32
- err = testMountStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
+ err = testVolumeStruct.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, createdFileInodeNumber, stat)
if blunder.Errno(err) != int(blunder.InvalidFileModeError) {
t.Fatalf("Setstat() %v returned error %v, expected %v(%d).", basename, blunder.Errno(err), blunder.InvalidFileModeError, blunder.InvalidFileModeError.Value())
}
@@ -583,13 +583,13 @@ func TestFlock(t *testing.T) {
// Create file to play with
basename := "TestLockFile"
- lockFileInodeNumber, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
+ lockFileInodeNumber, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() %v returned error: %v", basename, err)
}
// Resize the file to 1MB so that we can apply byte range locks:
- err = testMountStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, 1024*1024)
+ err = testVolumeStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, 1024*1024)
if err != nil {
t.Fatalf("Resize() %v returned error: %v", basename, err)
}
@@ -601,20 +601,20 @@ func TestFlock(t *testing.T) {
lock.Len = 0
lock.Pid = 1
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Write lock on file failed: %v", err)
}
lock.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Unlock on file failed: %v", blunder.Errno(err))
}
lock.Type = syscall.F_WRLCK
lock.Pid = 1
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Write lock on file failed: %v", err)
}
@@ -623,32 +623,32 @@ func TestFlock(t *testing.T) {
var lock1 FlockStruct
lock1 = lock
lock1.Pid = 2
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock1)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock1)
if blunder.Errno(err) != int(blunder.TryAgainError) {
t.Fatalf("Write lock on a locked file should fail with EAGAIN instead got : %v", err)
}
// Lock again from pid1, it should succeed:
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Relocking from same PID on file failed: %v", err)
}
lock.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Unlock failed : %v", err)
}
// Read lock test:
lock.Type = syscall.F_RDLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock)
if err != nil {
t.Fatalf("Read lock pid - 1 failed: %v", err)
}
lock1.Type = syscall.F_RDLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock1)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock1)
if err != nil {
t.Fatalf("Read lock pid - 2 failed: %v", err)
}
@@ -657,32 +657,32 @@ func TestFlock(t *testing.T) {
lock3 := lock
lock3.Type = syscall.F_WRLCK
lock3.Pid = 3
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
if blunder.Errno(err) != int(blunder.TryAgainError) {
t.Fatalf("Write lock should have failed with EAGAIN instead got - %v", err)
}
lock11 := lock1
lock11.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock11)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock11)
if err != nil {
t.Fatalf("Unlock of (readlock) - 2 failed: %v", err)
}
lock01 := lock
lock01.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock01)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock01)
if err != nil {
t.Fatalf("Unlock of (readlock) - 1 failed: %v", err)
}
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
if err != nil {
t.Fatalf("Write lock should have succeeded instead got - %v", err.Error())
}
lock3.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock3)
if err != nil {
t.Fatalf("Unlock of (write after read) failed: %v", err)
}
@@ -696,7 +696,7 @@ func TestFlock(t *testing.T) {
lock10.Type = syscall.F_WRLCK
lock10.Whence = 0
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock10)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock10)
if err != nil {
t.Fatalf("Range test failed to lock range (100 - 200), err %v", err)
}
@@ -706,7 +706,7 @@ func TestFlock(t *testing.T) {
lock201.Type = syscall.F_RDLCK
lock201.Start = 10
lock201.Len = 10
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock201)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock201)
if err != nil {
t.Fatalf("Range test failed to read lock range (10 - 20) by pid2, err %v", err)
}
@@ -714,7 +714,7 @@ func TestFlock(t *testing.T) {
lock202 := lock201
lock202.Start = 90
lock202.Len = 10
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock202)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock202)
if err != nil {
t.Fatalf("Range test failed to read lock range (90 - 100) by pid2, err %v", err)
}
@@ -722,14 +722,14 @@ func TestFlock(t *testing.T) {
lock203 := lock202
lock203.Start = 80
lock203.Len = 40
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock203)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock203)
if err == nil {
t.Fatalf("Range test read lock of range (80 - 120) should have failed for pid2 err %v", err)
}
lock204 := lock203
lock204.Start = 180
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock204)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock204)
if err == nil {
t.Fatalf("Range test read lock of range (180 - 220) should have failed for pid2 err %v", err)
}
@@ -737,7 +737,7 @@ func TestFlock(t *testing.T) {
lock205 := lock204
lock205.Start = 200
lock205.Len = 10
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock205)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock205)
if err != nil {
t.Fatalf("Range test read lock of range (200 - 210) should have succeeded for pid2 err %v", err)
}
@@ -745,44 +745,44 @@ func TestFlock(t *testing.T) {
lock206 := lock205
lock206.Start = 240
lock206.Len = 10
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock206)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock206)
if err != nil {
t.Fatalf("Range test read lock of range (240 - 250) should have succeeded for pid2 err %v", err)
}
lock101 := lock10
lock101.Type = syscall.F_RDLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock101)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock101)
if err != nil {
t.Fatalf("Range test converting write lock to read lock of pid1 range 100 - 200 failed, err %v", err)
}
// Now, lock 203 and 204 should succeed.
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock203)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock203)
if err != nil {
t.Fatalf("Range test read lock of range (80 - 120) should have succeeded for pid2 err %v", err)
}
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock204)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock204)
if err != nil {
t.Fatalf("Range test read lock of range (180 - 220) should have succeeded for pid2 err %v", err)
}
lock30 := lock10
lock30.Pid = 3
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
if err == nil {
t.Fatalf("Range test write lock of range 100 - 200 should have failed for pid3 err %v", err)
}
lock102 := lock10
lock102.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock102)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock102)
if err != nil {
t.Fatalf("Range test unlock of range 100 - 200 for pid1 should have succeeded, err - %v", err)
}
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
if err == nil {
t.Fatalf("Range test write lock of range 100 - 200 should have failed for pid3 err %v", err)
}
@@ -790,19 +790,19 @@ func TestFlock(t *testing.T) {
lock207 := lock10
lock207.Type = syscall.F_UNLCK
lock207.Pid = 2
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock207)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock207)
if err != nil {
t.Fatalf("Range test unlock of range 100 - 200 for pid2 should have succeeded, err - %v", err)
}
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock30)
if err != nil {
t.Fatalf("Range test write lock of range 100 - 200 should have succeeded for pid3 err %v", err)
}
lock301 := lock30
lock301.Type = syscall.F_UNLCK
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock301)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock301)
if err != nil {
t.Fatalf("Range test unlock of range 100 - 200 should have succeeded for pid3 err %v", err)
}
@@ -811,7 +811,7 @@ func TestFlock(t *testing.T) {
lock2u1.Type = syscall.F_UNLCK
lock2u1.Start = 0
lock2u1.Len = 150
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock2u1)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock2u1)
if err != nil {
t.Fatalf("Range test unlock of range 0 - 150 should have succeeded for pid2 err %v", err)
}
@@ -819,7 +819,7 @@ func TestFlock(t *testing.T) {
lock2u2 := lock2u1
lock2u2.Start = 150
lock2u2.Len = 150
- _, err = testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock2u2)
+ _, err = testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_SETLK, &lock2u2)
if err != nil {
t.Fatalf("Range test unlock of range 150 - 300 should have succeeded for pid2 err %v", err)
}
@@ -827,7 +827,7 @@ func TestFlock(t *testing.T) {
lock30.Start = 0
lock30.Len = 250
lock30.Type = syscall.F_WRLCK
- lockHeld, err := testMountStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_GETLK, &lock30)
+ lockHeld, err := testVolumeStruct.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, lockFileInodeNumber, syscall.F_GETLK, &lock30)
if err != nil {
t.Fatalf("Range test GET write lock of range 0 - 250 should have succeeded for pid3 err %v lockHeld %+v", err, lockHeld)
}
@@ -836,7 +836,7 @@ func TestFlock(t *testing.T) {
t.Fatalf("GetLock should have succeeded for range 0 - 250 for pid 3, err %v", err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, rootDirInodeNumber, basename)
if err != nil {
t.Fatalf("Unlink() %v returned error: %v", basename, err)
}
@@ -864,31 +864,31 @@ func TestStaleInodes(t *testing.T) {
testSetup(t, false)
// scratchpad directory for testing
- testDirInodeNumber, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ testDirInodeNumber, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
rootDirInodeNumber, testDirname, 0755)
if nil != err {
t.Fatalf("Mkdir() '%s' returned error: %v", testDirname, err)
}
// create a valid test file
- testFileInodeNumber, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ testFileInodeNumber, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, testFileName, 0644)
if nil != err {
t.Fatalf("Create() '%s' returned error: %v", testFileName, err)
}
// get an inode number that used to belong to a directory
- _, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleDirName, 0755)
if nil != err {
t.Fatalf("Mkdir() '%s' returned error: %v", testDirname, err)
}
- staleDirInodeNumber, err = testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ staleDirInodeNumber, err = testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleDirName)
if err != nil {
t.Fatalf("Unexpectedly failed to look up of '%s': %v", testDirname, err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleDirName)
if nil != err {
t.Fatalf("Rmdir() of '%s' returned error: %v", staleDirName, err)
@@ -897,24 +897,24 @@ func TestStaleInodes(t *testing.T) {
// get an inode number that used to belong to a file (it shouldn't
// really matter which type of file the inode used to be, but it doesn't
// hurt to have two to play with)
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleFileName, 0644)
if nil != err {
t.Fatalf("Mkdir() '%s' returned error: %v", testDirname, err)
}
- staleFileInodeNumber, err = testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ staleFileInodeNumber, err = testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleFileName)
if err != nil {
t.Fatalf("Unexpectedly failed to look up of '%s': %v", testDirname, err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, staleFileName)
if nil != err {
t.Fatalf("Unlink() of '%s' returned error: %v", staleFileName, err)
}
// Stat
- _, err = testMountStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber)
+ _, err = testVolumeStruct.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber)
if nil == err {
t.Fatalf("Getstat() should not have returned success")
}
@@ -923,7 +923,7 @@ func TestStaleInodes(t *testing.T) {
}
// Mkdir
- _, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "TestSubDirectory", 0755)
if nil == err {
t.Fatalf("Mkdir() should not have returned success")
@@ -933,7 +933,7 @@ func TestStaleInodes(t *testing.T) {
}
// Rmdir
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar")
if nil == err {
t.Fatalf("Rmdir() should not have returned success")
@@ -943,7 +943,7 @@ func TestStaleInodes(t *testing.T) {
}
// Create
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar", 0644)
if nil == err {
t.Fatalf("Create() should not have returned success")
@@ -953,7 +953,7 @@ func TestStaleInodes(t *testing.T) {
}
// Lookup
- _, err = testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar")
if nil == err {
t.Fatalf("Lookup() should not have returned success")
@@ -964,7 +964,7 @@ func TestStaleInodes(t *testing.T) {
// Write
bufToWrite := []byte{0x41, 0x42, 0x43}
- _, err = testMountStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleFileInodeNumber, 0, bufToWrite, nil)
if nil == err {
t.Fatalf("Write() should not have returned success")
@@ -974,7 +974,7 @@ func TestStaleInodes(t *testing.T) {
}
// Read
- _, err = testMountStruct.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleFileInodeNumber, 0, uint64(len(bufToWrite)), nil)
if nil == err {
t.Fatalf("Read() should not have returned success")
@@ -984,7 +984,7 @@ func TestStaleInodes(t *testing.T) {
}
// Trunc
- err = testMountStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber, 77)
+ err = testVolumeStruct.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber, 77)
if nil == err {
t.Fatalf("Resize() should not have returned success")
}
@@ -993,7 +993,7 @@ func TestStaleInodes(t *testing.T) {
}
// Symlink
- _, err = testMountStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ _, err = testVolumeStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "TestSymlink", "fubar")
if nil == err {
t.Fatalf("Symlink() should not have returned success")
@@ -1003,7 +1003,7 @@ func TestStaleInodes(t *testing.T) {
}
// Readsymlink (that we didn't create)
- _, err = testMountStruct.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber)
+ _, err = testVolumeStruct.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleFileInodeNumber)
if nil == err {
t.Fatalf("Readsymlink() should not have returned success")
}
@@ -1012,7 +1012,7 @@ func TestStaleInodes(t *testing.T) {
}
// Readdir
- _, _, _, err = testMountStruct.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleDirInodeNumber, 0, "")
+ _, _, _, err = testVolumeStruct.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, staleDirInodeNumber, 0, "")
if nil == err {
t.Fatalf("Readdir() should not have returned success")
}
@@ -1021,7 +1021,7 @@ func TestStaleInodes(t *testing.T) {
}
// Link -- two cases, one with stale directory and one with stale file
- err = testMountStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar", testFileInodeNumber)
if nil == err {
t.Fatalf("Link(1) should not have returned success")
@@ -1030,7 +1030,7 @@ func TestStaleInodes(t *testing.T) {
t.Fatalf("Link(1) should have failed with NotFoundError, instead got: %v", err)
}
- err = testMountStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, testFileName, staleFileInodeNumber)
if nil == err {
t.Fatalf("Link(2) should not have returned success")
@@ -1040,7 +1040,7 @@ func TestStaleInodes(t *testing.T) {
}
// Unlink
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar")
if nil == err {
t.Fatalf("Unlink() should not have returned success")
@@ -1050,7 +1050,7 @@ func TestStaleInodes(t *testing.T) {
}
// Rename -- two cases, one with stale src directory and one with stale dest
- err = testMountStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, "fubar", staleDirInodeNumber, "barfu")
if nil == err {
t.Fatalf("Rename(1) should not have returned success")
@@ -1059,7 +1059,7 @@ func TestStaleInodes(t *testing.T) {
t.Fatalf("Rename(1) should have failed with NotFoundError, instead got: %v", err)
}
- err = testMountStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
staleDirInodeNumber, "fubar", testDirInodeNumber, "barfu")
if nil == err {
t.Fatalf("Rename(2) should not have returned success")
@@ -1069,12 +1069,12 @@ func TestStaleInodes(t *testing.T) {
}
// cleanup test file and directory
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
testDirInodeNumber, testFileName)
if nil != err {
t.Fatalf("Unlink() of '%s' returned error: %v", testFileName, err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
rootDirInodeNumber, testDirname)
if nil != err {
t.Fatalf("Rmdir() of '%s' returned error: %v", testDirname, err)
@@ -1090,17 +1090,17 @@ func TestMiddlewareGetContainer(t *testing.T) {
testDirInode := createTestDirectory(t, "container")
marker1 := "a_marker"
- _, err := testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, marker1, inode.PosixModePerm)
+ _, err := testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, marker1, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() returned error: %v", err)
}
marker2 := "b_marker"
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, marker2, inode.PosixModePerm)
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testDirInode, marker2, inode.PosixModePerm)
if err != nil {
t.Fatalf("Create() returned error: %v", err)
}
- ents, err = testMountStruct.MiddlewareGetContainer("container", 10, "a", "", "", "")
+ ents, err = testVolumeStruct.MiddlewareGetContainer("container", 10, "a", "", "", "")
if nil != err {
t.Fatalf("got some error: %v", err)
}
@@ -1108,7 +1108,7 @@ func TestMiddlewareGetContainer(t *testing.T) {
t.Fatalf("marker a gave wrong number of entries: %v", ents)
}
- ents, err = testMountStruct.MiddlewareGetContainer("container", 10, "b", "", "", "")
+ ents, err = testVolumeStruct.MiddlewareGetContainer("container", 10, "b", "", "", "")
if nil != err {
t.Fatalf("got some error: %v", err)
}
@@ -1116,7 +1116,7 @@ func TestMiddlewareGetContainer(t *testing.T) {
t.Fatalf("marker b gave wrong number of entries: %v", ents)
}
- ents, err = testMountStruct.MiddlewareGetContainer("container", 10, "a_marker", "", "", "")
+ ents, err = testVolumeStruct.MiddlewareGetContainer("container", 10, "a_marker", "", "", "")
if nil != err {
t.Fatalf("got some error: %v", err)
}
@@ -1146,7 +1146,7 @@ func verifyMetadata(t *testing.T, containerObjPath string,
)
// fetch the current metadata (implicit and explicit)
- headMeta, err = testMountStruct.MiddlewareHeadResponse(containerObjPath)
+ headMeta, err = testVolumeStruct.MiddlewareHeadResponse(containerObjPath)
if err != nil {
t.Errorf("MiddlewareHeadResponse() for '%s' op %s '%s' failed: %v",
containerObjPath, opName, stepName, err)
@@ -1233,11 +1233,11 @@ func TestMiddlewarePuts(t *testing.T) {
)
// make a container for testing and verify the explicit metadata
- err = testMountStruct.MiddlewarePutContainer(containerName, []byte(""), initialMetadata)
+ err = testVolumeStruct.MiddlewarePutContainer(containerName, []byte(""), initialMetadata)
if err != nil {
t.Fatalf("MiddlewarePutContainer() failed: %v", err)
}
- opMeta, err = testMountStruct.MiddlewareHeadResponse(containerName)
+ opMeta, err = testVolumeStruct.MiddlewareHeadResponse(containerName)
if err != nil {
t.Fatalf("MiddlewareHeadResponse() for container '%s' failed: %v", containerName, err)
}
@@ -1249,7 +1249,7 @@ func TestMiddlewarePuts(t *testing.T) {
// create a file object and then verify the explicit metadata and
// returned attributes are correct
opMeta.ModificationTime, opMeta.AttrChangeTime, opMeta.InodeNumber, opMeta.NumWrites, err =
- testMountStruct.MiddlewarePutComplete(containerName, objectPath, nil, nil, initialMetadata)
+ testVolumeStruct.MiddlewarePutComplete(containerName, objectPath, nil, nil, initialMetadata)
if err != nil {
t.Errorf("MiddlewarePutComplete() for container '%s' object '%s' failed: %v",
containerName, objectPath, err)
@@ -1264,7 +1264,7 @@ func TestMiddlewarePuts(t *testing.T) {
// replace the file object with a directory object then verify the
// explicit metadata and returned attributes
opMeta.ModificationTime, opMeta.AttrChangeTime, opMeta.InodeNumber, opMeta.NumWrites, err =
- testMountStruct.MiddlewareMkdir(containerName, objectPath, updatedMetadata)
+ testVolumeStruct.MiddlewareMkdir(containerName, objectPath, updatedMetadata)
if err != nil {
t.Errorf("MiddlewareMkdir() for container '%s' object '%s' failed: %v",
containerName, objectPath, err)
@@ -1279,7 +1279,7 @@ func TestMiddlewarePuts(t *testing.T) {
// verify the metadata (explicit and implicit) returned by
// MiddlewareGetObject() matches MiddlewareHeadResponse() for a
// directory
- opMeta, err = testMountStruct.MiddlewareGetObject(containerObjectPath,
+ opMeta, err = testVolumeStruct.MiddlewareGetObject(containerObjectPath,
[]ReadRangeIn{}, &[]inode.ReadPlanStep{})
if err != nil {
t.Errorf("MiddlewareGetObject() for object '%s' failed: %v", containerObjectPath, err)
@@ -1290,7 +1290,7 @@ func TestMiddlewarePuts(t *testing.T) {
// change the directory object back to a file object and verify the
// explicit metadata and returned attributes
opMeta.ModificationTime, opMeta.AttrChangeTime, opMeta.InodeNumber, opMeta.NumWrites, err =
- testMountStruct.MiddlewarePutComplete(containerName, objectPath, nil, nil, updatedMetadata2)
+ testVolumeStruct.MiddlewarePutComplete(containerName, objectPath, nil, nil, updatedMetadata2)
if err != nil {
t.Errorf("MiddlewarePutComplete() for container '%s' object '%s' failed: %v",
containerName, objectPath, err)
@@ -1304,7 +1304,7 @@ func TestMiddlewarePuts(t *testing.T) {
// verify the metadata (explicit and implicit) returned by
// MiddlewareGetObject() matches MiddlewareHeadResponse()
- opMeta, err = testMountStruct.MiddlewareGetObject(containerObjectPath,
+ opMeta, err = testVolumeStruct.MiddlewareGetObject(containerObjectPath,
[]ReadRangeIn{}, &[]inode.ReadPlanStep{})
if err != nil {
t.Errorf("MiddlewareGetObject() for object '%s' failed: %v", containerObjectPath, err)
@@ -1322,7 +1322,7 @@ func TestMiddlewarePuts(t *testing.T) {
containerDirPath := containerName + "/" + dirPath
opMeta.ModificationTime, opMeta.AttrChangeTime, opMeta.InodeNumber, opMeta.NumWrites, err =
- testMountStruct.MiddlewarePutComplete(containerName, dirPath, nil, nil, initialMetadata)
+ testVolumeStruct.MiddlewarePutComplete(containerName, dirPath, nil, nil, initialMetadata)
if err == nil {
t.Errorf("MiddlewarePutComplete() for container '%s' non-empty object '%s' should have failed",
containerName, objectPath)
@@ -1339,7 +1339,7 @@ func TestMiddlewarePuts(t *testing.T) {
// succeed and update the explicit metadata (but should not delete
// existing directory entries)
opMeta.ModificationTime, opMeta.AttrChangeTime, opMeta.InodeNumber, opMeta.NumWrites, err =
- testMountStruct.MiddlewareMkdir(containerName, dirPath, updatedMetadata)
+ testVolumeStruct.MiddlewareMkdir(containerName, dirPath, updatedMetadata)
if err != nil {
t.Errorf("MiddlewareMkdir() for object '%s' failed: %v", containerDirPath, err)
} else {
diff --git a/fs/config.go b/fs/config.go
index 006e93682..ebae16412 100644
--- a/fs/config.go
+++ b/fs/config.go
@@ -27,12 +27,6 @@ type inFlightFileInodeDataStruct struct {
// Note: There are potentially multiple initiators of this signal
const inFlightFileInodeDataControlBuffering = 100
-type mountStruct struct {
- id MountID
- options MountOptions
- volStruct *volumeStruct
-}
-
type volumeStruct struct {
dataMutex trackedlock.Mutex
volumeName string
@@ -46,7 +40,6 @@ type volumeStruct struct {
reportedNumInodes uint64 // Used for Total, Free, and Avail
FLockMap map[inode.InodeNumber]*list.List
inFlightFileInodeDataMap map[inode.InodeNumber]*inFlightFileInodeDataStruct
- mountList []MountID
jobRWMutex trackedlock.RWMutex
inodeVolumeHandle inode.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
@@ -65,9 +58,8 @@ type globalsStruct struct {
tryLockSerializationThreshhold uint64
symlinkMax uint16
- volumeMap map[string]*volumeStruct // key == volumeStruct.volumeName
- mountMap map[MountID]*mountStruct
- lastMountID MountID
+ volumeMap map[string]*volumeStruct // key == volumeStruct.volumeName
+
inFlightFileInodeDataList *list.List
serializedBackoffList *list.List
@@ -177,8 +169,8 @@ type globalsStruct struct {
MiddlewarePutCompleteErrors bucketstats.Total
MiddlewarePutContainerErrors bucketstats.Total
- MountUsec bucketstats.BucketLog2Round
- MountErrors bucketstats.BucketLog2Round
+ FetchVolumeHandleUsec bucketstats.BucketLog2Round
+ FetchVolumeHandleErrors bucketstats.BucketLog2Round
ValidateVolumeUsec bucketstats.BucketLog2Round
ScrubVolumeUsec bucketstats.BucketLog2Round
ValidateBaseNameUsec bucketstats.BucketLog2Round
@@ -214,15 +206,14 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
}
globals.volumeMap = make(map[string]*volumeStruct)
- globals.mountMap = make(map[MountID]*mountStruct)
- globals.lastMountID = MountID(0)
+
globals.inFlightFileInodeDataList = list.New()
globals.serializedBackoffList = list.New()
bucketstats.Register("proxyfs.fs", "", &globals)
err = nil
- return
+ return nil
}
func (dummy *globalsStruct) VolumeGroupCreated(confMap conf.ConfMap, volumeGroupName string, activePeer string, virtualIPAddr string) (err error) {
@@ -255,7 +246,6 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
volumeName: volumeName,
FLockMap: make(map[inode.InodeNumber]*list.List),
inFlightFileInodeDataMap: make(map[inode.InodeNumber]*inFlightFileInodeDataStruct),
- mountList: make([]MountID, 0),
}
volumeSectionName = "Volume:" + volumeName
@@ -311,12 +301,12 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
globals.volumeMap[volumeName] = volume
- return nil
+ err = nil
+ return
}
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
var (
- id MountID
ok bool
volume *volumeStruct
)
@@ -328,18 +318,17 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return
}
- for _, id = range volume.mountList {
- delete(globals.mountMap, id)
- }
-
volume.untrackInFlightFileInodeDataAll()
delete(globals.volumeMap, volumeName)
err = nil
- return nil
+ return
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
@@ -356,10 +345,6 @@ func (dummy *globalsStruct) Down(confMap conf.ConfMap) (err error) {
err = fmt.Errorf("fs.Down() called with 0 != len(globals.volumeMap")
return
}
- if 0 != len(globals.mountMap) {
- err = fmt.Errorf("fs.Down() called with 0 != len(globals.mountMap")
- return
- }
if 0 != globals.inFlightFileInodeDataList.Len() {
err = fmt.Errorf("fs.Down() called with 0 != globals.inFlightFileInodeDataList.Len()")
return
diff --git a/fs/metadata_stress_test.go b/fs/metadata_stress_test.go
index a74928dd6..6c9819d37 100644
--- a/fs/metadata_stress_test.go
+++ b/fs/metadata_stress_test.go
@@ -21,7 +21,7 @@ var testDirInodeNumber inode.InodeNumber
func testSetupForStress(t *testing.T, starvationMode bool) {
var err error
testSetup(t, starvationMode)
- testDirInodeNumber, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, testDirName, inode.PosixModePerm)
+ testDirInodeNumber, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, testDirName, inode.PosixModePerm)
if nil != err {
t.Fatalf("Failed to create %s: %v", testDirName, err)
}
@@ -183,16 +183,16 @@ func loopOp(fileRequest *testRequest, threadID int, inodeNumber inode.InodeNumbe
fName = name1 + "-" + strconv.Itoa(localLoopCount)
switch fileRequest.opType {
case createLoopTestOp:
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fName, inode.PosixModePerm)
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fName, inode.PosixModePerm)
case lookupPathLoopTestOp:
- _, err = testMountStruct.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fName)
+ _, err = testVolumeStruct.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fName)
case readdirLoopTestOp:
areMoreEntries = true
lastBasename = ""
maxEntries = 10
totalEntriesRead = 0 // Useful for debugging
for areMoreEntries {
- dirEnts, numEntries, more, err = testMountStruct.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, maxEntries, lastBasename)
+ dirEnts, numEntries, more, err = testVolumeStruct.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, maxEntries, lastBasename)
if nil != err {
return
}
@@ -206,7 +206,7 @@ func loopOp(fileRequest *testRequest, threadID int, inodeNumber inode.InodeNumbe
maxEntries = 10
totalEntriesRead = 0 // Useful for debugging
for areMoreEntries {
- containerEnts, err = testMountStruct.MiddlewareGetContainer(testDirName, maxEntries, lastBasename, "", "", "")
+ containerEnts, err = testVolumeStruct.MiddlewareGetContainer(testDirName, maxEntries, lastBasename, "", "", "")
if nil != err {
return
}
@@ -218,12 +218,12 @@ func loopOp(fileRequest *testRequest, threadID int, inodeNumber inode.InodeNumbe
}
}
case unlinkLoopTestOp:
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fName)
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fName)
case reWriteNoFlushLoopTestOp:
- _, _ = testMountStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fileRequest.offset, *fileRequest.bufPtr, nil)
+ _, _ = testVolumeStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, fileRequest.offset, *fileRequest.bufPtr, nil)
case seqWriteNoFlushLoopTestOp:
offset = fileRequest.length * uint64(localLoopCount)
- _, _ = testMountStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, offset, *fileRequest.bufPtr, nil)
+ _, _ = testVolumeStruct.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, offset, *fileRequest.bufPtr, nil)
}
localLoopCount++
infiniteLoopCount++
@@ -286,7 +286,7 @@ func threadNode(threadID int) {
case createTestOp:
response := &testResponse{}
- response.inodeNumber, response.err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber,
+ response.inodeNumber, response.err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber,
name1, inode.PosixModePerm)
threadMap[threadID].operationStatus <- response
@@ -303,7 +303,7 @@ func threadNode(threadID int) {
threadMap[threadID].operationStatus <- response
case mkdirTestOp:
- newInodeNumber, err := testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1, inode.PosixModePerm)
+ newInodeNumber, err := testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1, inode.PosixModePerm)
response := &testResponse{err: err, inodeNumber: newInodeNumber}
threadMap[threadID].operationStatus <- response
@@ -320,12 +320,12 @@ func threadNode(threadID int) {
threadMap[threadID].operationStatus <- response
case rmdirTestOp:
- err := testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1)
+ err := testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1)
response := &testResponse{err: err}
threadMap[threadID].operationStatus <- response
case unlinkTestOp:
- err := testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1)
+ err := testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inodeNumber, name1)
response := &testResponse{err: err}
threadMap[threadID].operationStatus <- response
diff --git a/fs/resolve_path.go b/fs/resolve_path.go
index 5a5b9ad1b..0a6606447 100644
--- a/fs/resolve_path.go
+++ b/fs/resolve_path.go
@@ -192,7 +192,7 @@ func (heldLocks *heldLocksStruct) free() {
//
// heldLocks.free()
//
-func (mS *mountStruct) resolvePath(startingInodeNumber inode.InodeNumber, path string, heldLocks *heldLocksStruct, options resolvePathOption) (dirInodeNumber inode.InodeNumber, dirEntryInodeNumber inode.InodeNumber, dirEntryBasename string, dirEntryInodeType inode.InodeType, retryRequired bool, err error) {
+func (vS *volumeStruct) resolvePath(startingInodeNumber inode.InodeNumber, path string, heldLocks *heldLocksStruct, options resolvePathOption) (dirInodeNumber inode.InodeNumber, dirEntryInodeNumber inode.InodeNumber, dirEntryBasename string, dirEntryInodeType inode.InodeType, retryRequired bool, err error) {
var (
dirEntryInodeLock *dlm.RWLockStruct
dirEntryInodeLockAlreadyExclusive bool
@@ -241,7 +241,7 @@ func (mS *mountStruct) resolvePath(startingInodeNumber inode.InodeNumber, path s
// Setup shortcuts/contants
dlmCallerID = dlm.GenerateCallerID()
- inodeVolumeHandle = mS.volStruct.inodeVolumeHandle
+ inodeVolumeHandle = vS.inodeVolumeHandle
// Prepare for SymlinkInode-restart handling on canonicalized path
@@ -965,7 +965,7 @@ func reCanonicalizePathForSymlink(canonicalizedPathSplit []string, symlinkIndex
//
// Note that a dirInodeIndex == -1 is possible
//
-func (mS *mountStruct) canonicalizePathAndLocateLeafDirInode(path string) (canonicalizedPathSplit []string, dirInodeIndex int, err error) {
+func (vS *volumeStruct) canonicalizePathAndLocateLeafDirInode(path string) (canonicalizedPathSplit []string, dirInodeIndex int, err error) {
var (
dirEntryInodeType inode.InodeType
heldLocks *heldLocksStruct
@@ -991,7 +991,7 @@ Restart:
heldLocks = newHeldLocks()
_, _, _, dirEntryInodeType, retryRequired, err =
- mS.resolvePath(
+ vS.resolvePath(
inode.RootDirInodeNumber,
strings.Join(canonicalizedPathSplit, "/"),
heldLocks,
diff --git a/fs/resolve_path_test.go b/fs/resolve_path_test.go
index b2e813b2f..66dc97ab8 100644
--- a/fs/resolve_path_test.go
+++ b/fs/resolve_path_test.go
@@ -71,15 +71,15 @@ func TestResolvePath(t *testing.T) {
testSetup(t, false)
- testContainer, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "TestResolvePathContainer", inode.PosixModePerm)
+ testContainer, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "TestResolvePathContainer", inode.PosixModePerm)
if nil != err {
t.Fatalf("Mkdir(,,,,\"TestResolvePathContainer\",) failed: %v", err)
}
- fileA, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "FileA", inode.PosixModePerm)
+ fileA, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "FileA", inode.PosixModePerm)
if nil != err {
t.Fatalf("Create(,,,,\"FileA\",) failed: %v", err)
}
- symlinkA, err = testMountStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "SymlinkA", "FileA")
+ symlinkA, err = testVolumeStruct.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "SymlinkA", "FileA")
if nil != err {
t.Fatalf("Symlink(,,,,\"SymlinkA\",) failed: %v", err)
}
@@ -89,7 +89,7 @@ func TestResolvePath(t *testing.T) {
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- testMountStruct.resolvePath(
+ testVolumeStruct.resolvePath(
inode.RootDirInodeNumber,
"/TestResolvePathContainer/SymlinkA",
heldLocks,
@@ -121,7 +121,7 @@ func TestResolvePath(t *testing.T) {
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- testMountStruct.resolvePath(
+ testVolumeStruct.resolvePath(
inode.RootDirInodeNumber,
"/TestResolvePathContainer/SymlinkA",
heldLocks,
@@ -155,7 +155,7 @@ func TestResolvePath(t *testing.T) {
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- testMountStruct.resolvePath(
+ testVolumeStruct.resolvePath(
inode.RootDirInodeNumber,
"/TestResolvePathContainer/SymlinkA",
heldLocks,
@@ -192,7 +192,7 @@ func TestResolvePath(t *testing.T) {
heldLocks = newHeldLocks()
dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, dirEntryInodeType, retryRequired, err =
- testMountStruct.resolvePath(
+ testVolumeStruct.resolvePath(
inode.RootDirInodeNumber,
"/TestResolvePathContainer/DirA/FileB",
heldLocks,
@@ -230,7 +230,7 @@ func TestResolvePath(t *testing.T) {
heldLocks.free()
- dirEntryInodeNumber, err = testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "DirA")
+ dirEntryInodeNumber, err = testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "DirA")
if nil != err {
t.Fatalf("Lookup(,,,,\"DirA\") failed: %v", err)
}
@@ -238,7 +238,7 @@ func TestResolvePath(t *testing.T) {
t.Fatalf("Lookup(,,,,\"DirA\") returned 0x%016X... expected 0x%016X", dirEntryInodeNumber, dirA)
}
- dirEntryInodeNumber, err = testMountStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirA, "FileB")
+ dirEntryInodeNumber, err = testVolumeStruct.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirA, "FileB")
if nil != err {
t.Fatalf("Lookup(,,,,\"FileB\") failed: %v", err)
}
@@ -248,23 +248,23 @@ func TestResolvePath(t *testing.T) {
// Destroy directory hierachy underneath "/TestResolvePathContainer/"
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirA, "FileB")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirA, "FileB")
if nil != err {
t.Fatalf("Unlink(,,,,\"FileB\") failed: %v", err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "DirA")
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "DirA")
if nil != err {
t.Fatalf("Rmdir(,,,,\"DirA\") failed: %v", err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "SymlinkA")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "SymlinkA")
if nil != err {
t.Fatalf("Unlink(,,,,\"SymlinkA\") failed: %v", err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "FileA")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainer, "FileA")
if nil != err {
t.Fatalf("Unlink(,,,,\"FileA\") failed: %v", err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "TestResolvePathContainer")
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "TestResolvePathContainer")
if nil != err {
t.Fatalf("Rmdir(,,,,\"TestResolvePathContainer\") failed: %v", err)
}
@@ -293,23 +293,23 @@ func TestCanonicalizePath(t *testing.T) {
testSetup(t, false)
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "RootFileName", inode.PosixModePerm)
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "RootFileName", inode.PosixModePerm)
if nil != err {
t.Fatal(err)
}
- containerInodeNumber, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "ContainerName", inode.PosixModePerm)
+ containerInodeNumber, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "ContainerName", inode.PosixModePerm)
if nil != err {
t.Fatal(err)
}
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "ContainerFileName", inode.PosixModePerm)
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "ContainerFileName", inode.PosixModePerm)
if nil != err {
t.Fatal(err)
}
- directoryInodeNumber, err = testMountStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "DirectoryName", inode.PosixModePerm)
+ directoryInodeNumber, err = testVolumeStruct.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "DirectoryName", inode.PosixModePerm)
if nil != err {
t.Fatal(err)
}
- _, err = testMountStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, directoryInodeNumber, "DirectoryFileName", inode.PosixModePerm)
+ _, err = testVolumeStruct.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, directoryInodeNumber, "DirectoryFileName", inode.PosixModePerm)
if nil != err {
t.Fatal(err)
}
@@ -372,7 +372,7 @@ func TestCanonicalizePath(t *testing.T) {
}
for _, testCanonicalizePathItem = range testCanonicalizePathList {
- canonicalizedPathSplit, dirInodeIndex, err = testMountStruct.canonicalizePathAndLocateLeafDirInode(testCanonicalizePathItem.path)
+ canonicalizedPathSplit, dirInodeIndex, err = testVolumeStruct.canonicalizePathAndLocateLeafDirInode(testCanonicalizePathItem.path)
if testCanonicalizePathItem.shouldSucceed {
if nil == err {
if len(canonicalizedPathSplit) != len(testCanonicalizePathItem.canonicalizedPathSplit) {
@@ -396,23 +396,23 @@ func TestCanonicalizePath(t *testing.T) {
}
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, directoryInodeNumber, "DirectoryFileName")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, directoryInodeNumber, "DirectoryFileName")
if nil != err {
t.Fatal(err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "DirectoryName")
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "DirectoryName")
if nil != err {
t.Fatal(err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "ContainerFileName")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerInodeNumber, "ContainerFileName")
if nil != err {
t.Fatal(err)
}
- err = testMountStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "ContainerName")
+ err = testVolumeStruct.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "ContainerName")
if nil != err {
t.Fatal(err)
}
- err = testMountStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "RootFileName")
+ err = testVolumeStruct.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, "RootFileName")
if nil != err {
t.Fatal(err)
}
diff --git a/fs/setup_teardown_test.go b/fs/setup_teardown_test.go
index 43d3c6042..314417771 100644
--- a/fs/setup_teardown_test.go
+++ b/fs/setup_teardown_test.go
@@ -17,19 +17,19 @@ import (
var (
testConfMap conf.ConfMap
- testMountStruct *mountStruct // our global mountStruct to be used in tests
- testRamswiftDoneChan chan bool // our test chan used during testTeardown() to know ramswift is, indeed, down
+ testRamswiftDoneChan chan bool // our test chan used during testTeardown() to know ramswift is, indeed, down
+ testVolumeStruct *volumeStruct // our global volumeStruct to be used in tests
)
func testSetup(t *testing.T, starvationMode bool) {
var (
err error
- mountHandle MountHandle
ok bool
signalHandlerIsArmedWG sync.WaitGroup
testConfMapStrings []string
testConfUpdateStrings []string
testDir string
+ testVolumeHandle VolumeHandle
)
testDir, err = ioutil.TempDir(os.TempDir(), "ProxyFS_test_fs_")
@@ -155,11 +155,11 @@ func testSetup(t *testing.T, starvationMode bool) {
t.Fatalf("transitions.Up() failed: %v", err)
}
- mountHandle, err = MountByVolumeName("TestVolume", MountOptions(0))
+ testVolumeHandle, err = FetchVolumeHandleByVolumeName("TestVolume")
if nil != err {
- t.Fatalf("fs.Mount() failed: %v", err)
+ t.Fatalf("fs.FetchVolumeHandleByVolumeName() failed: %v", err)
}
- testMountStruct, ok = mountHandle.(*mountStruct)
+ testVolumeStruct, ok = testVolumeHandle.(*volumeStruct)
if !ok {
-		t.Fatalf("fs.Mount() returned !ok")
+		t.Fatalf("fs.FetchVolumeHandleByVolumeName() returned !ok")
}
diff --git a/fsworkout/main.go b/fsworkout/main.go
index 90387dfbc..84fd8d6ef 100644
--- a/fsworkout/main.go
+++ b/fsworkout/main.go
@@ -25,11 +25,11 @@ var (
measureCreate bool
measureDestroy bool
measureStat bool
- mountHandle fs.MountHandle
perThreadDir bool
rootDirMutex trackedlock.Mutex
stepErrChan chan error
threads uint64
+ volumeHandle fs.VolumeHandle
volumeName string
)
@@ -193,9 +193,9 @@ func main() {
volumeName = volumeList[0]
- mountHandle, err = fs.MountByVolumeName(volumeName, fs.MountOptions(0))
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName(volumeName)
if nil != err {
- fmt.Fprintf(os.Stderr, "fs.MountByVolumeName(\"%value\",) failed: %v\n", volumeName, err)
+		fmt.Fprintf(os.Stderr, "fs.FetchVolumeHandleByVolumeName(\"%v\",) failed: %v\n", volumeName, err)
os.Exit(1)
}
@@ -275,13 +275,13 @@ func fsWorkout(threadIndex uint64) {
if perThreadDir {
dirInodeName = fmt.Sprintf("%s%016X", dirInodeNamePrefix, threadIndex)
if measureCreate {
- dirInodeNumber, err = mountHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName, inode.PosixModePerm)
+ dirInodeNumber, err = volumeHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName, inode.PosixModePerm)
if nil != err {
stepErrChan <- err
runtime.Goexit()
}
} else { // measureStat || measureDestroy
- dirInodeNumber, err = mountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName)
+ dirInodeNumber, err = volumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName)
if nil != err {
stepErrChan <- err
runtime.Goexit()
@@ -304,24 +304,24 @@ func fsWorkout(threadIndex uint64) {
// Do measured operations
for i = 0; i < inodesPerThread; i++ {
if measureCreate {
- fileInodeNumber, err = mountHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i], inode.PosixModePerm)
+ fileInodeNumber, err = volumeHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i], inode.PosixModePerm)
if nil != err {
stepErrChan <- err
runtime.Goexit()
}
} else if measureStat {
- fileInodeNumber, err = mountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i])
+ fileInodeNumber, err = volumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i])
if nil != err {
stepErrChan <- err
runtime.Goexit()
}
- _, err = mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber)
+ _, err = volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber)
if nil != err {
stepErrChan <- err
runtime.Goexit()
}
} else { // measureDestroy
- err = mountHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i])
+ err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, fileInodeName[i])
if nil != err {
stepErrChan <- err
runtime.Goexit()
@@ -337,7 +337,7 @@ func fsWorkout(threadIndex uint64) {
// Do shutdown step
if perThreadDir && measureDestroy {
- err = mountHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName)
+ err = volumeHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, dirInodeName)
if nil != err {
stepErrChan <- err
runtime.Goexit()
diff --git a/fuse/config.go b/fuse/config.go
index 087f25918..ecd800bcd 100644
--- a/fuse/config.go
+++ b/fuse/config.go
@@ -130,6 +130,13 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ // TODO: Might want to actually FUSE Unmount right here
+
+ err = nil
+ return
+}
+
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
closeGate()
@@ -191,10 +198,10 @@ func performMount(volume *volumeStruct) (err error) {
curRetryCount uint32
lazyUnmountCmd *exec.Cmd
missing bool
- mountHandle fs.MountHandle
mountPointContainingDirDevice int64
mountPointDevice int64
mountPointNameBase string
+ volumeHandle fs.VolumeHandle
)
volume.mounted = false
@@ -266,12 +273,12 @@ func performMount(volume *volumeStruct) (err error) {
return
}
- mountHandle, err = fs.MountByVolumeName(volume.volumeName, fs.MountOptions(0))
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName(volume.volumeName)
if nil != err {
return
}
- fs := &ProxyFUSE{mountHandle: mountHandle}
+ fs := &ProxyFUSE{volumeHandle: volumeHandle}
// We synchronize the mounting of the mount point to make sure our FUSE goroutine
// has reached the point that it can service requests.
diff --git a/fuse/dir.go b/fuse/dir.go
index 77b77d29f..43a7b7383 100644
--- a/fuse/dir.go
+++ b/fuse/dir.go
@@ -16,15 +16,15 @@ import (
)
type Dir struct {
- mountHandle fs.MountHandle
- inodeNumber inode.InodeNumber
+ volumeHandle fs.VolumeHandle
+ inodeNumber inode.InodeNumber
}
func (d Dir) Access(ctx context.Context, req *fuselib.AccessRequest) error {
enterGate()
defer leaveGate()
- if d.mountHandle.Access(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, d.inodeNumber, inode.InodeMode(req.Mask)) {
+ if d.volumeHandle.Access(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, d.inodeNumber, inode.InodeMode(req.Mask)) {
return nil
} else {
return newFuseError(blunder.NewError(blunder.PermDeniedError, "EACCES"))
@@ -39,7 +39,7 @@ func (d Dir) Attr(ctx context.Context, attr *fuselib.Attr) (err error) {
enterGate()
defer leaveGate()
- stat, err = d.mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber)
+ stat, err = d.volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -75,7 +75,7 @@ func (d Dir) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp *fus
enterGate()
defer leaveGate()
- stat, err = d.mountHandle.Getstat(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, d.inodeNumber)
+ stat, err = d.volumeHandle.Getstat(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, d.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -114,7 +114,7 @@ func (d Dir) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp *fus
statUpdates[fs.StatCRTime] = uint64(req.Crtime.UnixNano())
}
- err = d.mountHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, statUpdates)
+ err = d.volumeHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, statUpdates)
if nil != err {
err = newFuseError(err)
}
@@ -126,36 +126,36 @@ func (d Dir) Lookup(ctx context.Context, name string) (fusefslib.Node, error) {
enterGate()
defer leaveGate()
- childInodeNumber, err := d.mountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber, name)
+ childInodeNumber, err := d.volumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber, name)
if err != nil {
return nil, fuselib.ENOENT
}
- isDir, err := d.mountHandle.IsDir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
+ isDir, err := d.volumeHandle.IsDir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
if isDir {
- return Dir{mountHandle: d.mountHandle, inodeNumber: childInodeNumber}, nil
+ return Dir{volumeHandle: d.volumeHandle, inodeNumber: childInodeNumber}, nil
} else if err != nil {
err = newFuseError(err)
return nil, err
}
- isFile, err := d.mountHandle.IsFile(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
+ isFile, err := d.volumeHandle.IsFile(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
if isFile {
- return File{mountHandle: d.mountHandle, inodeNumber: childInodeNumber}, nil
+ return File{volumeHandle: d.volumeHandle, inodeNumber: childInodeNumber}, nil
} else if err != nil {
err = newFuseError(err)
return nil, err
}
- isSymlink, err := d.mountHandle.IsSymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
+ isSymlink, err := d.volumeHandle.IsSymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
if isSymlink {
- return Symlink{mountHandle: d.mountHandle, inodeNumber: childInodeNumber}, nil
+ return Symlink{volumeHandle: d.volumeHandle, inodeNumber: childInodeNumber}, nil
} else if err != nil {
err = newFuseError(err)
return nil, err
}
- actualType, err := d.mountHandle.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
+ actualType, err := d.volumeHandle.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, childInodeNumber)
if err != nil {
err = newFuseError(err)
return nil, err
@@ -194,7 +194,7 @@ func (d Dir) ReadDirAll(ctx context.Context) ([]fuselib.Dirent, error) {
var readCount uint64
var err error
- readEntries, readCount, more, err = d.mountHandle.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber, 1024, lastEntryName)
+ readEntries, readCount, more, err = d.volumeHandle.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, d.inodeNumber, 1024, lastEntryName)
if err != nil {
logger.ErrorfWithError(err, "Error in ReadDirAll")
return nil, fuselib.EIO
@@ -222,9 +222,9 @@ func (d Dir) Remove(ctx context.Context, req *fuselib.RemoveRequest) (err error)
defer leaveGate()
if req.Dir {
- err = d.mountHandle.Rmdir(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name)
+ err = d.volumeHandle.Rmdir(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name)
} else {
- err = d.mountHandle.Unlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name)
+ err = d.volumeHandle.Unlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name)
}
if nil != err {
err = newFuseError(err)
@@ -243,12 +243,12 @@ func (d Dir) Mknod(ctx context.Context, req *fuselib.MknodRequest) (fusefslib.No
err = newFuseError(err)
return nil, err
}
- inodeNumber, err := d.mountHandle.Create(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, inode.InodeMode(req.Mode))
+ inodeNumber, err := d.volumeHandle.Create(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, inode.InodeMode(req.Mode))
if err != nil {
err = newFuseError(err)
return nil, err
}
- file := File{mountHandle: d.mountHandle, inodeNumber: inodeNumber}
+ file := File{volumeHandle: d.volumeHandle, inodeNumber: inodeNumber}
return file, nil
}
@@ -262,12 +262,12 @@ func (d Dir) Create(ctx context.Context, req *fuselib.CreateRequest, resp *fusel
err = newFuseError(err)
return nil, nil, err
}
- inodeNumber, err := d.mountHandle.Create(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, inode.InodeMode(req.Mode))
+ inodeNumber, err := d.volumeHandle.Create(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, inode.InodeMode(req.Mode))
if err != nil {
err = newFuseError(err)
return nil, nil, err
}
- file := File{mountHandle: d.mountHandle, inodeNumber: inodeNumber}
+ file := File{volumeHandle: d.volumeHandle, inodeNumber: inodeNumber}
return file, file, nil
}
@@ -275,7 +275,7 @@ func (d Dir) Flush(ctx context.Context, req *fuselib.FlushRequest) error {
enterGate()
defer leaveGate()
- err := d.mountHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber)
+ err := d.volumeHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber)
if err != nil {
err = newFuseError(err)
}
@@ -286,7 +286,7 @@ func (d Dir) Fsync(ctx context.Context, req *fuselib.FsyncRequest) error {
enterGate()
defer leaveGate()
- err := d.mountHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil,
+ err := d.volumeHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil,
d.inodeNumber)
if err != nil {
err = newFuseError(err)
@@ -299,12 +299,12 @@ func (d Dir) Mkdir(ctx context.Context, req *fuselib.MkdirRequest) (fusefslib.No
defer leaveGate()
trimmedMode := inode.InodeMode(req.Mode) & inode.PosixModePerm
- newDirInodeNumber, err := d.mountHandle.Mkdir(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, trimmedMode)
+ newDirInodeNumber, err := d.volumeHandle.Mkdir(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.Name, trimmedMode)
if err != nil {
err = newFuseError(err)
return nil, err
}
- return Dir{mountHandle: d.mountHandle, inodeNumber: newDirInodeNumber}, nil
+ return Dir{volumeHandle: d.volumeHandle, inodeNumber: newDirInodeNumber}, nil
}
func (d Dir) Rename(ctx context.Context, req *fuselib.RenameRequest, newDir fusefslib.Node) error {
@@ -315,7 +315,7 @@ func (d Dir) Rename(ctx context.Context, req *fuselib.RenameRequest, newDir fuse
if !ok {
return fuselib.EIO
}
- err := d.mountHandle.Rename(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.OldName, dstDir.inodeNumber, req.NewName)
+ err := d.volumeHandle.Rename(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.OldName, dstDir.inodeNumber, req.NewName)
if err != nil {
err = newFuseError(err)
}
@@ -326,13 +326,13 @@ func (d Dir) Symlink(ctx context.Context, req *fuselib.SymlinkRequest) (fusefsli
enterGate()
defer leaveGate()
- symlinkInodeNumber, err := d.mountHandle.Symlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.NewName, req.Target)
+ symlinkInodeNumber, err := d.volumeHandle.Symlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.NewName, req.Target)
if err != nil {
err = newFuseError(err)
return nil, err
}
- return Symlink{mountHandle: d.mountHandle, inodeNumber: symlinkInodeNumber}, nil
+ return Symlink{volumeHandle: d.volumeHandle, inodeNumber: symlinkInodeNumber}, nil
}
func (d Dir) Link(ctx context.Context, req *fuselib.LinkRequest, old fusefslib.Node) (fusefslib.Node, error) {
@@ -345,7 +345,7 @@ func (d Dir) Link(ctx context.Context, req *fuselib.LinkRequest, old fusefslib.N
return nil, err
}
- err := d.mountHandle.Link(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.NewName, oldFile.inodeNumber)
+ err := d.volumeHandle.Link(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, d.inodeNumber, req.NewName, oldFile.inodeNumber)
if err != nil {
err = newFuseError(err)
}
diff --git a/fuse/file.go b/fuse/file.go
index 1ef48df1a..bd8817bec 100644
--- a/fuse/file.go
+++ b/fuse/file.go
@@ -15,15 +15,15 @@ import (
)
type File struct {
- mountHandle fs.MountHandle
- inodeNumber inode.InodeNumber
+ volumeHandle fs.VolumeHandle
+ inodeNumber inode.InodeNumber
}
func (f File) Access(ctx context.Context, req *fuselib.AccessRequest) (err error) {
enterGate()
defer leaveGate()
- if f.mountHandle.Access(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, f.inodeNumber, inode.InodeMode(req.Mask)) {
+ if f.volumeHandle.Access(inode.InodeUserID(req.Uid), inode.InodeGroupID(req.Gid), nil, f.inodeNumber, inode.InodeMode(req.Mask)) {
err = nil
} else {
err = newFuseError(blunder.NewError(blunder.PermDeniedError, "EACCES"))
@@ -40,7 +40,7 @@ func (f File) Attr(ctx context.Context, attr *fuselib.Attr) (err error) {
enterGate()
defer leaveGate()
- stat, err = f.mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, f.inodeNumber)
+ stat, err = f.volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, f.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -78,7 +78,7 @@ func (f File) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp *fu
enterGate()
defer leaveGate()
- stat, err = f.mountHandle.Getstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber)
+ stat, err = f.volumeHandle.Getstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -117,14 +117,14 @@ func (f File) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp *fu
statUpdates[fs.StatCRTime] = uint64(req.Crtime.UnixNano())
}
- err = f.mountHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, statUpdates)
+ err = f.volumeHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, statUpdates)
if nil != err {
err = newFuseError(err)
return
}
if 0 != (fuselib.SetattrSize & req.Valid) {
- err = f.mountHandle.Resize(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, req.Size)
+ err = f.volumeHandle.Resize(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, req.Size)
if nil != err {
err = newFuseError(err)
return
@@ -151,7 +151,7 @@ func (f File) Fsync(ctx context.Context, req *fuselib.FsyncRequest) (err error)
enterGate()
defer leaveGate()
- err = f.mountHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber)
+ err = f.volumeHandle.Flush(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber)
if nil != err {
err = newFuseError(err)
}
@@ -162,7 +162,7 @@ func (f File) Read(ctx context.Context, req *fuselib.ReadRequest, resp *fuselib.
enterGate()
defer leaveGate()
- buf, err := f.mountHandle.Read(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, uint64(req.Offset), uint64(req.Size), nil)
+ buf, err := f.volumeHandle.Read(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, uint64(req.Offset), uint64(req.Size), nil)
if err != nil && err != io.EOF {
err = newFuseError(err)
return
@@ -183,7 +183,7 @@ func (f File) Write(ctx context.Context, req *fuselib.WriteRequest, resp *fuseli
bufferedData := make([]byte, len(req.Data), len(req.Data))
copy(bufferedData, req.Data)
- size, err := f.mountHandle.Write(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, uint64(req.Offset), bufferedData, nil)
+ size, err := f.volumeHandle.Write(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, f.inodeNumber, uint64(req.Offset), bufferedData, nil)
if nil == err {
resp.Size = int(size)
} else {
diff --git a/fuse/fuse.go b/fuse/fuse.go
index ab586ade1..8985f3f7b 100644
--- a/fuse/fuse.go
+++ b/fuse/fuse.go
@@ -14,12 +14,12 @@ import (
)
type ProxyFUSE struct {
- mountHandle fs.MountHandle
- wg sync.WaitGroup // Used to synchronize mount
+ volumeHandle fs.VolumeHandle
+ wg sync.WaitGroup // Used to synchronize mount
}
func (pfs *ProxyFUSE) Root() (fusefslib.Node, error) {
- root := Dir{mountHandle: pfs.mountHandle, inodeNumber: inode.RootDirInodeNumber}
+ root := Dir{volumeHandle: pfs.volumeHandle, inodeNumber: inode.RootDirInodeNumber}
// Signal any waiters that we have completed mounting the volume.
// We know this because this call is only made after the user level FUSE
@@ -32,7 +32,7 @@ func (pfs *ProxyFUSE) Statfs(ctx context.Context, req *fuselib.StatfsRequest, re
enterGate()
defer leaveGate()
- statvfs, err := pfs.mountHandle.StatVfs()
+ statvfs, err := pfs.volumeHandle.StatVfs()
if err != nil {
return newFuseError(err)
}
diff --git a/fuse/symlink.go b/fuse/symlink.go
index 988e3b017..09c0b4c56 100644
--- a/fuse/symlink.go
+++ b/fuse/symlink.go
@@ -14,8 +14,8 @@ import (
)
type Symlink struct {
- mountHandle fs.MountHandle
- inodeNumber inode.InodeNumber
+ volumeHandle fs.VolumeHandle
+ inodeNumber inode.InodeNumber
}
func (s Symlink) Attr(ctx context.Context, attr *fuselib.Attr) (err error) {
@@ -26,7 +26,7 @@ func (s Symlink) Attr(ctx context.Context, attr *fuselib.Attr) (err error) {
enterGate()
defer leaveGate()
- stat, err = s.mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, s.inodeNumber)
+ stat, err = s.volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, s.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -62,7 +62,7 @@ func (s Symlink) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp
enterGate()
defer leaveGate()
- stat, err = s.mountHandle.Getstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber)
+ stat, err = s.volumeHandle.Getstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber)
if nil != err {
err = newFuseError(err)
return
@@ -101,7 +101,7 @@ func (s Symlink) Setattr(ctx context.Context, req *fuselib.SetattrRequest, resp
statUpdates[fs.StatCRTime] = uint64(req.Crtime.UnixNano())
}
- err = s.mountHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber, statUpdates)
+ err = s.volumeHandle.Setstat(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber, statUpdates)
if nil != err {
err = newFuseError(err)
}
@@ -114,7 +114,7 @@ func (s Symlink) Fsync(ctx context.Context, req *fuselib.FsyncRequest) error {
}
func (s Symlink) Readlink(ctx context.Context, req *fuselib.ReadlinkRequest) (string, error) {
- target, err := s.mountHandle.Readsymlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber)
+ target, err := s.volumeHandle.Readsymlink(inode.InodeUserID(req.Header.Uid), inode.InodeGroupID(req.Header.Gid), nil, s.inodeNumber)
if nil != err {
err = newFuseError(err)
}
diff --git a/halter/config.go b/halter/config.go
index 010050507..a12fdf0ec 100644
--- a/halter/config.go
+++ b/halter/config.go
@@ -57,6 +57,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
diff --git a/headhunter/config.go b/headhunter/config.go
index 63e37e31a..7376698c1 100644
--- a/headhunter/config.go
+++ b/headhunter/config.go
@@ -673,6 +673,9 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
EnableObjectDeletions() // Otherwise, we will hang
return nil
diff --git a/httpserver/config.go b/httpserver/config.go
index 3b7d39b74..b89265b1b 100644
--- a/httpserver/config.go
+++ b/httpserver/config.go
@@ -67,7 +67,7 @@ type JobStatusJSONPackedStruct struct {
type volumeStruct struct {
trackedlock.Mutex
name string
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
inodeVolumeHandle inode.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
fsckActiveJob *jobStruct
@@ -177,7 +177,7 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
scrubJobs: sortedmap.NewLLRBTree(sortedmap.CompareUint64, nil),
}
- volume.fsMountHandle, err = fs.MountByVolumeName(volume.name, 0)
+ volume.fsVolumeHandle, err = fs.FetchVolumeHandleByVolumeName(volume.name)
if nil != err {
return
}
@@ -266,6 +266,10 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return // return err from globals.volumeLLRB.DeleteByKey sufficient
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
+
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
globals.confMap = confMap
globals.active = false
diff --git a/httpserver/request_handler.go b/httpserver/request_handler.go
index 99be9e2e3..6c599221e 100644
--- a/httpserver/request_handler.go
+++ b/httpserver/request_handler.go
@@ -1030,14 +1030,14 @@ func doDefrag(responseWriter http.ResponseWriter, request *http.Request, request
for ; pathPartIndex < requestState.numPathParts; pathPartIndex++ {
dirInodeNumber = dirEntryInodeNumber
- dirEntryInodeNumber, err = requestState.volume.fsMountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, requestState.pathSplit[pathPartIndex+1])
+ dirEntryInodeNumber, err = requestState.volume.fsVolumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, requestState.pathSplit[pathPartIndex+1])
if nil != err {
responseWriter.WriteHeader(http.StatusNotFound)
return
}
}
- err = requestState.volume.fsMountHandle.DefragmentFile(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntryInodeNumber)
+ err = requestState.volume.fsVolumeHandle.DefragmentFile(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntryInodeNumber)
if nil == err {
responseWriter.WriteHeader(http.StatusOK)
@@ -1087,7 +1087,7 @@ func doExtentMap(responseWriter http.ResponseWriter, request *http.Request, requ
for ; pathPartIndex < requestState.numPathParts; pathPartIndex++ {
dirInodeNumber = dirEntryInodeNumber
- dirEntryInodeNumber, err = requestState.volume.fsMountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, requestState.pathSplit[pathPartIndex+1])
+ dirEntryInodeNumber, err = requestState.volume.fsVolumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber, requestState.pathSplit[pathPartIndex+1])
if nil != err {
if requestState.formatResponseAsJSON {
responseWriter.WriteHeader(http.StatusNotFound)
@@ -1102,7 +1102,7 @@ func doExtentMap(responseWriter http.ResponseWriter, request *http.Request, requ
}
}
- extentMapChunk, err = requestState.volume.fsMountHandle.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntryInodeNumber, uint64(0), math.MaxInt64, int64(0))
+ extentMapChunk, err = requestState.volume.fsVolumeHandle.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirEntryInodeNumber, uint64(0), math.MaxInt64, int64(0))
if nil != err {
if requestState.formatResponseAsJSON {
responseWriter.WriteHeader(http.StatusNotFound)
diff --git a/httpserver/setup_teardown_test.go b/httpserver/setup_teardown_test.go
index 2c8ae6578..d157921ba 100644
--- a/httpserver/setup_teardown_test.go
+++ b/httpserver/setup_teardown_test.go
@@ -81,6 +81,9 @@ func testSetup(t *testing.T) {
"JSONRPCServer.TCPPort=12346", // 12346 instead of 12345 so that test can run if proxyfsd is already running
"JSONRPCServer.FastTCPPort=32346", // ...and similarly here...
"JSONRPCServer.DataPathLogging=false",
+ "JSONRPCServer.MinLeaseDuration=250ms",
+ "JSONRPCServer.LeaseInterruptInterval=250ms",
+ "JSONRPCServer.LeaseInterruptLimit=20",
"RamSwiftInfo.MaxAccountNameLength=256",
"RamSwiftInfo.MaxContainerNameLength=256",
"RamSwiftInfo.MaxObjectNameLength=1024",
diff --git a/inode/api.go b/inode/api.go
index 1465cde01..96d480ca3 100644
--- a/inode/api.go
+++ b/inode/api.go
@@ -132,6 +132,14 @@ const (
SnapShotDirName = ".snapshot"
)
+type RWModeType uint8
+
+const (
+ RWModeNormal RWModeType = iota // All reads, writes, etc... should be allowed
+	RWModeNoWrite                    // Same as RWModeNormal except Write() & ProvisionObject() should fail
+ RWModeReadOnly // No operations that modify state (data or metadata) should be allowed
+)
+
func (de *DirEntry) Size() int {
// sizeof(InodeNumber) + sizeof(InodeType) + sizeof(DirLocation) + string data + null byte delimiter
return int(unsafe.Sizeof(de.InodeNumber)) + int(unsafe.Sizeof(de.Type)) + int(unsafe.Sizeof(de.NextDirLocation)) + len(de.Basename) + 1
@@ -235,3 +243,11 @@ type VolumeHandle interface {
CreateSymlink(target string, filePerm InodeMode, userID InodeUserID, groupID InodeGroupID) (symlinkInodeNumber InodeNumber, err error)
GetSymlink(symlinkInodeNumber InodeNumber) (target string, err error)
}
+
+// SetRWMode sets the package to either allow all read/write operations (RWModeNormal),
+// allow all except Write() and ProvisionObject() operations (RWModeNoWrite), or
+// disallow any data or metadata operations (RWModeReadOnly).
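+//
+// Purely as an illustrative sketch (not part of this API), a caller tracking
+// Swift recon utilization might map thresholds to modes roughly as follows;
+// the threshold and usage names below are hypothetical:
+//
+//	switch {
+//	case usagePercent >= readOnlyThresholdPercent:
+//		_ = SetRWMode(RWModeReadOnly)
+//	case usagePercent >= noWriteThresholdPercent:
+//		_ = SetRWMode(RWModeNoWrite)
+//	default:
+//		_ = SetRWMode(RWModeNormal)
+//	}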
+func SetRWMode(rwMode RWModeType) (err error) {
+ err = setRWMode(rwMode)
+ return
+}
diff --git a/inode/config.go b/inode/config.go
index 41a466a73..813014c5e 100644
--- a/inode/config.go
+++ b/inode/config.go
@@ -9,8 +9,10 @@ import (
"github.com/swiftstack/cstruct"
"github.com/swiftstack/sortedmap"
+ "github.com/swiftstack/ProxyFS/blunder"
"github.com/swiftstack/ProxyFS/conf"
"github.com/swiftstack/ProxyFS/headhunter"
+ "github.com/swiftstack/ProxyFS/logger"
"github.com/swiftstack/ProxyFS/swiftclient"
"github.com/swiftstack/ProxyFS/trackedlock"
"github.com/swiftstack/ProxyFS/transitions"
@@ -82,6 +84,13 @@ type volumeStruct struct {
snapShotPolicy *snapShotPolicyStruct
}
+const (
+ defaultNoWriteErrno = blunder.NoSpaceError
+ defaultNoWriteErrnoString = "ENOSPC"
+ defaultReadOnlyErrno = blunder.ReadOnlyError
+ defaultReadOnlyErrnoString = "EROFS"
+)
+
type globalsStruct struct {
trackedlock.Mutex
whoAmI string
@@ -105,6 +114,11 @@ type globalsStruct struct {
openLogSegmentLRUHead *inFlightLogSegmentStruct
openLogSegmentLRUTail *inFlightLogSegmentStruct
openLogSegmentLRUItems uint64
+ noWriteThresholdErrno blunder.FsError // either blunder.NotPermError or blunder.ReadOnlyError or blunder.NoSpaceError
+ noWriteThresholdErrnoString string // either "EPERM" or "EROFS" or "ENOSPC"
+ readOnlyThresholdErrno blunder.FsError // either blunder.NotPermError or blunder.ReadOnlyError or blunder.NoSpaceError
+ readOnlyThresholdErrnoString string // either "EPERM" or "EROFS" or "ENOSPC"
+ rwMode RWModeType // One of RWMode{Normal|NoWrite|ReadOnly}
}
var globals globalsStruct
@@ -222,6 +236,8 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
swiftclient.SetStarvationCallbackFunc(chunkedPutConnectionPoolStarvationCallback)
+ globals.rwMode = RWModeNormal
+
err = nil
return
}
@@ -676,6 +692,11 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return // err from call to adoptVolumeGroupReadCacheParameters() is fine to return here
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ err = nil
+ return
+}
+
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
var (
volume *volumeStruct
@@ -694,9 +715,55 @@ func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
func (dummy *globalsStruct) SignaledFinish(confMap conf.ConfMap) (err error) {
var (
- volume *volumeStruct
+ swiftReconNoWriteErrno string
+ swiftReconReadOnlyErrno string
+ volume *volumeStruct
)
+ swiftReconNoWriteErrno, err = confMap.FetchOptionValueString("SwiftClient", "SwiftReconNoWriteErrno")
+ if nil == err {
+ switch swiftReconNoWriteErrno {
+ case "EPERM":
+ globals.noWriteThresholdErrno = blunder.NotPermError
+ globals.noWriteThresholdErrnoString = "EPERM"
+ case "EROFS":
+ globals.noWriteThresholdErrno = blunder.ReadOnlyError
+ globals.noWriteThresholdErrnoString = "EROFS"
+ case "ENOSPC":
+ globals.noWriteThresholdErrno = blunder.NoSpaceError
+ globals.noWriteThresholdErrnoString = "ENOSPC"
+ default:
+ err = fmt.Errorf("[SwiftClient]SwiftReconReadOnlyErrno must be either EPERM or EROFS or ENOSPC")
+ return
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconNoWriteErrno... defaulting to %s", defaultNoWriteErrno)
+ globals.noWriteThresholdErrno = defaultNoWriteErrno
+ globals.noWriteThresholdErrnoString = defaultNoWriteErrnoString
+ }
+
+ swiftReconReadOnlyErrno, err = confMap.FetchOptionValueString("SwiftClient", "SwiftReconReadOnlyErrno")
+ if nil == err {
+ switch swiftReconReadOnlyErrno {
+ case "EPERM":
+ globals.readOnlyThresholdErrno = blunder.NotPermError
+ globals.readOnlyThresholdErrnoString = "EPERM"
+ case "EROFS":
+ globals.readOnlyThresholdErrno = blunder.ReadOnlyError
+ globals.readOnlyThresholdErrnoString = "EROFS"
+ case "ENOSPC":
+ globals.readOnlyThresholdErrno = blunder.NoSpaceError
+ globals.readOnlyThresholdErrnoString = "ENOSPC"
+ default:
+ err = fmt.Errorf("[SwiftClient]SwiftReconReadOnlyErrno must be either EPERM or EROFS or ENOSPC")
+ return
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconReadOnlyErrno... defaulting to %s", defaultReadOnlyErrno)
+ globals.readOnlyThresholdErrno = defaultReadOnlyErrno
+ globals.readOnlyThresholdErrnoString = defaultReadOnlyErrnoString
+ }
+
for _, volume = range globals.volumeMap {
if volume.served {
err = volume.loadSnapShotPolicy(confMap)
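
The two switch statements above repeat the same string-to-errno mapping for SwiftReconNoWriteErrno and SwiftReconReadOnlyErrno. A shared-helper shape is sketched below purely for illustration; mapReconErrno() and the reconerrno package are hypothetical and not part of this diff.

package reconerrno // hypothetical package, for illustration only

import (
    "fmt"

    "github.com/swiftstack/ProxyFS/blunder"
)

// mapReconErrno converts a SwiftRecon*Errno config value into the blunder
// error (and its string form) that the corresponding threshold should report.
func mapReconErrno(optionName, value string) (errno blunder.FsError, errnoString string, err error) {
    switch value {
    case "EPERM":
        return blunder.NotPermError, "EPERM", nil
    case "EROFS":
        return blunder.ReadOnlyError, "EROFS", nil
    case "ENOSPC":
        return blunder.NoSpaceError, "ENOSPC", nil
    default:
        return 0, "", fmt.Errorf("[SwiftClient]%s must be either EPERM or EROFS or ENOSPC", optionName)
    }
}
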
diff --git a/inode/dir.go b/inode/dir.go
index f6cfda8e9..abcf7219a 100644
--- a/inode/dir.go
+++ b/inode/dir.go
@@ -90,6 +90,11 @@ func (vS *volumeStruct) createRootOrSubDir(filePerm InodeMode, userID InodeUserI
}
func (vS *volumeStruct) CreateDir(filePerm InodeMode, userID InodeUserID, groupID InodeGroupID) (dirInodeNumber InodeNumber, err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
stats.IncrementOperations(&stats.DirCreateOps)
dirInodeNumber, err = vS.createRootOrSubDir(filePerm, userID, groupID, false)
@@ -170,6 +175,11 @@ func (vS *volumeStruct) Link(dirInodeNumber InodeNumber, basename string, target
targetInode *inMemoryInodeStruct
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
if (RootDirInodeNumber == dirInodeNumber) && (SnapShotDirName == basename) {
err = blunder.NewError(blunder.InvalidArgError, "Link() to /%v not allowed", SnapShotDirName)
return
@@ -314,6 +324,11 @@ func (vS *volumeStruct) Unlink(dirInodeNumber InodeNumber, basename string, remo
untargetInodeNumber InodeNumber
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
if (RootDirInodeNumber == dirInodeNumber) && (SnapShotDirName == basename) {
err = blunder.NewError(blunder.InvalidArgError, "Unlink() of /%v not allowed", SnapShotDirName)
return
@@ -389,6 +404,11 @@ func (vS *volumeStruct) Unlink(dirInodeNumber InodeNumber, basename string, remo
}
func (vS *volumeStruct) Move(srcDirInodeNumber InodeNumber, srcBasename string, dstDirInodeNumber InodeNumber, dstBasename string) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
if (RootDirInodeNumber == srcDirInodeNumber) && (SnapShotDirName == srcBasename) {
err = blunder.NewError(blunder.InvalidArgError, "Move() from /%v not allowed", SnapShotDirName)
return
diff --git a/inode/file.go b/inode/file.go
index 334347843..63e493cc4 100644
--- a/inode/file.go
+++ b/inode/file.go
@@ -22,6 +22,11 @@ type fileExtentStruct struct {
}
func (vS *volumeStruct) CreateFile(filePerm InodeMode, userID InodeUserID, groupID InodeGroupID) (fileInodeNumber InodeNumber, err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
fileInode, err := vS.createFileInode(filePerm, userID, groupID)
if err != nil {
return 0, err
@@ -761,6 +766,11 @@ func recordWrite(fileInode *inMemoryInodeStruct, fileOffset uint64, length uint6
}
func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf []byte, profiler *utils.Profiler) (err error) {
+ err = enforceRWMode(true)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(fileInodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("Write() on non-LiveView fileInodeNumber not allowed")
@@ -809,6 +819,11 @@ func (vS *volumeStruct) Write(fileInodeNumber InodeNumber, offset uint64, buf []
}
func (vS *volumeStruct) Wrote(fileInodeNumber InodeNumber, containerName string, objectName string, fileOffset []uint64, objectOffset []uint64, length []uint64, patchOnly bool) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
if (len(fileOffset) != len(objectOffset)) || (len(objectOffset) != len(length)) {
err = fmt.Errorf("Wrote() called with unequal # of fileOffset's (%d), objectOffset's (%d), and length's (%d)", len(fileOffset), len(objectOffset), len(length))
return
@@ -887,6 +902,11 @@ func (vS *volumeStruct) Wrote(fileInodeNumber InodeNumber, containerName string,
}
func (vS *volumeStruct) SetSize(fileInodeNumber InodeNumber, size uint64) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(fileInodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetSize() on non-LiveView fileInodeNumber not allowed")
@@ -921,6 +941,11 @@ func (vS *volumeStruct) SetSize(fileInodeNumber InodeNumber, size uint64) (err e
}
func (vS *volumeStruct) Flush(fileInodeNumber InodeNumber, andPurge bool) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
fileInode, ok, err := vS.fetchInode(fileInodeNumber)
if nil != err {
// this indicates disk corruption or software bug
@@ -1003,10 +1028,7 @@ func (vS *volumeStruct) resetFileInodeInMemory(fileInode *inMemoryInodeStruct) (
return
}
-func (vS *volumeStruct) Coalesce(destInodeNumber InodeNumber, metaDataName string,
- metaData []byte, elements []*CoalesceElement) (
- attrChangeTime time.Time, modificationTime time.Time, numWrites uint64, fileSize uint64, err error) {
-
+func (vS *volumeStruct) Coalesce(destInodeNumber InodeNumber, metaDataName string, metaData []byte, elements []*CoalesceElement) (attrChangeTime time.Time, modificationTime time.Time, numWrites uint64, fileSize uint64, err error) {
var (
alreadyInInodeMap bool
coalesceTime time.Time
@@ -1029,6 +1051,11 @@ func (vS *volumeStruct) Coalesce(destInodeNumber InodeNumber, metaDataName strin
snapShotIDType headhunter.SnapShotIDType
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
// Validate all referenced {Dir|File}Inodes
inodeMap = make(map[InodeNumber]*inMemoryInodeStruct)
@@ -1245,6 +1272,11 @@ func (vS *volumeStruct) DefragmentFile(fileInodeNumber InodeNumber, startingFile
fileInode *inMemoryInodeStruct
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
fileInode, err = vS.fetchInodeType(fileInodeNumber, FileType)
if nil != err {
return
diff --git a/inode/inode.go b/inode/inode.go
index 450ca23cb..c62c53fb9 100644
--- a/inode/inode.go
+++ b/inode/inode.go
@@ -139,6 +139,45 @@ func compareInodeNumber(key1 sortedmap.Key, key2 sortedmap.Key) (result int, err
return
}
+func setRWMode(rwMode RWModeType) (err error) {
+ if rwMode != globals.rwMode {
+ switch rwMode {
+ case RWModeNormal:
+ stats.IncrementOperations(&stats.ReconCheckTriggeredNormalMode)
+ case RWModeNoWrite:
+ stats.IncrementOperations(&stats.ReconCheckTriggeredNoWriteMode)
+ case RWModeReadOnly:
+ stats.IncrementOperations(&stats.ReconCheckTriggeredReadOnlyMode)
+ default:
+ err = fmt.Errorf("SetRWMode(rwMode==%d) not allowed... must be one of RWModeNormal(%d), RWModeNoWrite(%d), or RWModeReadOnly(%d)", rwMode, RWModeNormal, RWModeNoWrite, RWModeReadOnly)
+ return
+ }
+
+ globals.rwMode = rwMode
+ }
+
+ err = nil
+ return
+}
+
+func enforceRWMode(enforceNoWriteMode bool) (err error) {
+ var (
+ rwModeCopy RWModeType
+ )
+
+ rwModeCopy = globals.rwMode
+
+ if rwModeCopy == RWModeReadOnly {
+ err = blunder.NewError(globals.readOnlyThresholdErrno, globals.readOnlyThresholdErrnoString)
+ } else if enforceNoWriteMode && (rwModeCopy == RWModeNoWrite) {
+ err = blunder.NewError(globals.noWriteThresholdErrno, globals.noWriteThresholdErrnoString)
+ } else {
+ err = nil
+ }
+
+ return
+}
+
func (vS *volumeStruct) fetchOnDiskInode(inodeNumber InodeNumber) (inMemoryInode *inMemoryInodeStruct, ok bool, err error) {
var (
bytesConsumedByCorruptionDetected uint64
@@ -1116,6 +1155,11 @@ func (vS *volumeStruct) Access(inodeNumber InodeNumber, userID InodeUserID, grou
}
func (vS *volumeStruct) ProvisionObject() (objectPath string, err error) {
+ err = enforceRWMode(true)
+ if nil != err {
+ return
+ }
+
containerName, objectNumber, err := vS.provisionObject()
if nil != err {
return
@@ -1133,6 +1177,11 @@ func (vS *volumeStruct) Purge(inodeNumber InodeNumber) (err error) {
ok bool
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
inode, ok, err = vS.inodeCacheFetch(inodeNumber)
if (nil != err) || !ok {
return
@@ -1157,6 +1206,11 @@ func (vS *volumeStruct) Purge(inodeNumber InodeNumber) (err error) {
func (vS *volumeStruct) Destroy(inodeNumber InodeNumber) (err error) {
logger.Tracef("inode.Destroy(): volume '%s' inode %d", vS.volumeName, inodeNumber)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("Destroy() on non-LiveView inodeNumber not allowed")
@@ -1391,6 +1445,11 @@ func (vS *volumeStruct) GetLinkCount(inodeNumber InodeNumber) (linkCount uint64,
// SetLinkCount is used to adjust the LinkCount property to match current reference count during FSCK TreeWalk.
func (vS *volumeStruct) SetLinkCount(inodeNumber InodeNumber, linkCount uint64) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetLinkCount() on non-LiveView inodeNumber not allowed")
@@ -1426,6 +1485,11 @@ func (vS *volumeStruct) SetLinkCount(inodeNumber InodeNumber, linkCount uint64)
}
func (vS *volumeStruct) SetCreationTime(inodeNumber InodeNumber, CreationTime time.Time) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetCreationTime() on non-LiveView inodeNumber not allowed")
@@ -1461,6 +1525,11 @@ func (vS *volumeStruct) SetCreationTime(inodeNumber InodeNumber, CreationTime ti
}
func (vS *volumeStruct) SetModificationTime(inodeNumber InodeNumber, ModificationTime time.Time) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetModificationTime() on non-LiveView inodeNumber not allowed")
@@ -1497,6 +1566,11 @@ func (vS *volumeStruct) SetModificationTime(inodeNumber InodeNumber, Modificatio
}
func (vS *volumeStruct) SetAccessTime(inodeNumber InodeNumber, accessTime time.Time) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetAccessTime() on non-LiveView inodeNumber not allowed")
@@ -1564,6 +1638,11 @@ func determineMode(filePerm InodeMode, inodeType InodeType) (fileMode InodeMode,
}
func (vS *volumeStruct) SetPermMode(inodeNumber InodeNumber, filePerm InodeMode) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetPermMode() on non-LiveView inodeNumber not allowed")
@@ -1608,6 +1687,11 @@ func (vS *volumeStruct) SetPermMode(inodeNumber InodeNumber, filePerm InodeMode)
}
func (vS *volumeStruct) SetOwnerUserID(inodeNumber InodeNumber, userID InodeUserID) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetOwnerUserID() on non-LiveView inodeNumber not allowed")
@@ -1646,6 +1730,11 @@ func (vS *volumeStruct) SetOwnerUserID(inodeNumber InodeNumber, userID InodeUser
}
func (vS *volumeStruct) SetOwnerUserIDGroupID(inodeNumber InodeNumber, userID InodeUserID, groupID InodeGroupID) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetOwnerUserIDGroupID() on non-LiveView inodeNumber not allowed")
@@ -1685,6 +1774,11 @@ func (vS *volumeStruct) SetOwnerUserIDGroupID(inodeNumber InodeNumber, userID In
}
func (vS *volumeStruct) SetOwnerGroupID(inodeNumber InodeNumber, groupID InodeGroupID) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("SetOwnerGroupID() on non-LiveView inodeNumber not allowed")
@@ -1762,6 +1856,11 @@ func (vS *volumeStruct) GetStream(inodeNumber InodeNumber, inodeStreamName strin
}
func (vS *volumeStruct) PutStream(inodeNumber InodeNumber, inodeStreamName string, buf []byte) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("PutStream() on non-LiveView inodeNumber not allowed")
@@ -1804,6 +1903,11 @@ func (vS *volumeStruct) PutStream(inodeNumber InodeNumber, inodeStreamName strin
}
func (vS *volumeStruct) DeleteStream(inodeNumber InodeNumber, inodeStreamName string) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
snapShotIDType, _, _ := vS.headhunterVolumeHandle.SnapShotU64Decode(uint64(inodeNumber))
if headhunter.SnapShotIDTypeLive != snapShotIDType {
err = fmt.Errorf("DeleteStream() on non-LiveView inodeNumber not allowed")
@@ -1881,6 +1985,11 @@ func (vS *volumeStruct) FetchFragmentationReport(inodeNumber InodeNumber) (fragm
}
func (vS *volumeStruct) Optimize(inodeNumber InodeNumber, maxDuration time.Duration) (err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
err = fmt.Errorf("Optimize not yet implemented")
return
}
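
Combined with the enforceRWMode() calls added across inode/dir.go, inode/file.go, inode/symlink.go, inode/volume.go, and this file, the per-mode behavior works out as summarized in the sketch below. The inodemodes package and enterNoWriteMode() helper are illustrative assumptions; only SetRWMode() and the RWMode* constants come from this change.

package inodemodes // hypothetical package, for illustration only

import "github.com/swiftstack/ProxyFS/inode"

// Behavior summary of the gate (defaults: ENOSPC for NoWrite, EROFS for ReadOnly):
//
//   rwMode          mutating ops (enforceRWMode(false))       Write()/ProvisionObject() (enforceRWMode(true))
//   RWModeNormal    allowed                                   allowed
//   RWModeNoWrite   allowed                                   fail with SwiftReconNoWriteErrno
//   RWModeReadOnly  fail with SwiftReconReadOnlyErrno         fail with SwiftReconReadOnlyErrno
//
// enterNoWriteMode shows the exported entry point a caller would use.
func enterNoWriteMode() error {
    return inode.SetRWMode(inode.RWModeNoWrite)
}
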
diff --git a/inode/symlink.go b/inode/symlink.go
index 71648aff8..357fbf5d3 100644
--- a/inode/symlink.go
+++ b/inode/symlink.go
@@ -8,6 +8,11 @@ import (
)
func (vS *volumeStruct) CreateSymlink(target string, filePerm InodeMode, userID InodeUserID, groupID InodeGroupID) (symlinkInodeNumber InodeNumber, err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
// Create file mode out of file permissions plus inode type
fileMode, err := determineMode(filePerm, SymlinkType)
if err != nil {
diff --git a/inode/volume.go b/inode/volume.go
index dede3ebbd..8d4c3beb1 100644
--- a/inode/volume.go
+++ b/inode/volume.go
@@ -14,6 +14,11 @@ func (vS *volumeStruct) GetFSID() (fsid uint64) {
}
func (vS *volumeStruct) SnapShotCreate(name string) (id uint64, err error) {
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
if ("." == name) || (".." == name) {
err = fmt.Errorf("SnapShot cannot be named either '.' or '..'")
return
@@ -38,6 +43,11 @@ func (vS *volumeStruct) SnapShotDelete(id uint64) (err error) {
valueAsInodeStructPtr *inMemoryInodeStruct
)
+ err = enforceRWMode(false)
+ if nil != err {
+ return
+ }
+
vS.Lock()
err = vS.headhunterVolumeHandle.SnapShotDeleteByInodeLayer(id)
if nil == err {
diff --git a/jrpcfs/api.go b/jrpcfs/api.go
index cdf5981dc..a9cae95b7 100644
--- a/jrpcfs/api.go
+++ b/jrpcfs/api.go
@@ -416,6 +416,13 @@ type MountByVolumeNameReply struct {
RootCAx509CertificatePEM []byte
}
+// UnmountRequest is the request object for RpcUnmount.
+//
+// Note that all leases are implicitly released as part of servicing this request.
+type UnmountRequest struct {
+ MountID MountIDAsString
+}
+
// ReaddirRequest is the request object for RpcReaddir.
type ReaddirRequest struct {
InodeHandle
@@ -893,3 +900,72 @@ type SnapShotLookupByNameRequest struct {
type SnapShotLookupByNameReply struct {
SnapShot headhunter.SnapShotStruct
}
+
+// LeaseRequestType specifies the requested lease operation
+//
+type LeaseRequestType uint32
+
+const (
+ LeaseRequestTypeShared LeaseRequestType = iota // Currently unleased, requesting SharedLease
+ LeaseRequestTypePromote // Currently SharedLease held, requesting promotion to ExclusiveLease
+ LeaseRequestTypeExclusive // Currently unleased, requesting ExclusiveLease
+ LeaseRequestTypeDemote // Currently ExclusiveLease held, requesting demotion to SharedLease
+ LeaseRequestTypeRelease // Currently SharedLease or ExclusiveLease held, releasing it
+)
+
+// LeaseRequest is the request object for RpcLease
+//
+type LeaseRequest struct {
+ InodeHandle
+ LeaseRequestType // One of LeaseRequestType*
+}
+
+// LeaseReplyType specifies the acknowledgement that the requested lease operation
+// has been completed or denied (e.g. when a Promotion request cannot be satisfied
+// and the client will soon be receiving an RPCInterruptTypeRelease)
+//
+type LeaseReplyType uint32
+
+const (
+ LeaseReplyTypeDenied LeaseReplyType = iota // Request denied (e.g. Promotion deadlock avoidance)
+ LeaseReplyTypeShared // SharedLease granted
+ LeaseReplyTypePromoted // SharedLease promoted to ExclusiveLease
+ LeaseReplyTypeExclusive // ExclusiveLease granted
+ LeaseReplyTypeDemoted // ExclusiveLease demoted to SharedLease
+ LeaseReplyTypeReleased // SharedLease or ExclusiveLease released
+)
+
+// LeaseReply is the reply object for RpcLease
+//
+type LeaseReply struct {
+ LeaseReplyType // One of LeaseReplyType*
+}
+
+// RPCInterruptType specifies the action (unmount, demotion, or release) requested by ProxyFS
+// of the client in an RpcInterrupt "upcall" to indicate that a lease or leases must be demoted
+// or released
+//
+type RPCInterruptType uint32
+
+const (
+ // RPCInterruptTypeUnmount indicates all Leases should be released (after performing necessary
+ // state saving RPCs) and the client should unmount
+ //
+ RPCInterruptTypeUnmount RPCInterruptType = iota
+
+ // RPCInterruptTypeDemote indicates the specified LeaseHandle should (at least) be demoted
+ // from Exclusive to Shared (after performing necessary state saving RPCs)
+ //
+ RPCInterruptTypeDemote
+
+ // RPCInterruptTypeRelease indicates the specified LeaseHandle should be released (after
+ // performing state saving RPCs and invalidating such cached state)
+ //
+ RPCInterruptTypeRelease
+)
+
+// RPCInterrupt is the "upcall" mechanism used by ProxyFS to interrupt the client
+type RPCInterrupt struct {
+ RPCInterruptType // One of RPCInterruptType*
+ InodeNumber int64 // if RPCInterruptType == RPCInterruptTypeUnmount, InodeNumber == 0 (ignored)
+}
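
Client side, the lease flow is: send an RpcLease carrying a LeaseRequest, receive a LeaseReply, and be prepared for an RpcInterrupt upcall at any time. A dispatch sketch under stated assumptions follows; the leaseclient package and the demote/release/unmountAll callbacks are hypothetical, and only the jrpcfs types come from this diff.

package leaseclient // hypothetical package, for illustration only

import "github.com/swiftstack/ProxyFS/jrpcfs"

// handleInterrupt shows how a mount client might react to an RpcInterrupt
// "upcall" using the types defined above. The callbacks are supplied by the
// caller and are assumptions, not part of the jrpcfs API.
func handleInterrupt(rpcInterrupt *jrpcfs.RPCInterrupt, demote, release func(inodeNumber int64), unmountAll func()) {
    switch rpcInterrupt.RPCInterruptType {
    case jrpcfs.RPCInterruptTypeUnmount:
        unmountAll() // perform state-saving RPCs, release all leases, then unmount
    case jrpcfs.RPCInterruptTypeDemote:
        demote(rpcInterrupt.InodeNumber) // respond with an RpcLease LeaseRequestTypeDemote
    case jrpcfs.RPCInterruptTypeRelease:
        release(rpcInterrupt.InodeNumber) // respond with an RpcLease LeaseRequestTypeRelease
    }
}
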
diff --git a/jrpcfs/config.go b/jrpcfs/config.go
index 9b9fb9570..22d8b84df 100644
--- a/jrpcfs/config.go
+++ b/jrpcfs/config.go
@@ -9,42 +9,119 @@ import (
"github.com/swiftstack/ProxyFS/conf"
"github.com/swiftstack/ProxyFS/fs"
+ "github.com/swiftstack/ProxyFS/inode"
"github.com/swiftstack/ProxyFS/logger"
"github.com/swiftstack/ProxyFS/retryrpc"
"github.com/swiftstack/ProxyFS/transitions"
)
-type globalsStruct struct {
- mapsLock sync.Mutex // protects volumeMap/mountIDMap/bimodalMountMap
- gate sync.RWMutex // API Requests RLock()/RUnlock()
- // confMap changes Lock()/Unlock()
-
- whoAmI string
- publicIPAddr string
- privateIPAddr string
- portString string
- fastPortString string
- retryRPCPort uint16
- retryRPCTTLCompleted time.Duration
- retryRPCAckTrim time.Duration
- retryRPCDeadlineIO time.Duration
- retryRPCKEEPALIVEPeriod time.Duration
- rootCAx509CertificatePEM []byte
- dataPathLogging bool
+type leaseRequestOperationStruct struct {
+ mount *mountStruct
+ inodeLease *inodeLeaseStruct
+ LeaseRequestType
+ replyChan chan *LeaseReply
+}
+
+type leaseRequestStateType uint32
+
+const (
+ leaseRequestStateNone leaseRequestStateType = iota
+ leaseRequestStateSharedRequested
+ leaseRequestStateSharedGranted
+ leaseRequestStateSharedPromoting
+ leaseRequestStateSharedReleasing
+ leaseRequestStateExclusiveRequested
+ leaseRequestStateExclusiveGranted
+ leaseRequestStateExclusiveDemoting
+ leaseRequestStateExclusiveReleasing
+)
+
+type leaseRequestStruct struct {
+ mount *mountStruct
+ inodeLease *inodeLeaseStruct
+ requestState leaseRequestStateType
+ listElement *list.Element // used when on one of inodeList.*List's
+}
+
+type inodeLeaseStateType uint32
+
+const (
+ inodeLeaseStateNone inodeLeaseStateType = iota
+ inodeLeaseStateSharedGrantedRecently
+ inodeLeaseStateSharedGrantedLongAgo
+ inodeLeaseStateSharedReleasing
+ inodeLeaseStateSharedExpired
+ inodeLeaseStateExclusiveGrantedRecently
+ inodeLeaseStateExclusiveGrantedLongAgo
+ inodeLeaseStateExclusiveReleasing
+ inodeLeaseStateExclusiveDemoting
+ inodeLeaseStateExclusiveExpired
+)
+
+type inodeLeaseStruct struct {
+ volume *volumeStruct
+ inodeNumber inode.InodeNumber
+ leaseState inodeLeaseStateType
+
+ requestChan chan *leaseRequestOperationStruct // if closed, this is an order to revoke/reject all leaseRequestStruct's in *Holder* & requestedList
+
+ sharedHoldersList *list.List // each list.Element.Value.(*leaseRequestStruct).requestState == leaseRequestStateSharedGranted
+ promotingHolder *leaseRequestStruct // leaseRequest.requestState == leaseRequestStateSharedPromoting
+ exclusiveHolder *leaseRequestStruct // leaseRequest.requestState == leaseRequestStateExclusiveGranted
+ demotingHolder *leaseRequestStruct // leaseRequest.requestState == leaseRequestStateExclusiveDemoting
+ releasingHoldersList *list.List // each list.Element.Value.(*leaseRequestStruct).requestState == leaseRequestState{Shared|Exclusive}Releasing
+ requestedList *list.List // each list.Element.Value.(*leaseRequestStruct).requestState == leaseRequestState{Shared|Exclusive}Requested
+
+ lastGrantTime time.Time // records the time at which the last exclusive or shared holder was set/added-to exclusiveHolder/sharedHoldersList
+}
- // Map used to enumerate volumes served by this peer
- volumeMap map[string]bool // key == volumeName; value is ignored
+type mountStruct struct {
+ volume *volumeStruct
+ mountIDAsByteArray MountIDAsByteArray
+ mountIDAsString MountIDAsString
+ leaseRequestMap map[inode.InodeNumber]*leaseRequestStruct // key == leaseRequestStruct.inodeLease.inodeNumber
+}
- // Map used to store volumes by ID already mounted for bimodal support
- // TODO: These never get purged !!!
- mountIDAsByteArrayMap map[MountIDAsByteArray]fs.MountHandle
- mountIDAsStringMap map[MountIDAsString]fs.MountHandle
+type volumeStruct struct {
+ volumeName string
+ volumeHandle fs.VolumeHandle
+ acceptingMounts bool
+ mountMapByMountIDAsByteArray map[MountIDAsByteArray]*mountStruct // key == mountStruct.mountIDAsByteArray
+ mountMapByMountIDAsString map[MountIDAsString]*mountStruct // key == mountStruct.mountIDAsString
+ inodeLeaseMap map[inode.InodeNumber]*inodeLeaseStruct // key == inodeLeaseStruct.inodeNumber
+ leaseHandlerWG sync.WaitGroup // .Add(1) each inodeLease insertion into inodeLeaseMap
+ // .Done() each inodeLease after it is removed from inodeLeaseMap
+}
- // Map used to store volumes by name already mounted for bimodal support
- bimodalMountMap map[string]fs.MountHandle // key == volumeName
+type globalsStruct struct {
+ gate sync.RWMutex // API Requests RLock()/RUnlock()
+ // confMap changes Lock()/Unlock()
+
+ volumesLock sync.Mutex // protects mountMapByMountIDAsByteArray & mountMapByMountIDAsString
+ // as well as each volumeStruct/mountStruct map
+
+ whoAmI string
+ publicIPAddr string
+ privateIPAddr string
+ portString string
+ fastPortString string
+ retryRPCPort uint16
+ retryRPCTTLCompleted time.Duration
+ retryRPCAckTrim time.Duration
+ retryRPCDeadlineIO time.Duration
+ retryRPCKeepAlivePeriod time.Duration
+ minLeaseDuration time.Duration
+ leaseInterruptInterval time.Duration
+ leaseInterruptLimit uint32
+ dataPathLogging bool
+
+ volumeMap map[string]*volumeStruct // key == volumeStruct.volumeName
+ mountMapByMountIDAsByteArray map[MountIDAsByteArray]*mountStruct // key == mountStruct.mountIDAsByteArray
+ mountMapByMountIDAsString map[MountIDAsString]*mountStruct // key == mountStruct.mountIDAsString
// RetryRPC server
- retryrpcSvr *retryrpc.Server
+ retryrpcSvr *retryrpc.Server
+ rootCAx509CertificatePEM []byte
// Connection list and listener list to close during shutdown:
halting bool
@@ -62,10 +139,9 @@ func init() {
}
func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
- globals.volumeMap = make(map[string]bool)
- globals.mountIDAsByteArrayMap = make(map[MountIDAsByteArray]fs.MountHandle)
- globals.mountIDAsStringMap = make(map[MountIDAsString]fs.MountHandle)
- globals.bimodalMountMap = make(map[string]fs.MountHandle)
+ globals.volumeMap = make(map[string]*volumeStruct)
+ globals.mountMapByMountIDAsByteArray = make(map[MountIDAsByteArray]*mountStruct)
+ globals.mountMapByMountIDAsString = make(map[MountIDAsString]*mountStruct)
// Fetch IPAddrs from config file
globals.whoAmI, err = confMap.FetchOptionValueString("Cluster", "WhoAmI")
@@ -115,10 +191,10 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
logger.Infof("failed to get JSONRPCServer.RetryRPCDeadlineIO from config file - defaulting to 60s")
globals.retryRPCDeadlineIO = 60 * time.Second
}
- globals.retryRPCKEEPALIVEPeriod, err = confMap.FetchOptionValueDuration("JSONRPCServer", "RetryRPCKEEPALIVEPeriod")
+ globals.retryRPCKeepAlivePeriod, err = confMap.FetchOptionValueDuration("JSONRPCServer", "RetryRPCKeepAlivePeriod")
if nil != err {
- logger.Infof("failed to get JSONRPCServer.RetryRPCKEEPALIVEPeriod from config file - defaulting to 60s")
- globals.retryRPCKEEPALIVEPeriod = 60 * time.Second
+ logger.Infof("failed to get JSONRPCServer.RetryRPCKeepAlivePeriod from config file - defaulting to 60s")
+ globals.retryRPCKeepAlivePeriod = 60 * time.Second
}
} else {
logger.Infof("failed to get JSONRPCServer.RetryRPCPort from config file - skipping......")
@@ -126,7 +202,7 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
globals.retryRPCTTLCompleted = time.Duration(0)
globals.retryRPCAckTrim = time.Duration(0)
globals.retryRPCDeadlineIO = time.Duration(0)
- globals.retryRPCKEEPALIVEPeriod = time.Duration(0)
+ globals.retryRPCKeepAlivePeriod = time.Duration(0)
}
// Set data path logging level to true, so that all trace logging is controlled by settings
@@ -139,6 +215,22 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
return
}
+ globals.minLeaseDuration, err = confMap.FetchOptionValueDuration("JSONRPCServer", "MinLeaseDuration")
+ if nil != err {
+ logger.Infof("failed to get JSONRPCServer.MinLeaseDuration from config file - defaulting to 250ms")
+ globals.minLeaseDuration = 250 * time.Millisecond
+ }
+ globals.leaseInterruptInterval, err = confMap.FetchOptionValueDuration("JSONRPCServer", "LeaseInterruptInterval")
+ if nil != err {
+ logger.Infof("failed to get JSONRPCServer.LeaseInterruptInterval from config file - defaulting to 250ms")
+ globals.leaseInterruptInterval = 250 * time.Millisecond
+ }
+ globals.leaseInterruptLimit, err = confMap.FetchOptionValueUint32("JSONRPCServer", "LeaseInterruptLimit")
+ if nil != err {
+ logger.Infof("failed to get JSONRPCServer.LeaseInterruptLimit from config file - defaulting to 20")
+ globals.leaseInterruptLimit = 20
+ }
+
// Ensure gate starts out in the Exclusively Locked state
closeGate()
@@ -148,16 +240,14 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
globals.halting = false
// Init JSON RPC server stuff
-
- // jsonRpcServerUp(globals.privateIPAddr, globals.portString)
- jsonRpcServerUp("0.0.0.0", globals.portString)
+ jsonRpcServerUp(globals.privateIPAddr, globals.portString)
// Now kick off our other, faster RPC server
ioServerUp(globals.privateIPAddr, globals.fastPortString)
// Init Retry RPC server
retryRPCServerUp(jserver, globals.publicIPAddr, globals.retryRPCPort, globals.retryRPCTTLCompleted, globals.retryRPCAckTrim,
- globals.retryRPCDeadlineIO, globals.retryRPCKEEPALIVEPeriod)
+ globals.retryRPCDeadlineIO, globals.retryRPCKeepAlivePeriod)
return
}
@@ -182,7 +272,39 @@ func (dummy *globalsStruct) VolumeDestroyed(confMap conf.ConfMap, volumeName str
}
func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string) (err error) {
- globals.volumeMap[volumeName] = true
+ var (
+ currentlyInVolumeMap bool
+ volume *volumeStruct
+ volumeHandle fs.VolumeHandle
+ )
+
+ globals.volumesLock.Lock()
+
+ _, currentlyInVolumeMap = globals.volumeMap[volumeName]
+ if currentlyInVolumeMap {
+ globals.volumesLock.Unlock()
+ err = fmt.Errorf("Cannot be told to ServeVolume(,\"%s\") twice", volumeName)
+ return
+ }
+
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName(volumeName)
+ if nil != err {
+ globals.volumesLock.Unlock()
+ return
+ }
+
+ volume = &volumeStruct{
+ volumeName: volumeName,
+ volumeHandle: volumeHandle,
+ acceptingMounts: true,
+ mountMapByMountIDAsByteArray: make(map[MountIDAsByteArray]*mountStruct),
+ mountMapByMountIDAsString: make(map[MountIDAsString]*mountStruct),
+ inodeLeaseMap: make(map[inode.InodeNumber]*inodeLeaseStruct),
+ }
+
+ globals.volumeMap[volumeName] = volume
+
+ globals.volumesLock.Unlock()
err = nil
return
@@ -190,44 +312,48 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
var (
- mountHandle fs.MountHandle
- mountIDAsByteArray MountIDAsByteArray
- mountIDAsString MountIDAsString
- toRemoveMountIDAsByteArrayList []MountIDAsByteArray
- toRemoveMountIDAsStringList []MountIDAsString
+ currentlyInVolumeMap bool
+ mountIDAsByteArray MountIDAsByteArray
+ mountIDAsString MountIDAsString
+ volume *volumeStruct
)
- toRemoveMountIDAsByteArrayList = make([]MountIDAsByteArray, 0, len(globals.mountIDAsByteArrayMap))
+ globals.volumesLock.Lock()
- for mountIDAsByteArray, mountHandle = range globals.mountIDAsByteArrayMap {
- if mountHandle.VolumeName() == volumeName {
- toRemoveMountIDAsByteArrayList = append(toRemoveMountIDAsByteArrayList, mountIDAsByteArray)
- }
+ volume, currentlyInVolumeMap = globals.volumeMap[volumeName]
+ if !currentlyInVolumeMap {
+ globals.volumesLock.Unlock()
+ err = fmt.Errorf("Cannot be told to UnserveVolume(,\"%s\") a non-served volume", volumeName)
+ return
}
- for _, mountIDAsByteArray = range toRemoveMountIDAsByteArrayList {
- delete(globals.mountIDAsByteArrayMap, mountIDAsByteArray)
- }
+ volume.acceptingMounts = false
- toRemoveMountIDAsStringList = make([]MountIDAsString, 0, len(globals.mountIDAsStringMap))
+ // TODO: Lease Management changes - somehow while *not* holding volumesLock.Lock():
+ // Prevent new lease requests
+ // Fail outstanding lease requests
+ // Revoke granted leases
- for mountIDAsString, mountHandle = range globals.mountIDAsStringMap {
- if mountHandle.VolumeName() == volumeName {
- toRemoveMountIDAsStringList = append(toRemoveMountIDAsStringList, mountIDAsString)
- }
+ delete(globals.volumeMap, volumeName)
+
+ for mountIDAsByteArray = range volume.mountMapByMountIDAsByteArray {
+ delete(globals.mountMapByMountIDAsByteArray, mountIDAsByteArray)
}
- for _, mountIDAsString = range toRemoveMountIDAsStringList {
- delete(globals.mountIDAsStringMap, mountIDAsString)
+ for mountIDAsString = range volume.mountMapByMountIDAsString {
+ delete(globals.mountMapByMountIDAsString, mountIDAsString)
}
- delete(globals.volumeMap, volumeName)
- delete(globals.bimodalMountMap, volumeName)
+ globals.volumesLock.Unlock()
err = nil
return
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil // TODO: this is where we get a chance to tell our clients to unmount !!!
+}
+
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
closeGate()
@@ -247,16 +373,12 @@ func (dummy *globalsStruct) Down(confMap conf.ConfMap) (err error) {
err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.volumeMap)")
return
}
- if 0 != len(globals.mountIDAsByteArrayMap) {
- err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.mountIDAsByteArrayMap)")
- return
- }
- if 0 != len(globals.mountIDAsStringMap) {
- err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.mountIDAsStringMap)")
+ if 0 != len(globals.mountMapByMountIDAsByteArray) {
+ err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.mountMapByMountIDAsByteArray)")
return
}
- if 0 != len(globals.bimodalMountMap) {
- err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.bimodalMountMap)")
+ if 0 != len(globals.mountMapByMountIDAsString) {
+ err = fmt.Errorf("jrpcfs.Down() called with 0 != len(globals.mountMapByMountIDAsString)")
return
}
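
The reorganized maps give each volumeStruct its own mount and lease indexes while globals keeps the cross-volume MountID lookups, which is what lets UnserveVolume() drop a volume's mounts without scanning every mount. The sketch below shows how a new mount would be threaded through both levels; it assumes placement inside package jrpcfs next to the structs above, and registerMount() itself is hypothetical, not part of this diff.

// Assumes package jrpcfs scope (globals, mountStruct, volumeStruct as above).
// registerMount links a freshly allocated mountStruct into its volume's maps
// and into the global MountID maps, all under volumesLock.
func registerMount(volume *volumeStruct, idAsByteArray MountIDAsByteArray, idAsString MountIDAsString) (mount *mountStruct) {
    mount = &mountStruct{
        volume:             volume,
        mountIDAsByteArray: idAsByteArray,
        mountIDAsString:    idAsString,
        leaseRequestMap:    make(map[inode.InodeNumber]*leaseRequestStruct),
    }

    globals.volumesLock.Lock()
    volume.mountMapByMountIDAsByteArray[idAsByteArray] = mount
    volume.mountMapByMountIDAsString[idAsString] = mount
    globals.mountMapByMountIDAsByteArray[idAsByteArray] = mount
    globals.mountMapByMountIDAsString[idAsString] = mount
    globals.volumesLock.Unlock()

    return
}
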
diff --git a/jrpcfs/filesystem.go b/jrpcfs/filesystem.go
index cfe646eb9..7b14d197f 100644
--- a/jrpcfs/filesystem.go
+++ b/jrpcfs/filesystem.go
@@ -207,62 +207,45 @@ var saveChannelSize int = 1000
// Default values here are false
var loggedOutOfStatsRoom map[OpType]bool = make(map[OpType]bool)
-func allocateMountID(mountHandle fs.MountHandle) (mountIDAsByteArray MountIDAsByteArray, mountIDAsString MountIDAsString) {
+func lookupVolumeHandleByMountIDAsByteArray(mountIDAsByteArray MountIDAsByteArray) (volumeHandle fs.VolumeHandle, err error) {
var (
- i int
- keepTrying bool
- randByteSlice []byte
+ mount *mountStruct
+ ok bool
)
- globals.mapsLock.Lock()
-
- keepTrying = true
- for keepTrying {
- randByteSlice = utils.FetchRandomByteSlice(len(mountIDAsByteArray))
- for i = 0; i < len(mountIDAsByteArray); i++ {
- if i != 0 {
- keepTrying = false // At least one of the bytes is non-zero... so it's a valid MountID
- }
- mountIDAsByteArray[i] = randByteSlice[i]
- }
- if !keepTrying {
- _, keepTrying = globals.mountIDAsByteArrayMap[mountIDAsByteArray]
- }
- }
-
- mountIDAsString = MountIDAsString(base64.StdEncoding.EncodeToString(mountIDAsByteArray[:]))
-
- globals.mountIDAsByteArrayMap[mountIDAsByteArray] = mountHandle
- globals.mountIDAsStringMap[mountIDAsString] = mountHandle
-
- globals.mapsLock.Unlock()
+ globals.volumesLock.Lock()
+ mount, ok = globals.mountMapByMountIDAsByteArray[mountIDAsByteArray]
+ globals.volumesLock.Unlock()
- return
-}
-
-func lookupMountHandleByMountIDAsByteArray(mountIDAsByteArray MountIDAsByteArray) (mountHandle fs.MountHandle, err error) {
- globals.mapsLock.Lock()
- mountHandle, ok := globals.mountIDAsByteArrayMap[mountIDAsByteArray]
- globals.mapsLock.Unlock()
if ok {
+ volumeHandle = mount.volume.volumeHandle
err = nil
} else {
- err = fmt.Errorf("MountID %v not found in jrpcfs globals.mountIDMap", mountIDAsByteArray)
+ err = fmt.Errorf("MountID %v not found in jrpcfs globals.mountMapByMountIDAsByteArray", mountIDAsByteArray)
err = blunder.AddError(err, blunder.BadMountIDError)
}
+
return
}
-func lookupMountHandleByMountIDAsString(mountIDAsString MountIDAsString) (mountHandle fs.MountHandle, err error) {
- globals.mapsLock.Lock()
- mountHandle, ok := globals.mountIDAsStringMap[mountIDAsString]
- globals.mapsLock.Unlock()
+func lookupVolumeHandleByMountIDAsString(mountIDAsString MountIDAsString) (volumeHandle fs.VolumeHandle, err error) {
+ var (
+ mount *mountStruct
+ ok bool
+ )
+
+ globals.volumesLock.Lock()
+ mount, ok = globals.mountMapByMountIDAsString[mountIDAsString]
+ globals.volumesLock.Unlock()
+
if ok {
+ volumeHandle = mount.volume.volumeHandle
err = nil
} else {
- err = fmt.Errorf("MountID %v not found in jrpcfs globals.mountIDMap", mountIDAsString)
+ err = fmt.Errorf("MountID %s not found in jrpcfs globals.mountMapByMountIDAsString", mountIDAsString)
err = blunder.AddError(err, blunder.BadMountIDError)
}
+
return
}
@@ -612,101 +595,6 @@ func rpcEncodeError(e *error) {
}
}
-// XXX TODO: To avoid repetitive error checking with the same blunder calls, can
-// we do something like this? (from https://blog.golang.org/errors-are-values):
-//
-// type errWriter struct {
-// w io.Writer
-// err error
-// }
-//
-// func (ew *errWriter) write(buf []byte) {
-// if ew.err != nil {
-// return
-// }
-// _, ew.err = ew.w.Write(buf)
-// }
-// As soon as an error occurs, the write method becomes a no-op but the error value is saved.
-//
-// Given the errWriter type and its write method, the code above can be refactored:
-//
-// ew := errWriter{w: fd}
-// ew.write(p0[a:b])
-// ew.write(p1[c:d])
-// ew.write(p2[e:f])
-// // and so on
-// if ew.err != nil {
-// return ew.err
-// }
-//
-// Editorial comment:
-// Since we aren't wrapping the same function call all the time, the exact same
-// approach can't be used. But can we create something that can track the error
-// and check it before each call?
-//
-// Could use function variable or something, but would that make the code
-// too hard to read?
-// OR...
-// go actually has a goto statement; that might be easier to follow than an obscure
-// mechanism, while still allowing us to only write the blunder call in one place
-// per function.
-// BUT you can't jump over variable declarations, just like in C.
-// OR...
-// create a deferred function that runs on exit and does the error handling?
-// have to be careful with the syntax so that the final version of err is
-// passed to the onExit function.
-
-/*
-type errTracker struct {
- err error
-}
-
-func (et *errTracker) call() {
- if et.err != nil {
- return
- }
-
- // Otherwise call the function we were passed?
-}
-
-// XXX TODO: Alternate error-handling approach - use goto so that error handling
-// code only appears once in a given function.
-// Downside: can't jump over variable creation (i.e. can't use :=)
-func (s *Server) RpcLinkPath(in *LinkPathRequest, reply *Reply) (err error) {
- flog := logger.TraceEnter("in.", in)
- defer func() { flog.TraceExitErr("reply.", err, reply) }()
-
- var src_ino, tgt_ino inode.InodeNumber
-
- mountHandle, err := lookupMountHandle(in.MountID)
- if nil != err {
- return
- }
-
- // Split fullpath into parent dir and basename
- parentDir, basename := splitPath(in.Fullpath)
-
- // Get the inode for the (source) parent dir
- src_ino, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
- if err != nil {
- goto Done
- }
-
- // Get the inode for the target dir
- tgt_ino, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.TargetFullpath)
- if err != nil {
- goto Done
- }
-
- // Do the link
- err = mountHandle.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, src_ino, basename, tgt_ino)
-
-Done:
- err = fmt.Errorf("errno: %d", blunder.Errno(err))
- return
-}
-*/
-
// Shorthand for our internal API debug log id; global to the package
const internalDebug = logger.DbgInternal
@@ -725,7 +613,7 @@ func (s *Server) RpcChown(in *ChownRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -740,7 +628,7 @@ func (s *Server) RpcChown(in *ChownRequest, reply *Reply) (err error) {
if in.GroupID != -1 {
stat[fs.StatGroupID] = uint64(in.GroupID)
}
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
return
}
@@ -752,7 +640,7 @@ func (s *Server) RpcChownPath(in *ChownPathRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -761,7 +649,7 @@ func (s *Server) RpcChownPath(in *ChownPathRequest, reply *Reply) (err error) {
// We do not check/enforce it; that is the caller's responsibility.
// Get the inode
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
@@ -774,7 +662,7 @@ func (s *Server) RpcChownPath(in *ChownPathRequest, reply *Reply) (err error) {
if in.GroupID != -1 {
stat[fs.StatGroupID] = uint64(in.GroupID)
}
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
return
}
@@ -789,7 +677,7 @@ func (s *Server) RpcChmod(in *ChmodRequest, reply *Reply) (err error) {
// NOTE: We currently just store and return per-inode ownership info.
// We do not check/enforce it; that is the caller's responsibility.
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -798,7 +686,7 @@ func (s *Server) RpcChmod(in *ChmodRequest, reply *Reply) (err error) {
// bits can be changed by SetStat().
stat := make(fs.Stat)
stat[fs.StatMode] = uint64(in.FileMode) & 07777
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
return
}
@@ -810,7 +698,7 @@ func (s *Server) RpcChmodPath(in *ChmodPathRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -819,7 +707,7 @@ func (s *Server) RpcChmodPath(in *ChmodPathRequest, reply *Reply) (err error) {
// We do not check/enforce it; that is the caller's responsibility.
// Get the inode
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
@@ -830,7 +718,7 @@ func (s *Server) RpcChmodPath(in *ChmodPathRequest, reply *Reply) (err error) {
// bits can be changed by SetStat().
stat := make(fs.Stat)
stat[fs.StatMode] = uint64(in.FileMode) & 07777
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
return
}
@@ -842,12 +730,12 @@ func (s *Server) RpcCreate(in *CreateRequest, reply *InodeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- fino, err := mountHandle.Create(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeMode(in.FileMode))
+ fino, err := volumeHandle.Create(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeMode(in.FileMode))
reply.InodeNumber = int64(uint64(fino))
return
}
@@ -860,7 +748,7 @@ func (s *Server) RpcCreatePath(in *CreatePathRequest, reply *InodeReply) (err er
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -878,13 +766,13 @@ func (s *Server) RpcCreatePath(in *CreatePathRequest, reply *InodeReply) (err er
parentDir, basename := splitPath(in.Fullpath)
// Get the inode for the parent dir
- ino, err := mountHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, parentDir)
+ ino, err := volumeHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, parentDir)
if err != nil {
return
}
// Do the create
- fino, err := mountHandle.Create(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, ino, basename, inode.InodeMode(in.FileMode))
+ fino, err := volumeHandle.Create(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, ino, basename, inode.InodeMode(in.FileMode))
reply.InodeNumber = int64(uint64(fino))
return
}
@@ -897,7 +785,7 @@ func (s *Server) RpcFlock(in *FlockRequest, reply *FlockReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -909,7 +797,7 @@ func (s *Server) RpcFlock(in *FlockRequest, reply *FlockReply) (err error) {
flock.Len = in.FlockLen
flock.Pid = in.FlockPid
- lockStruct, err := mountHandle.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.FlockCmd, &flock)
+ lockStruct, err := volumeHandle.Flock(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.FlockCmd, &flock)
if lockStruct != nil {
reply.FlockType = lockStruct.Type
reply.FlockWhence = lockStruct.Whence
@@ -936,9 +824,9 @@ func (s *Server) RpcProvisionObject(in *ProvisionObjectRequest, reply *Provision
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- reply.PhysPath, err = mountHandle.CallInodeToProvisionObject()
+ reply.PhysPath, err = volumeHandle.CallInodeToProvisionObject()
}
return
@@ -952,9 +840,9 @@ func (s *Server) RpcWrote(in *WroteRequest, reply *WroteReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- err = mountHandle.Wrote(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.ContainerName, in.ObjectName, in.FileOffset, in.ObjectOffset, in.Length)
+ err = volumeHandle.Wrote(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.ContainerName, in.ObjectName, in.FileOffset, in.ObjectOffset, in.Length)
}
return
@@ -972,9 +860,9 @@ func (s *Server) RpcFetchExtentMapChunk(in *FetchExtentMapChunkRequest, reply *F
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- extentMapChunk, err = mountHandle.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.FileOffset, in.MaxEntriesFromFileOffset, in.MaxEntriesBeforeFileOffset)
+ extentMapChunk, err = volumeHandle.FetchExtentMapChunk(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.FileOffset, in.MaxEntriesFromFileOffset, in.MaxEntriesBeforeFileOffset)
if nil == err {
reply.FileOffsetRangeStart = extentMapChunk.FileOffsetRangeStart
reply.FileOffsetRangeEnd = extentMapChunk.FileOffsetRangeEnd
@@ -1012,9 +900,9 @@ func (s *Server) RpcFlush(in *FlushRequest, reply *Reply) (err error) {
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
profiler.AddEventNow("before fs.Flush()")
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- err = mountHandle.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
+ err = volumeHandle.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
}
profiler.AddEventNow("after fs.Flush()")
@@ -1057,9 +945,9 @@ func (s *Server) RpcGetStat(in *GetStatRequest, reply *StatStruct) (err error) {
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
profiler.AddEventNow("before fs.Getstat()")
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- stat, err = mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
+ stat, err = volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
}
profiler.AddEventNow("after fs.Getstat()")
if err == nil {
@@ -1083,14 +971,14 @@ func (s *Server) RpcGetStatPath(in *GetStatPathRequest, reply *StatStruct) (err
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
// Get the inode
profiler.AddEventNow("before fs.LookupPath()")
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
profiler.AddEventNow("after fs.LookupPath()")
if err != nil {
// Save profiler with server op stats
@@ -1101,7 +989,7 @@ func (s *Server) RpcGetStatPath(in *GetStatPathRequest, reply *StatStruct) (err
// Do the GetStat
profiler.AddEventNow("before fs.Getstat()")
- stat, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
+ stat, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
profiler.AddEventNow("after fs.Getstat()")
if err == nil {
reply.fsStatToStatStruct(stat)
@@ -1125,9 +1013,9 @@ func (s *Server) RpcGetXAttr(in *GetXAttrRequest, reply *GetXAttrReply) (err err
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
profiler.AddEventNow("before fs.GetXAttr()")
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil == err {
- reply.AttrValue, err = mountHandle.GetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName)
+ reply.AttrValue, err = volumeHandle.GetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName)
}
profiler.AddEventNow("after fs.GetXAttr()")
@@ -1148,13 +1036,13 @@ func (s *Server) RpcGetXAttrPath(in *GetXAttrPathRequest, reply *GetXAttrReply)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
profiler.AddEventNow("before fs.LookupPath()")
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
profiler.AddEventNow("after fs.LookupPath()")
if err != nil {
// Save profiler with server op stats
@@ -1164,7 +1052,7 @@ func (s *Server) RpcGetXAttrPath(in *GetXAttrPathRequest, reply *GetXAttrReply)
}
profiler.AddEventNow("before fs.GetXAttr()")
- reply.AttrValue, err = mountHandle.GetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName)
+ reply.AttrValue, err = volumeHandle.GetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName)
profiler.AddEventNow("after fs.GetXAttr()")
if err == nil {
reply.AttrValueSize = uint64(len(reply.AttrValue))
@@ -1195,13 +1083,13 @@ func (s *Server) RpcLookupPath(in *LookupPathRequest, reply *InodeReply) (err er
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
profiler.AddEventNow("before fs.LookupPath()")
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
profiler.AddEventNow("after fs.LookupPath()")
if err == nil {
reply.InodeNumber = int64(uint64(ino))
@@ -1222,12 +1110,12 @@ func (s *Server) RpcLink(in *LinkRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeNumber(in.TargetInodeNumber))
+ err = volumeHandle.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeNumber(in.TargetInodeNumber))
return
}
@@ -1239,7 +1127,7 @@ func (s *Server) RpcLinkPath(in *LinkPathRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1248,19 +1136,19 @@ func (s *Server) RpcLinkPath(in *LinkPathRequest, reply *Reply) (err error) {
parentDir, basename := splitPath(in.Fullpath)
// Get the inode for the (source) parent dir
- srcIno, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
+ srcIno, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
if err != nil {
return
}
// Get the inode for the target
- tgtIno, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.TargetFullpath)
+ tgtIno, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.TargetFullpath)
if err != nil {
return
}
// Do the link
- err = mountHandle.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcIno, basename, tgtIno)
+ err = volumeHandle.Link(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcIno, basename, tgtIno)
return
}
@@ -1272,12 +1160,12 @@ func (s *Server) RpcListXAttr(in *ListXAttrRequest, reply *ListXAttrReply) (err
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- reply.AttrNames, err = mountHandle.ListXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
+ reply.AttrNames, err = volumeHandle.ListXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
return
}
@@ -1289,17 +1177,17 @@ func (s *Server) RpcListXAttrPath(in *ListXAttrPathRequest, reply *ListXAttrRepl
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
- reply.AttrNames, err = mountHandle.ListXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
+ reply.AttrNames, err = volumeHandle.ListXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino))
if err != nil {
return
}
@@ -1317,13 +1205,13 @@ func (s *Server) RpcLookup(in *LookupRequest, reply *InodeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
profiler.AddEventNow("before fs.Lookup()")
- ino, err := mountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
+ ino, err := volumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
profiler.AddEventNow("after fs.Lookup()")
// line below is for testing fault injection
//err = blunder.AddError(err, blunder.TryAgainError)
@@ -1346,17 +1234,17 @@ func (s *Server) RpcLookupPlus(in *LookupPlusRequest, reply *LookupPlusReply) (e
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
+ ino, err := volumeHandle.Lookup(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
if nil != err {
return
}
- stat, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
+ stat, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
if nil != err {
return
}
@@ -1375,12 +1263,12 @@ func (s *Server) RpcAccess(in *AccessRequest, reply *InodeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ok := mountHandle.Access(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), inode.InodeMode(in.AccessMode))
+ ok := volumeHandle.Access(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), inode.InodeMode(in.AccessMode))
if ok {
err = nil
} else {
@@ -1398,12 +1286,12 @@ func (s *Server) RpcMkdir(in *MkdirRequest, reply *InodeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.Mkdir(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeMode(in.FileMode))
+ ino, err := volumeHandle.Mkdir(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, inode.InodeMode(in.FileMode))
reply.InodeNumber = int64(uint64(ino))
return
}
@@ -1416,7 +1304,7 @@ func (s *Server) RpcMkdirPath(in *MkdirPathRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1434,13 +1322,68 @@ func (s *Server) RpcMkdirPath(in *MkdirPathRequest, reply *Reply) (err error) {
parentDir, basename := splitPath(in.Fullpath)
// Get the inode for the parent dir
- ino, err := mountHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, parentDir)
+ ino, err := volumeHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, parentDir)
if err != nil {
return
}
// Do the mkdir
- _, err = mountHandle.Mkdir(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, ino, basename, inode.InodeMode(in.FileMode))
+ _, err = volumeHandle.Mkdir(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, ino, basename, inode.InodeMode(in.FileMode))
+ return
+}
+
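+// performMount registers a new mount of the volume behind volumeHandle: it
+// generates a unique, non-zero MountID (as both a byte array and its base64
+// string encoding) and records the resulting mountStruct in the per-volume
+// and global mount maps while holding globals.volumesLock.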
+func performMount(volumeHandle fs.VolumeHandle) (mountIDAsByteArray MountIDAsByteArray, mountIDAsString MountIDAsString, err error) {
+ var (
+ i int
+ keepTrying bool
+ mount *mountStruct
+ ok bool
+ randByteSlice []byte
+ volume *volumeStruct
+ volumeName string
+ )
+
+ globals.volumesLock.Lock()
+
+ keepTrying = true
+ for keepTrying {
+ randByteSlice = utils.FetchRandomByteSlice(len(mountIDAsByteArray))
+ for i = 0; i < len(mountIDAsByteArray); i++ {
+			if 0 != randByteSlice[i] {
+ keepTrying = false // At least one of the bytes is non-zero... so it's a valid MountID
+ }
+ mountIDAsByteArray[i] = randByteSlice[i]
+ }
+ if !keepTrying {
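+			// Regenerate if this randomly chosen MountID already identifies an existing mount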
+ _, keepTrying = globals.mountMapByMountIDAsByteArray[mountIDAsByteArray]
+ }
+ }
+
+ mountIDAsString = MountIDAsString(base64.StdEncoding.EncodeToString(mountIDAsByteArray[:]))
+
+ volumeName = volumeHandle.VolumeName()
+
+ volume, ok = globals.volumeMap[volumeName]
+ if !ok {
+ globals.volumesLock.Unlock()
+ err = fmt.Errorf("performMount(volumeHandle.VolumeName==\"%s\") cannot be found in globals.volumeMap", volumeName)
+ return
+ }
+
+ mount = &mountStruct{
+ volume: volume,
+ mountIDAsByteArray: mountIDAsByteArray,
+ mountIDAsString: mountIDAsString,
+ }
+
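+	// Index the new mount by both forms of its MountID in the per-volume and global maps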
+ volume.mountMapByMountIDAsByteArray[mountIDAsByteArray] = mount
+ volume.mountMapByMountIDAsString[mountIDAsString] = mount
+
+ globals.mountMapByMountIDAsByteArray[mountIDAsByteArray] = mount
+ globals.mountMapByMountIDAsString[mountIDAsString] = mount
+
+ globals.volumesLock.Unlock()
+
return
}
@@ -1452,13 +1395,15 @@ func (s *Server) RpcMountByAccountName(in *MountByAccountNameRequest, reply *Mou
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := fs.MountByAccountName(in.AccountName, fs.MountOptions(in.MountOptions))
- if err == nil {
- _, reply.MountID = allocateMountID(mountHandle)
- reply.RootDirInodeNumber = int64(uint64(inode.RootDirInodeNumber))
- reply.RetryRPCPublicIPAddr = globals.publicIPAddr
- reply.RetryRPCPort = globals.retryRPCPort
- reply.RootCAx509CertificatePEM = globals.rootCAx509CertificatePEM
+ volumeHandle, err := fs.FetchVolumeHandleByAccountName(in.AccountName)
+ if nil == err {
+ _, reply.MountID, err = performMount(volumeHandle)
+ if nil == err {
+ reply.RootDirInodeNumber = int64(uint64(inode.RootDirInodeNumber))
+ reply.RetryRPCPublicIPAddr = globals.publicIPAddr
+ reply.RetryRPCPort = globals.retryRPCPort
+ reply.RootCAx509CertificatePEM = globals.rootCAx509CertificatePEM
+ }
}
return
}
@@ -1471,17 +1416,54 @@ func (s *Server) RpcMountByVolumeName(in *MountByVolumeNameRequest, reply *Mount
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := fs.MountByVolumeName(in.VolumeName, fs.MountOptions(in.MountOptions))
- if err == nil {
- _, reply.MountID = allocateMountID(mountHandle)
- reply.RootDirInodeNumber = int64(uint64(inode.RootDirInodeNumber))
- reply.RetryRPCPublicIPAddr = globals.publicIPAddr
- reply.RetryRPCPort = globals.retryRPCPort
- reply.RootCAx509CertificatePEM = globals.rootCAx509CertificatePEM
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName(in.VolumeName)
+ if nil == err {
+ _, reply.MountID, err = performMount(volumeHandle)
+ if nil == err {
+ reply.RootDirInodeNumber = int64(uint64(inode.RootDirInodeNumber))
+ reply.RetryRPCPublicIPAddr = globals.publicIPAddr
+ reply.RetryRPCPort = globals.retryRPCPort
+ reply.RootCAx509CertificatePEM = globals.rootCAx509CertificatePEM
+ }
}
return
}
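+
+// RpcUnmount releases the mount identified by in.MountID, removing its
+// mountStruct from the per-volume and global mount maps.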
+func (s *Server) RpcUnmount(in *UnmountRequest, reply *Reply) (err error) {
+ var (
+ mount *mountStruct
+ ok bool
+ volume *volumeStruct
+ )
+
+ enterGate()
+ defer leaveGate()
+
+ globals.volumesLock.Lock()
+
+ mount, ok = globals.mountMapByMountIDAsString[in.MountID]
+ if !ok {
+ globals.volumesLock.Unlock()
+		err = fmt.Errorf("RpcUnmount(in.MountID==\"%s\") cannot be found in globals.mountMapByMountIDAsString", in.MountID)
+ return
+ }
+
+ // TODO: Lease Management must implicitly release all leases held
+
+ volume = mount.volume
+
+ delete(volume.mountMapByMountIDAsByteArray, mount.mountIDAsByteArray)
+ delete(volume.mountMapByMountIDAsString, mount.mountIDAsString)
+
+ delete(globals.mountMapByMountIDAsByteArray, mount.mountIDAsByteArray)
+ delete(globals.mountMapByMountIDAsString, mount.mountIDAsString)
+
+ globals.volumesLock.Unlock()
+
+ err = nil
+ return
+}
+
func (dirEnt *DirEntry) fsDirentToDirEntryStruct(fsDirent inode.DirEntry) {
dirEnt.InodeNumber = int64(uint64(fsDirent.InodeNumber))
dirEnt.Basename = fsDirent.Basename
@@ -1509,16 +1491,16 @@ func (s *Server) RpcReaddirByLoc(in *ReaddirByLocRequest, reply *ReaddirReply) (
func (s *Server) rpcReaddirInternal(in interface{}, reply *ReaddirReply, profiler *utils.Profiler) (err error) {
var (
- dirEnts []inode.DirEntry
- flog logger.FuncCtx
- i int
- iH InodeHandle
- inByLoc *ReaddirByLocRequest
- inByName *ReaddirRequest
- maxEntries uint64
- mountHandle fs.MountHandle
- okByName bool
- prevMarker interface{}
+ dirEnts []inode.DirEntry
+ flog logger.FuncCtx
+ i int
+ iH InodeHandle
+ inByLoc *ReaddirByLocRequest
+ inByName *ReaddirRequest
+ maxEntries uint64
+ okByName bool
+ prevMarker interface{}
+ volumeHandle fs.VolumeHandle
)
inByName, okByName = in.(*ReaddirRequest)
@@ -1541,13 +1523,13 @@ func (s *Server) rpcReaddirInternal(in interface{}, reply *ReaddirReply, profile
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err = lookupMountHandleByMountIDAsString(iH.MountID)
+ volumeHandle, err = lookupVolumeHandleByMountIDAsString(iH.MountID)
if nil != err {
return
}
profiler.AddEventNow("before fs.Readdir()")
- dirEnts, _, _, err = mountHandle.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(iH.InodeNumber), maxEntries, prevMarker)
+ dirEnts, _, _, err = volumeHandle.Readdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(iH.InodeNumber), maxEntries, prevMarker)
profiler.AddEventNow("after fs.Readdir()")
if nil == err {
@@ -1580,17 +1562,17 @@ func (s *Server) RpcReaddirPlusByLoc(in *ReaddirPlusByLocRequest, reply *Readdir
func (s *Server) rpcReaddirPlusInternal(in interface{}, reply *ReaddirPlusReply, profiler *utils.Profiler) (err error) {
var (
- dirEnts []inode.DirEntry
- flog logger.FuncCtx
- i int
- iH InodeHandle
- inByLoc *ReaddirPlusByLocRequest
- inByName *ReaddirPlusRequest
- maxEntries uint64
- mountHandle fs.MountHandle
- okByName bool
- prevMarker interface{}
- statEnts []fs.Stat
+ dirEnts []inode.DirEntry
+ flog logger.FuncCtx
+ i int
+ iH InodeHandle
+ inByLoc *ReaddirPlusByLocRequest
+ inByName *ReaddirPlusRequest
+ maxEntries uint64
+ okByName bool
+ prevMarker interface{}
+ statEnts []fs.Stat
+ volumeHandle fs.VolumeHandle
)
inByName, okByName = in.(*ReaddirPlusRequest)
@@ -1613,13 +1595,13 @@ func (s *Server) rpcReaddirPlusInternal(in interface{}, reply *ReaddirPlusReply,
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err = lookupMountHandleByMountIDAsString(iH.MountID)
+ volumeHandle, err = lookupVolumeHandleByMountIDAsString(iH.MountID)
if err != nil {
return
}
profiler.AddEventNow("before fs.ReaddirPlus()")
- dirEnts, statEnts, _, _, err = mountHandle.ReaddirPlus(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(iH.InodeNumber), maxEntries, prevMarker)
+ dirEnts, statEnts, _, _, err = volumeHandle.ReaddirPlus(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(iH.InodeNumber), maxEntries, prevMarker)
profiler.AddEventNow("after fs.ReaddirPlus()")
if nil == err {
@@ -1642,12 +1624,12 @@ func (s *Server) RpcReadSymlink(in *ReadSymlinkRequest, reply *ReadSymlinkReply)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- target, err := mountHandle.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
+ target, err := volumeHandle.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
reply.Target = target
return
}
@@ -1660,18 +1642,18 @@ func (s *Server) RpcReadSymlinkPath(in *ReadSymlinkPathRequest, reply *ReadSymli
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
// Get the inode
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
- target, err := mountHandle.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
+ target, err := volumeHandle.Readsymlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
reply.Target = target
return
}
@@ -1684,12 +1666,12 @@ func (s *Server) RpcRemoveXAttr(in *RemoveXAttrRequest, reply *Reply) (err error
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.RemoveXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName)
+ err = volumeHandle.RemoveXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName)
return
}
@@ -1701,17 +1683,17 @@ func (s *Server) RpcRemoveXAttrPath(in *RemoveXAttrPathRequest, reply *Reply) (e
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
- err = mountHandle.RemoveXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName)
+ err = volumeHandle.RemoveXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName)
return
}
@@ -1723,12 +1705,12 @@ func (s *Server) RpcRename(in *RenameRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.SrcDirInodeNumber), in.SrcBasename, inode.InodeNumber(in.DstDirInodeNumber), in.DstBasename)
+ err = volumeHandle.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.SrcDirInodeNumber), in.SrcBasename, inode.InodeNumber(in.DstDirInodeNumber), in.DstBasename)
return
}
@@ -1740,7 +1722,7 @@ func (s *Server) RpcRenamePath(in *RenamePathRequest, reply *Reply) (err error)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1749,7 +1731,7 @@ func (s *Server) RpcRenamePath(in *RenamePathRequest, reply *Reply) (err error)
srcParentDir, srcBasename := splitPath(in.Fullpath)
// Get the inode for the (source) parent dir
- srcIno, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcParentDir)
+ srcIno, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcParentDir)
if err != nil {
return
}
@@ -1758,13 +1740,13 @@ func (s *Server) RpcRenamePath(in *RenamePathRequest, reply *Reply) (err error)
dstParentDir, dstBasename := splitPath(in.DstFullpath)
// Get the inode for the dest parent dir
- dstIno, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dstParentDir)
+ dstIno, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dstParentDir)
if err != nil {
return
}
// Do the rename
- err = mountHandle.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcIno, srcBasename, dstIno, dstBasename)
+ err = volumeHandle.Rename(inode.InodeRootUserID, inode.InodeGroupID(0), nil, srcIno, srcBasename, dstIno, dstBasename)
return
}
@@ -1776,12 +1758,12 @@ func (s *Server) RpcResize(in *ResizeRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.NewSize)
+ err = volumeHandle.Resize(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.NewSize)
return
}
@@ -1793,12 +1775,12 @@ func (s *Server) RpcRmdir(in *UnlinkRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
+ err = volumeHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
return
}
@@ -1810,7 +1792,7 @@ func (s *Server) RpcRmdirPath(in *UnlinkPathRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1819,13 +1801,13 @@ func (s *Server) RpcRmdirPath(in *UnlinkPathRequest, reply *Reply) (err error) {
parentDir, basename := splitPath(in.Fullpath)
// Get the inode for the parent dir
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
if err != nil {
return
}
// Do the rmdir
- err = mountHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, basename)
+ err = volumeHandle.Rmdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, basename)
return
}
@@ -1837,7 +1819,7 @@ func (s *Server) RpcSetstat(in *SetstatRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1850,7 +1832,7 @@ func (s *Server) RpcSetstat(in *SetstatRequest, reply *Reply) (err error) {
stat[fs.StatSize] = in.Size
stat[fs.StatNLink] = in.NumLinks
// XXX TODO: add in mode/userid/groupid?
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
return
}
@@ -1862,7 +1844,7 @@ func (s *Server) RpcSetTime(in *SetTimeRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -1876,7 +1858,7 @@ func (s *Server) RpcSetTime(in *SetTimeRequest, reply *Reply) (err error) {
stat[fs.StatATime] = in.ATimeNs
}
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), stat)
return
}
@@ -1889,13 +1871,13 @@ func (s *Server) RpcSetTimePath(in *SetTimePathRequest, reply *Reply) (err error
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
// Get the inode
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
@@ -1909,7 +1891,7 @@ func (s *Server) RpcSetTimePath(in *SetTimePathRequest, reply *Reply) (err error
stat[fs.StatATime] = in.ATimeNs
}
- err = mountHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
+ err = volumeHandle.Setstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, stat)
return
}
@@ -1922,12 +1904,12 @@ func (s *Server) RpcSetXAttr(in *SetXAttrRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.SetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName, in.AttrValue, in.AttrFlags)
+ err = volumeHandle.SetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.AttrName, in.AttrValue, in.AttrFlags)
return
}
@@ -1939,17 +1921,17 @@ func (s *Server) RpcSetXAttrPath(in *SetXAttrPathRequest, reply *Reply) (err err
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, in.Fullpath)
if err != nil {
return
}
- err = mountHandle.SetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName, in.AttrValue, in.AttrFlags)
+ err = volumeHandle.SetXAttr(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ino), in.AttrName, in.AttrValue, in.AttrFlags)
return
}
@@ -1961,12 +1943,12 @@ func (s *Server) RpcStatVFS(in *StatVFSRequest, reply *StatVFS) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- statvfs, err := mountHandle.StatVfs()
+ statvfs, err := volumeHandle.StatVfs()
if err != nil {
return
}
@@ -1995,12 +1977,12 @@ func (s *Server) RpcSymlink(in *SymlinkRequest, reply *InodeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ino, err := mountHandle.Symlink(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, in.Target)
+ ino, err := volumeHandle.Symlink(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, inode.InodeNumber(in.InodeNumber), in.Basename, in.Target)
reply.InodeNumber = int64(uint64(ino))
return
}
@@ -2013,7 +1995,7 @@ func (s *Server) RpcSymlinkPath(in *SymlinkPathRequest, reply *Reply) (err error
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -2022,12 +2004,12 @@ func (s *Server) RpcSymlinkPath(in *SymlinkPathRequest, reply *Reply) (err error
srcParentDir, srcBasename := splitPath(in.Fullpath)
// Get the inode for the (source) parent dir
- srcIno, err := mountHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, srcParentDir)
+ srcIno, err := volumeHandle.LookupPath(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, srcParentDir)
if err != nil {
return
}
- _, err = mountHandle.Symlink(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, srcIno, srcBasename, in.TargetFullpath)
+ _, err = volumeHandle.Symlink(inode.InodeUserID(in.UserID), inode.InodeGroupID(in.GroupID), nil, srcIno, srcBasename, in.TargetFullpath)
return
}
@@ -2039,12 +2021,12 @@ func (s *Server) RpcType(in *TypeRequest, reply *TypeReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- ftype, err := mountHandle.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
+ ftype, err := volumeHandle.GetType(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber))
// Cast as a uint16 here to get the underlying DT_* constant
reply.FileType = uint16(ftype)
return
@@ -2058,12 +2040,12 @@ func (s *Server) RpcUnlink(in *UnlinkRequest, reply *Reply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- err = mountHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
+ err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(in.InodeNumber), in.Basename)
return
}
@@ -2075,7 +2057,7 @@ func (s *Server) RpcUnlinkPath(in *UnlinkPathRequest, reply *Reply) (err error)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- mountHandle, err := lookupMountHandleByMountIDAsString(in.MountID)
+ volumeHandle, err := lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
@@ -2084,27 +2066,27 @@ func (s *Server) RpcUnlinkPath(in *UnlinkPathRequest, reply *Reply) (err error)
parentDir, basename := splitPath(in.Fullpath)
// Get the inode for the parent dir
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDir)
if err != nil {
return
}
// Do the unlink
- err = mountHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, basename)
+ err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, basename)
return
}
func (s *Server) RpcSnapShotCreate(in *SnapShotCreateRequest, reply *SnapShotCreateReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
inodeVolumeHandle inode.VolumeHandle
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- inodeVolumeHandle, err = inode.FetchVolumeHandle(fsMountHandle.VolumeName())
+ inodeVolumeHandle, err = inode.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
@@ -2116,15 +2098,15 @@ func (s *Server) RpcSnapShotCreate(in *SnapShotCreateRequest, reply *SnapShotCre
func (s *Server) RpcSnapShotDelete(in *SnapShotDeleteRequest, reply *SnapShotDeleteReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
inodeVolumeHandle inode.VolumeHandle
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- inodeVolumeHandle, err = inode.FetchVolumeHandle(fsMountHandle.VolumeName())
+ inodeVolumeHandle, err = inode.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
@@ -2136,15 +2118,15 @@ func (s *Server) RpcSnapShotDelete(in *SnapShotDeleteRequest, reply *SnapShotDel
func (s *Server) RpcSnapShotListByID(in *SnapShotListRequest, reply *SnapShotListReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsMountHandle.VolumeName())
+ headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
@@ -2156,15 +2138,15 @@ func (s *Server) RpcSnapShotListByID(in *SnapShotListRequest, reply *SnapShotLis
func (s *Server) RpcSnapShotListByName(in *SnapShotListRequest, reply *SnapShotListReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsMountHandle.VolumeName())
+ headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
@@ -2176,15 +2158,15 @@ func (s *Server) RpcSnapShotListByName(in *SnapShotListRequest, reply *SnapShotL
func (s *Server) RpcSnapShotListByTime(in *SnapShotListRequest, reply *SnapShotListReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsMountHandle.VolumeName())
+ headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
@@ -2196,16 +2178,16 @@ func (s *Server) RpcSnapShotListByTime(in *SnapShotListRequest, reply *SnapShotL
func (s *Server) RpcSnapShotLookupByName(in *SnapShotLookupByNameRequest, reply *SnapShotLookupByNameReply) (err error) {
var (
- fsMountHandle fs.MountHandle
+ fsVolumeHandle fs.VolumeHandle
headhunterVolumeHandle headhunter.VolumeHandle
ok bool
)
- fsMountHandle, err = lookupMountHandleByMountIDAsString(in.MountID)
+ fsVolumeHandle, err = lookupVolumeHandleByMountIDAsString(in.MountID)
if nil != err {
return
}
- headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsMountHandle.VolumeName())
+ headhunterVolumeHandle, err = headhunter.FetchVolumeHandle(fsVolumeHandle.VolumeName())
if nil != err {
return
}
diff --git a/jrpcfs/io.go b/jrpcfs/io.go
index 0e6c81301..d08f1b5a5 100644
--- a/jrpcfs/io.go
+++ b/jrpcfs/io.go
@@ -363,7 +363,7 @@ func makeReq(bytes []byte, req *ioRequest) {
func ioHandle(conn net.Conn) {
var (
- mountHandle fs.MountHandle
+ volumeHandle fs.VolumeHandle
)
// NOTE: This function runs in a goroutine and only processes
@@ -407,9 +407,9 @@ func ioHandle(conn net.Conn) {
}
profiler.AddEventNow("before fs.Write()")
- mountHandle, err = lookupMountHandleByMountIDAsByteArray(ctx.req.mountID)
+ volumeHandle, err = lookupVolumeHandleByMountIDAsByteArray(ctx.req.mountID)
if err == nil {
- ctx.resp.ioSize, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ctx.req.inodeID), ctx.req.offset, ctx.data, profiler)
+ ctx.resp.ioSize, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ctx.req.inodeID), ctx.req.offset, ctx.data, profiler)
}
profiler.AddEventNow("after fs.Write()")
@@ -425,9 +425,9 @@ func ioHandle(conn net.Conn) {
}
profiler.AddEventNow("before fs.Read()")
- mountHandle, err = lookupMountHandleByMountIDAsByteArray(ctx.req.mountID)
+ volumeHandle, err = lookupVolumeHandleByMountIDAsByteArray(ctx.req.mountID)
if err == nil {
- ctx.data, err = mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ctx.req.inodeID), ctx.req.offset, ctx.req.length, profiler)
+ ctx.data, err = volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(ctx.req.inodeID), ctx.req.offset, ctx.req.length, profiler)
}
profiler.AddEventNow("after fs.Read()")
diff --git a/jrpcfs/lease.go b/jrpcfs/lease.go
new file mode 100644
index 000000000..63cdabbcf
--- /dev/null
+++ b/jrpcfs/lease.go
@@ -0,0 +1,595 @@
+package jrpcfs
+
+import (
+ "container/list"
+ "fmt"
+
+ "github.com/swiftstack/ProxyFS/blunder"
+ "github.com/swiftstack/ProxyFS/inode"
+)
+
+// RpcLease is called to request a Shared or Exclusive Lease, to Promote a
+// granted Shared Lease, to Demote a granted Exclusive Lease, or to Release a
+// granted Lease of either type.
+//
+func (s *Server) RpcLease(in *LeaseRequest, reply *LeaseReply) (err error) {
+ var (
+ inodeLease *inodeLeaseStruct
+ inodeNumber inode.InodeNumber
+ leaseRequestOperation *leaseRequestOperationStruct
+ mount *mountStruct
+ ok bool
+ volume *volumeStruct
+ )
+
+ enterGate()
+ defer leaveGate()
+
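+	// Reject any unsupported LeaseRequestType before taking any locks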
+ switch in.LeaseRequestType {
+ case LeaseRequestTypeShared:
+ case LeaseRequestTypePromote:
+ case LeaseRequestTypeExclusive:
+ case LeaseRequestTypeDemote:
+ case LeaseRequestTypeRelease:
+ default:
+ reply = &LeaseReply{
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("LeaseRequestType %v not supported", in.LeaseRequestType)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+
+ inodeNumber = inode.InodeNumber(in.InodeHandle.InodeNumber)
+
+ globals.volumesLock.Lock()
+
+ mount, ok = globals.mountMapByMountIDAsString[in.MountID]
+ if !ok {
+ globals.volumesLock.Unlock()
+ reply = &LeaseReply{
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("MountID %s not found in jrpcfs globals.mountMapByMountIDAsString", in.MountID)
+ err = blunder.AddError(err, blunder.BadMountIDError)
+ return
+ }
+
+ volume = mount.volume
+
+ if (in.LeaseRequestType == LeaseRequestTypeShared) || (in.LeaseRequestType == LeaseRequestTypeExclusive) {
+ inodeLease, ok = volume.inodeLeaseMap[inodeNumber]
+ if !ok {
+ inodeLease = &inodeLeaseStruct{
+ volume: volume,
+ inodeNumber: inodeNumber,
+ leaseState: inodeLeaseStateNone,
+ requestChan: make(chan *leaseRequestOperationStruct),
+ sharedHoldersList: list.New(),
+ promotingHolder: nil,
+ exclusiveHolder: nil,
+ releasingHoldersList: list.New(),
+ requestedList: list.New(),
+ }
+
+ volume.inodeLeaseMap[inodeNumber] = inodeLease
+
+ volume.leaseHandlerWG.Add(1)
+ go inodeLease.handler()
+ }
+ } else { // in.LeaseRequestType is one of LeaseRequestType{Promote|Demote|Release}
+ inodeLease, ok = volume.inodeLeaseMap[inodeNumber]
+ if !ok {
+ globals.volumesLock.Unlock()
+ reply = &LeaseReply{
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("LeaseRequestType %v not allowed for non-existent Lease", in.LeaseRequestType)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+ }
+
+ // Send Lease Request Operation to *inodeLeaseStruct.handler()
+ //
+ // Note that we still hold the volumesLock, so inodeLease can't disappear out from under us
+
+ leaseRequestOperation = &leaseRequestOperationStruct{
+ mount: mount,
+ inodeLease: inodeLease,
+ LeaseRequestType: in.LeaseRequestType,
+ replyChan: make(chan *LeaseReply),
+ }
+
+ inodeLease.requestChan <- leaseRequestOperation
+
+ globals.volumesLock.Unlock()
+
+ reply = <-leaseRequestOperation.replyChan
+
+ if reply.LeaseReplyType == LeaseReplyTypeDenied {
+ err = fmt.Errorf("LeaseRequestType %v was denied", in.LeaseRequestType)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ } else {
+ err = nil
+ }
+
+ return
+}
+
+func (inodeLease *inodeLeaseStruct) handler() {
+ // TODO
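+	//       A sketch of the intended behavior: consume leaseRequestOperations
+	//       from inodeLease.requestChan, drive the per-inode lease state
+	//       machine, and answer each operation on its replyChan.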
+}
+
+/*
+func (s *Server) RpcLeaseOLD(in *LeaseRequest, reply *LeaseReply) (err error) {
+ var (
+ inodeLease *inodeLeaseStruct
+ leaseRequest *leaseRequestStruct
+ mount *mountStruct
+ ok bool
+ volume *volumeStruct
+ )
+
+ enterGate()
+
+ volume = mount.volume
+
+ switch in.LeaseRequestType {
+
+ case LeaseRequestTypeShared, LeaseRequestTypeExclusive:
+
+ if LeaseHandleEmpty != in.LeaseHandle {
+ globals.volumesLock.Unlock()
+ leaveGate()
+ reply = &LeaseReply{
+ LeaseHandle: in.LeaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("For LeaseRequestType %v, LeaseHandle (%v) must be %v", in.LeaseRequestType, in.LeaseHandle, LeaseHandleEmpty)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+
+ volume.lastLeaseHandle++
+
+ inodeLease, ok = volume.inodeLeaseMap[inode.InodeNumber(in.InodeNumber)]
+ if !ok {
+ inodeLease = &inodeLeaseStruct{
+ inodeNumber: inode.InodeNumber(in.InodeNumber),
+ volume: volume,
+ leaseState: inodeLeaseStateNone,
+ lastGrantTime: time.Time{},
+ exclusiveHolder: nil,
+ promotingHolder: nil,
+ sharedHoldersList: list.New(),
+ waitersList: list.New(),
+ }
+
+ volume.inodeLeaseMap[inodeLease.inodeNumber] = inodeLease
+ }
+
+ leaseRequest = &leaseRequestStruct{
+ leaseHandle: volume.lastLeaseHandle,
+ inodeLease: inodeLease,
+ mount: mount,
+ requestState: leaseRequestStateNone,
+ listElement: nil,
+ operationCV: sync.NewCond(&globals.volumesLock),
+ operationList: list.New(),
+ unblockCV: sync.NewCond(&globals.volumesLock),
+ replyChan: make(chan *LeaseReply, 1),
+ }
+
+ mount.leaseRequestMap[leaseRequest.leaseHandle] = leaseRequest
+
+ volume.leaseHandlerWG.Add(1)
+ go leaseRequest.handler()
+
+ case LeaseRequestTypePromote, LeaseRequestTypeDemote, LeaseRequestTypeRelease:
+
+ if LeaseHandleEmpty == in.LeaseHandle {
+ globals.volumesLock.Unlock()
+ leaveGate()
+ reply = &LeaseReply{
+ LeaseHandle: in.LeaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("For LeaseRequestType %v, LeaseHandle must not be %v", in.LeaseRequestType, LeaseHandleEmpty)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+
+ leaseRequest, ok = mount.leaseRequestMap[in.LeaseHandle]
+ if !ok {
+ globals.volumesLock.Unlock()
+ leaveGate()
+ reply = &LeaseReply{
+ LeaseHandle: in.LeaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("For LeaseRequestType %v, unknown LeaseHandle (%v)", in.LeaseRequestType, in.LeaseHandle)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+ if leaseRequest.mount != mount {
+ globals.volumesLock.Unlock()
+ leaveGate()
+ reply = &LeaseReply{
+ LeaseHandle: in.LeaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("For LeaseRequestType %v, invalid LeaseHandle (%v)", in.LeaseRequestType, in.LeaseHandle)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+
+ default:
+ globals.volumesLock.Unlock()
+ leaveGate()
+ reply = &LeaseReply{
+ LeaseHandle: in.LeaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ err = fmt.Errorf("LeaseRequestType %v not supported", in.LeaseRequestType)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ return
+ }
+
+ _ = leaseRequest.operationList.PushBack(in)
+
+ leaseRequest.operationCV.Signal()
+
+ globals.volumesLock.Unlock()
+ leaveGate()
+
+ reply = <-leaseRequest.replyChan
+
+ if LeaseReplyTypeDenied == reply.LeaseReplyType {
+ err = fmt.Errorf("LeaseRequestType %v was denied", in.LeaseRequestType)
+ err = blunder.AddError(err, blunder.BadLeaseRequest)
+ } else {
+ err = nil
+ }
+
+ return
+}
+
+func (leaseRequest *leaseRequestStruct) handler() {
+ var (
+ ok bool
+ operationListElement *list.Element
+ operationReply *LeaseReply
+ operationReplyType LeaseReplyType
+ operationRequest *LeaseRequest
+ )
+
+ // Fall into LeaseRequestServer state machine
+
+ for {
+ globals.volumesLock.Lock()
+
+ leaseRequest.operationCV.Wait()
+
+ operationListElement = leaseRequest.operationList.Front()
+ if nil == operationListElement {
+ globals.volumesLock.Unlock()
+ continue
+ }
+
+ leaseRequest.operationList.Remove(operationListElement)
+ operationRequest = operationListElement.Value.(*LeaseRequest)
+
+ // See if LeaseManagement has determined we are to expire the currently held Lease
+
+ if nil == operationRequest {
+ // Drain any outstanding operationRequests
+
+ for 0 != leaseRequest.operationList.Len() {
+ operationListElement = leaseRequest.operationList.Front()
+ leaseRequest.operationList.Remove(operationListElement)
+ operationRequest = operationListElement.Value.(*LeaseRequest)
+
+ if nil != operationRequest {
+ operationReply = &LeaseReply{
+ LeaseHandle: leaseRequest.leaseHandle,
+ LeaseReplyType: LeaseReplyTypeDenied,
+ }
+ leaseRequest.replyChan <- operationReply
+ }
+ }
+
+ // If necessary, remove this leaseRequest from its inodeLease holdersList or waitersList
+
+ switch leaseRequest.requestState {
+ case leaseRequestStateNone:
+ // Not on either list
+ case leaseRequestStateSharedGranted, leaseRequestStateSharedPromoting, leaseRequestStateSharedReleasing, leaseRequestStateExclusiveGranted, leaseRequestStateExclusiveDemoting, leaseRequestStateExclusiveReleasing:
+ leaseRequest.inodeLease.holdersList.Remove(leaseRequest.listElement)
+ case leaseRequestStateSharedRequested, leaseRequestStateExclusiveRequested:
+ leaseRequest.inodeLease.waitersList.Remove(leaseRequest.listElement)
+ }
+
+ // Remove this listRequest from mount
+
+ delete(leaseRequest.mount.leaseRequestMap, leaseRequest.leaseHandle)
+
+ // Cleanly exit
+
+ globals.volumesLock.Unlock()
+ leaseRequest.inodeLease.volume.leaseHandlerWG.Done()
+ runtime.Goexit()
+ }
+
+ switch leaseRequest.requestState {
+ case leaseRequestStateNone:
+ if (LeaseRequestTypeShared != operationRequest.LeaseRequestType) && (LeaseRequestTypeExclusive != operationRequest.LeaseRequestType) {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ leaseRequest.requestState = leaseRequestStateExclusiveGranted
+ leaseRequest.listElement = leaseRequest.inodeLease.sharedHoldersList.PushBack(leaseRequest)
+ leaseRequest.inodeLease.leaseState = inodeLeaseStateExclusiveGrantedRecently
+ leaseRequest.inodeLease.lastGrantTime = time.Now()
+ operationReplyType = LeaseReplyTypeExclusive
+ case inodeLeaseStateSharedGrantedRecently:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedGranted
+ leaseRequest.listElement = leaseRequest.inodeLease.sharedHoldersList.PushBack(leaseRequest)
+ leaseRequest.inodeLease.leaseState = inodeLeaseStateSharedGrantedRecently
+ leaseRequest.inodeLease.lastGrantTime = time.Now()
+ operationReplyType = LeaseReplyTypeShared
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ // TODO
+ }
+ case inodeLeaseStateSharedGrantedLongAgo:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedGranted
+ leaseRequest.listElement = leaseRequest.inodeLease.sharedHoldersList.PushBack(leaseRequest)
+ leaseRequest.inodeLease.leaseState = inodeLeaseStateSharedGrantedRecently
+ leaseRequest.inodeLease.lastGrantTime = time.Now()
+ operationReplyType = LeaseReplyTypeShared
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ // TODO - block
+ }
+ case inodeLeaseStateSharedReleasing:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+ // TODO - block
+ case inodeLeaseStateSharedExpired:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+ // TODO - block
+ case inodeLeaseStateExclusiveGrantedRecently:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+ // TODO - possibly delay until inodeLeaseStateExclusiveGrantedLongAgo
+					// TODO - tell current exclusive holder to either demote or release
+ // TODO - block
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+					// TODO - tell current exclusive holder to either demote or release
+ // TODO - block
+ case inodeLeaseStateExclusiveReleasing:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+					// TODO - tell current exclusive holder to either demote or release
+ // TODO - block
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO - ???
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ case inodeLeaseStateExclusiveExpired:
+ if LeaseRequestTypeShared == operationRequest.LeaseRequestType {
+ leaseRequest.requestState = leaseRequestStateSharedRequested
+ } else { // LeaseRequestTypeExclusive == operationRequest.LeaseRequestType
+ leaseRequest.requestState = leaseRequestStateExclusiveRequested
+ }
+ leaseRequest.listElement = leaseRequest.inodeLease.waitersList.PushBack(leaseRequest)
+					// TODO - tell current exclusive holder to either demote or release
+ // TODO - block
+ }
+ }
+ case leaseRequestStateSharedRequested:
+ operationReplyType = LeaseReplyTypeDenied
+ case leaseRequestStateSharedGranted:
+ if LeaseRequestTypeRelease != operationRequest.LeaseRequestType {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ // TODO
+ case inodeLeaseStateSharedGrantedRecently:
+ // TODO
+ case inodeLeaseStateSharedGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateSharedReleasing:
+ // TODO
+ case inodeLeaseStateSharedExpired:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedRecently:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateExclusiveReleasing:
+ // TODO
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO
+ case inodeLeaseStateExclusiveExpired:
+ // TODO
+ }
+ }
+ case leaseRequestStateSharedPromoting:
+ operationReplyType = LeaseReplyTypeDenied
+ case leaseRequestStateSharedReleasing:
+ if LeaseRequestTypeRelease != operationRequest.LeaseRequestType {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ // TODO
+ case inodeLeaseStateSharedGrantedRecently:
+ // TODO
+ case inodeLeaseStateSharedGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateSharedReleasing:
+ // TODO
+ case inodeLeaseStateSharedExpired:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedRecently:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateExclusiveReleasing:
+ // TODO
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO
+ case inodeLeaseStateExclusiveExpired:
+ // TODO
+ }
+ }
+ case leaseRequestStateExclusiveRequested:
+ operationReplyType = LeaseReplyTypeDenied
+ case leaseRequestStateExclusiveGranted:
+ if (LeaseRequestTypeDemote != operationRequest.LeaseRequestType) && (LeaseRequestTypeRelease != operationRequest.LeaseRequestType) {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ // TODO
+ case inodeLeaseStateSharedGrantedRecently:
+ // TODO
+ case inodeLeaseStateSharedGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateSharedReleasing:
+ // TODO
+ case inodeLeaseStateSharedExpired:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedRecently:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateExclusiveReleasing:
+ // TODO
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO
+ case inodeLeaseStateExclusiveExpired:
+ // TODO
+ }
+ }
+ case leaseRequestStateExclusiveDemoting:
+ if (LeaseRequestTypeDemote != operationRequest.LeaseRequestType) && (LeaseRequestTypeRelease != operationRequest.LeaseRequestType) {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ // TODO
+ case inodeLeaseStateSharedGrantedRecently:
+ // TODO
+ case inodeLeaseStateSharedGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateSharedReleasing:
+ // TODO
+ case inodeLeaseStateSharedExpired:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedRecently:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateExclusiveReleasing:
+ // TODO
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO
+ case inodeLeaseStateExclusiveExpired:
+ // TODO
+ }
+ }
+ case leaseRequestStateExclusiveReleasing:
+ if LeaseRequestTypeRelease != operationRequest.LeaseRequestType {
+ operationReplyType = LeaseReplyTypeDenied
+ } else {
+ switch leaseRequest.inodeLease.leaseState {
+ case inodeLeaseStateNone:
+ // TODO
+ case inodeLeaseStateSharedGrantedRecently:
+ // TODO
+ case inodeLeaseStateSharedGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateSharedReleasing:
+ // TODO
+ case inodeLeaseStateSharedExpired:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedRecently:
+ // TODO
+ case inodeLeaseStateExclusiveGrantedLongAgo:
+ // TODO
+ case inodeLeaseStateExclusiveReleasing:
+ // TODO
+ case inodeLeaseStateExclusiveDemoting:
+ // TODO
+ case inodeLeaseStateExclusiveExpired:
+ // TODO
+ }
+ }
+ }
+
+ operationReply = &LeaseReply{
+ LeaseHandle: leaseRequest.leaseHandle,
+ LeaseReplyType: operationReplyType,
+ }
+
+ // Now address the race with RpcLease() that may have queued subsequent operations on this leaseRequest
+
+ enterGate()
+ globals.volumesLock.Lock()
+
+ leaseRequest.replyChan <- operationReply
+
+ // If leaseRequest.requestState is now leaseRequestStateNone, we can destroy this leaseRequest
+
+ if leaseRequestStateNone == leaseRequest.requestState {
+ // We can destroy this leaseRequest... but first, subsequent operations should be denied
+
+ for {
+
+ }
+ }
+
+		// If destroying this leaseRequest and inodeLease.leaseState is now inodeLeaseStateNone, we can destroy this inodeLease as well
+
+ // In either case, if we destroyed this leaseRequest, we can exit this goroutine
+ }
+}
+*/
diff --git a/jrpcfs/middleware.go b/jrpcfs/middleware.go
index e47c8193f..34c99aff9 100644
--- a/jrpcfs/middleware.go
+++ b/jrpcfs/middleware.go
@@ -17,47 +17,25 @@ import (
// NOTE: These functions should only verify the arguments and then call
// functions in package fs since there may be fs locking required.
-// Utility function to mount account if not already mounted and return needed fields
-func mountIfNotMounted(virtPath string) (accountName string, containerName string, objectName string, volumeName string, mountHandle fs.MountHandle, err error) {
+// parseVirtPath extracts path components and fetches the corresponding fs.VolumeHandle from virtPath
+func parseVirtPath(virtPath string) (accountName string, vContainerName string, vObjectName string, volumeName string, volumeHandle fs.VolumeHandle, err error) {
- // Extract vAccount and vContainer from VirtPath
- accountName, containerName, objectName, err = utils.PathToAcctContObj(virtPath)
- if err != nil {
- return "", "", "", "", nil, err
- }
-
- // Map vAccountName to a volumeName
- volumeName, ok := fs.AccountNameToVolumeName(accountName)
- if !ok {
- err = fmt.Errorf("%v is not a recognized accountName", accountName)
- return "", "", "", "", nil, err
+ // Extract Account, vContainer, and vObject from VirtPath
+ accountName, vContainerName, vObjectName, err = utils.PathToAcctContObj(virtPath)
+ if nil != err {
+ return
}
- // Multiple middleware threads could be attempting to mount the volume at the same time.
- //
- // fs.Mount() allows this since the fs wants to support read-only and read-write mounts from Samba.
- // However, this results in two different mountHandle's for the different jrpcfs threads supporting middleware.
- //
- // Therefore, jrpcfs has to do its own serialization and store the result in globals.bimodalMountMap.
- globals.mapsLock.Lock()
- defer globals.mapsLock.Unlock()
-
- // Is volume mounted for this user? If is, return the results.
- mountHandle, ok = globals.bimodalMountMap[volumeName]
- if ok {
- return accountName, containerName, objectName, volumeName, mountHandle, err
+ // Map accountName to volumeHandle
+ volumeHandle, err = fs.FetchVolumeHandleByAccountName(accountName)
+ if nil != err {
+ return
}
- // We have not already mounted it, mount it now and store result in bimodalMountMap
- // TODO - add proper mountOpts
- mountHandle, err = fs.MountByVolumeName(volumeName, fs.MountOptions(0))
- if err != nil {
- logger.DebugfIDWithError(internalDebug, err, "fs.Mount() of acct: %v container: %v failed!", accountName, containerName)
- return accountName, containerName, objectName, volumeName, nil, err
- }
+ // Map volumeHandle to volumeName
+ volumeName = volumeHandle.VolumeName()
- globals.bimodalMountMap[volumeName] = mountHandle
- return accountName, containerName, objectName, volumeName, mountHandle, err
+ return
}
// RpcCreateContainer is used by Middleware to PUT of a container.
@@ -73,7 +51,7 @@ func (s *Server) RpcCreateContainer(in *CreateContainerRequest, reply *CreateCon
// The blunder error package supports this, we just need to add some helper functions.
//defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- accountName, containerName, _, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ accountName, containerName, _, _, volumeHandle, err := parseVirtPath(in.VirtPath)
// Validate the components of the containerName
err = fs.ValidateFullPath(containerName)
@@ -87,7 +65,7 @@ func (s *Server) RpcCreateContainer(in *CreateContainerRequest, reply *CreateCon
}
// Make the directory
- _, err = mountHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, containerName, inode.PosixModePerm)
+ _, err = volumeHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, containerName, inode.PosixModePerm)
if err != nil {
logger.DebugfIDWithError(internalDebug, err, "fs.Mkdir() of acct: %v vContainerName: %v failed!", accountName, containerName)
@@ -104,7 +82,7 @@ func (s *Server) RpcDelete(in *DeleteReq, reply *DeleteReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, containerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, containerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
parentDir, baseName := splitPath(containerName + "/" + objectName)
@@ -115,7 +93,7 @@ func (s *Server) RpcDelete(in *DeleteReq, reply *DeleteReply) (err error) {
}
// Call fs to delete the baseName if it is a file or an empty directory.
- err = mountHandle.MiddlewareDelete(parentDir, baseName)
+ err = volumeHandle.MiddlewareDelete(parentDir, baseName)
return err
}
@@ -126,13 +104,13 @@ func (s *Server) RpcGetAccount(in *GetAccountReq, reply *GetAccountReply) (err e
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, _, _, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, _, _, _, volumeHandle, err := parseVirtPath(in.VirtPath)
if err != nil {
logger.ErrorfWithError(err, "RpcGetAccount: error mounting share for %s", in.VirtPath)
return err
}
- entries, mtime, ctime, err := mountHandle.MiddlewareGetAccount(in.MaxEntries, in.Marker, in.EndMarker)
+ entries, mtime, ctime, err := volumeHandle.MiddlewareGetAccount(in.MaxEntries, in.Marker, in.EndMarker)
if err != nil {
return err
}
@@ -148,7 +126,7 @@ func (s *Server) RpcHead(in *HeadReq, reply *HeadReply) (err error) {
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, vContainerName, vObjectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, vContainerName, vObjectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
if err != nil {
logger.ErrorfWithError(err, "RpcHead: error mounting share for %s", in.VirtPath)
return err
@@ -159,7 +137,7 @@ func (s *Server) RpcHead(in *HeadReq, reply *HeadReply) (err error) {
entityPath = entityPath + "/" + vObjectName
}
- resp, err := mountHandle.MiddlewareHeadResponse(entityPath)
+ resp, err := volumeHandle.MiddlewareHeadResponse(entityPath)
if err != nil {
if !blunder.Is(err, blunder.NotFoundError) {
logger.ErrorfWithError(err, "RpcHead: error retrieving metadata for %s", in.VirtPath)
@@ -184,17 +162,17 @@ func (s *Server) RpcGetContainer(in *GetContainerReq, reply *GetContainerReply)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, vContainerName, _, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, vContainerName, _, _, volumeHandle, err := parseVirtPath(in.VirtPath)
if err != nil {
logger.ErrorfWithError(err, "RpcGetContainer: error mounting share for %s", in.VirtPath)
return err
}
- entries, err := mountHandle.MiddlewareGetContainer(vContainerName, in.MaxEntries, in.Marker, in.EndMarker, in.Prefix, in.Delimiter)
+ entries, err := volumeHandle.MiddlewareGetContainer(vContainerName, in.MaxEntries, in.Marker, in.EndMarker, in.Prefix, in.Delimiter)
if err != nil {
return err
}
- resp, err := mountHandle.MiddlewareHeadResponse(vContainerName)
+ resp, err := volumeHandle.MiddlewareHeadResponse(vContainerName)
if err != nil {
return err
}
@@ -211,11 +189,11 @@ func (s *Server) RpcGetObject(in *GetObjectReq, reply *GetObjectReply) (err erro
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, vContainerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, vContainerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
mountRelativePath := vContainerName + "/" + objectName
- resp, err := mountHandle.MiddlewareGetObject(mountRelativePath, in.ReadEntsIn, &reply.ReadEntsOut)
+ resp, err := volumeHandle.MiddlewareGetObject(mountRelativePath, in.ReadEntsIn, &reply.ReadEntsOut)
if err != nil {
if !blunder.Is(err, blunder.NotFoundError) {
logger.ErrorfWithError(err, "RpcGetObject(): error retrieving metadata for %s", in.VirtPath)
@@ -239,7 +217,7 @@ func (s *Server) RpcPost(in *MiddlewarePostReq, reply *MiddlewarePostReply) (err
flog := logger.TraceEnter("in.", in)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
- accountName, containerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ accountName, containerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
// Don't allow a POST on an invalid account or on just an account
if accountName == "" || containerName == "" {
@@ -256,7 +234,7 @@ func (s *Server) RpcPost(in *MiddlewarePostReq, reply *MiddlewarePostReply) (err
parentDir, baseName = splitPath(containerName)
}
- err = mountHandle.MiddlewarePost(parentDir, baseName, in.NewMetaData, in.OldMetaData)
+ err = volumeHandle.MiddlewarePost(parentDir, baseName, in.NewMetaData, in.OldMetaData)
return err
@@ -267,7 +245,7 @@ func (s *Server) RpcMiddlewareMkdir(in *MiddlewareMkdirReq, reply *MiddlewareMkd
flog := logger.TraceEnter("in.", in)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
- _, containerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, containerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
// Require a reference to an object; you can't create a container with this method.
if objectName == "" {
@@ -277,7 +255,7 @@ func (s *Server) RpcMiddlewareMkdir(in *MiddlewareMkdirReq, reply *MiddlewareMkd
return err
}
- mtime, ctime, inodeNumber, numWrites, err := mountHandle.MiddlewareMkdir(containerName, objectName, in.Metadata)
+ mtime, ctime, inodeNumber, numWrites, err := volumeHandle.MiddlewareMkdir(containerName, objectName, in.Metadata)
reply.ModificationTime = mtime
reply.AttrChangeTime = ctime
reply.InodeNumber = int64(uint64(inodeNumber))
@@ -293,11 +271,11 @@ func (s *Server) RpcPutComplete(in *PutCompleteReq, reply *PutCompleteReply) (er
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, containerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, containerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
// Call fs to complete the creation of the inode for the file and
// the directories.
- mtime, ctime, ino, numWrites, err := mountHandle.MiddlewarePutComplete(containerName, objectName, in.PhysPaths, in.PhysLengths, in.Metadata)
+ mtime, ctime, ino, numWrites, err := volumeHandle.MiddlewarePutComplete(containerName, objectName, in.PhysPaths, in.PhysLengths, in.Metadata)
reply.ModificationTime = mtime
reply.AttrChangeTime = ctime
reply.InodeNumber = int64(uint64(ino))
@@ -317,7 +295,7 @@ func (s *Server) RpcPutLocation(in *PutLocationReq, reply *PutLocationReply) (er
flog := logger.TraceEnter("in.", in)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
- accountName, containerName, objectName, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ accountName, containerName, objectName, _, volumeHandle, err := parseVirtPath(in.VirtPath)
// Validate the components of the objectName
err = fs.ValidateFullPath(containerName + "/" + objectName)
@@ -332,7 +310,7 @@ func (s *Server) RpcPutLocation(in *PutLocationReq, reply *PutLocationReply) (er
}
// Via fs package, ask inode package to provision object
- reply.PhysPath, err = mountHandle.CallInodeToProvisionObject()
+ reply.PhysPath, err = volumeHandle.CallInodeToProvisionObject()
if err != nil {
logger.DebugfIDWithError(internalDebug, err, "fs.CallInodeToProvisionObject() of acct: %v container: %v failed!", accountName, containerName)
@@ -349,12 +327,12 @@ func (s *Server) RpcPutContainer(in *PutContainerReq, reply *PutContainerReply)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, containerName, _, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, containerName, _, _, volumeHandle, err := parseVirtPath(in.VirtPath)
if err != nil {
return err
}
- err = mountHandle.MiddlewarePutContainer(containerName, in.OldMetadata, in.NewMetadata)
+ err = volumeHandle.MiddlewarePutContainer(containerName, in.OldMetadata, in.NewMetadata)
return err
}
@@ -368,11 +346,11 @@ func (s *Server) RpcCoalesce(in *CoalesceReq, reply *CoalesceReply) (err error)
defer func() { flog.TraceExitErr("reply.", err, reply) }()
defer func() { rpcEncodeError(&err) }() // Encode error for return by RPC
- _, destContainer, destObject, _, mountHandle, err := mountIfNotMounted(in.VirtPath)
+ _, destContainer, destObject, _, volumeHandle, err := parseVirtPath(in.VirtPath)
var ino uint64
ino, reply.NumWrites, reply.AttrChangeTime, reply.ModificationTime, err =
- mountHandle.MiddlewareCoalesce(
+ volumeHandle.MiddlewareCoalesce(
destContainer+"/"+destObject, in.NewMetaData, in.ElementAccountRelativePaths)
reply.InodeNumber = int64(ino)
return
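
Editor's note: with mountIfNotMounted gone, every middleware RPC in this file converges on the same shape: parse the VirtPath into an fs.VolumeHandle once via parseVirtPath, validate, and delegate to exactly one fs call (which, per the NOTE at the top of the file, handles its own locking). The helper below is an illustrative condensation of RpcDelete using only identifiers that appear in this diff; it is a sketch of the calling convention, not code from the patch.

// deleteByVirtPath condenses the post-patch calling convention used by the
// RPC handlers above: resolve the fs.VolumeHandle from the VirtPath with
// parseVirtPath, split the mount-relative path, then make a single fs call.
func deleteByVirtPath(virtPath string) error {
	_, containerName, objectName, _, volumeHandle, err := parseVirtPath(virtPath)
	if err != nil {
		return err
	}

	parentDir, baseName := splitPath(containerName + "/" + objectName)

	// One delegated call into package fs; fs does any locking it needs.
	return volumeHandle.MiddlewareDelete(parentDir, baseName)
}
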
diff --git a/jrpcfs/middleware_test.go b/jrpcfs/middleware_test.go
index 1fd538a9d..e0b36fa03 100644
--- a/jrpcfs/middleware_test.go
+++ b/jrpcfs/middleware_test.go
@@ -154,6 +154,9 @@ func testSetup() []func() {
"JSONRPCServer.RetryRPCTTLCompleted=10s",
"JSONRPCServer.RetryRPCAckTrim=10ms",
"JSONRPCServer.DataPathLogging=false",
+ "JSONRPCServer.MinLeaseDuration=250ms",
+ "JSONRPCServer.LeaseInterruptInterval=250ms",
+ "JSONRPCServer.LeaseInterruptLimit=20",
}
tempDir, err = ioutil.TempDir("", "jrpcfs_test")
@@ -181,49 +184,43 @@ func testSetup() []func() {
panic(fmt.Sprintf("transitions.Up() failed: %v", err))
}
- // Unfortunately, we cannot call the jrpcfs Up() method here since it will start the RPC server.
- // Therefore, we have to do this step here.
- if globals.bimodalMountMap == nil {
- globals.bimodalMountMap = make(map[string]fs.MountHandle)
- }
-
return cleanupFuncs
}
func fsStatPath(accountName string, path string) fs.Stat {
- _, _, _, _, mountHandle, err := mountIfNotMounted(accountName)
+ _, _, _, _, volumeHandle, err := parseVirtPath(accountName)
if err != nil {
panic(err)
}
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, path)
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, path)
if err != nil {
panic(err)
}
- stats, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
+ stats, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino)
if err != nil {
panic(err)
}
return stats
}
-func fsMkDir(mountHandle fs.MountHandle, parentDirInode inode.InodeNumber, newDirName string) (createdInode inode.InodeNumber) {
- createdInode, err := mountHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, newDirName, inode.PosixModePerm)
+func fsMkDir(volumeHandle fs.VolumeHandle, parentDirInode inode.InodeNumber, newDirName string) (createdInode inode.InodeNumber) {
+ createdInode, err := volumeHandle.Mkdir(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, newDirName, inode.PosixModePerm)
if err != nil {
panic(fmt.Sprintf("failed to create %v: %v", newDirName, err))
}
return
}
-func fsCreateFile(mountHandle fs.MountHandle, parentDirInode inode.InodeNumber, newFileName string) (createdInode inode.InodeNumber) {
- createdInode, err := mountHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, newFileName, inode.PosixModePerm)
+func fsCreateFile(volumeHandle fs.VolumeHandle, parentDirInode inode.InodeNumber, newFileName string) (createdInode inode.InodeNumber) {
+ createdInode, err := volumeHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, newFileName, inode.PosixModePerm)
if err != nil {
panic(fmt.Sprintf("failed to create file %v: %v", newFileName, err))
}
return
}
-func fsCreateSymlink(mountHandle fs.MountHandle, parentDirInode inode.InodeNumber, symlinkName string, symlinkTarget string) {
- _, err := mountHandle.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, symlinkName, symlinkTarget)
+func fsCreateSymlink(volumeHandle fs.VolumeHandle, parentDirInode inode.InodeNumber, symlinkName string, symlinkTarget string) {
+ _, err := volumeHandle.Symlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, parentDirInode, symlinkName, symlinkTarget)
if err != nil {
panic(fmt.Sprintf("failed to create symlink %s -> %s: %v", symlinkName, symlinkTarget, err))
}
@@ -278,30 +275,30 @@ func middlewarePutLocation(t *testing.T, server *Server, newPutPath string, expe
func makeSomeFilesAndSuch() {
// we should have enough stuff up now that we can actually make
// some files and directories and such
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
- cInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c")
- cNestedInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c-nested")
- fsCreateSymlink(mountHandle, inode.RootDirInodeNumber, "c-symlink", "c")
+ cInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c")
+ cNestedInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c-nested")
+ fsCreateSymlink(volumeHandle, inode.RootDirInodeNumber, "c-symlink", "c")
- err = mountHandle.MiddlewarePost("", "c", []byte("metadata for c"), []byte{})
+ err = volumeHandle.MiddlewarePost("", "c", []byte("metadata for c"), []byte{})
if err != nil {
panic(err)
}
- _ = fsMkDir(mountHandle, inode.RootDirInodeNumber, "c-no-metadata")
- _ = fsMkDir(mountHandle, cInode, "empty-directory")
+ _ = fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c-no-metadata")
+ _ = fsMkDir(volumeHandle, cInode, "empty-directory")
- readmeInode := fsCreateFile(mountHandle, cInode, "README")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode, 0, []byte("who am I kidding? nobody reads these."), nil)
- err = mountHandle.MiddlewarePost("", "c/README", []byte("metadata for c/README"), []byte{})
+ readmeInode := fsCreateFile(volumeHandle, cInode, "README")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode, 0, []byte("who am I kidding? nobody reads these."), nil)
+ err = volumeHandle.MiddlewarePost("", "c/README", []byte("metadata for c/README"), []byte{})
if err != nil {
panic(err)
}
- animalsInode := fsMkDir(mountHandle, cInode, "animals")
+ animalsInode := fsMkDir(volumeHandle, cInode, "animals")
files := map[string]string{
"dog.txt": "dog goes woof",
"cat.txt": "cat goes meow",
@@ -312,17 +309,17 @@ func makeSomeFilesAndSuch() {
"elephant.txt": "elephant goes toot",
}
for fileName, fileContents := range files {
- fileInode := fsCreateFile(mountHandle, animalsInode, fileName)
+ fileInode := fsCreateFile(volumeHandle, animalsInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
if err != nil {
panic(fmt.Sprintf("failed to write file %s: %v", fileName, err))
}
}
- plantsInode := fsMkDir(mountHandle, cInode, "plants")
- ino := fsCreateFile(mountHandle, cInode, "plants-README")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, []byte("nah"), nil)
+ plantsInode := fsMkDir(volumeHandle, cInode, "plants")
+ ino := fsCreateFile(volumeHandle, cInode, "plants-README")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, []byte("nah"), nil)
if err != nil {
panic(fmt.Sprintf("failed to write file plants-README: %v", err))
}
@@ -336,83 +333,83 @@ func makeSomeFilesAndSuch() {
}
for fileName, fileContents := range files {
- fileInode := fsCreateFile(mountHandle, plantsInode, fileName)
+ fileInode := fsCreateFile(volumeHandle, plantsInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
if err != nil {
panic(fmt.Sprintf("failed to write file %s: %v", fileName, err))
}
}
- fsCreateSymlink(mountHandle, cInode, "plants-symlink", "plants")
- fsCreateSymlink(mountHandle, plantsInode, "eggplant.txt-symlink", "eggplant.txt")
+ fsCreateSymlink(volumeHandle, cInode, "plants-symlink", "plants")
+ fsCreateSymlink(volumeHandle, plantsInode, "eggplant.txt-symlink", "eggplant.txt")
// Put some deeply nested things in c-nested. This listing is a
// shortened version of a real directory tree that exposed a bug.
- fsCreateFile(mountHandle, cNestedInode, ".DS_Store")
- dotGitInode := fsMkDir(mountHandle, cNestedInode, ".git")
- fsCreateFile(mountHandle, dotGitInode, ".DS_Store")
- fsCreateFile(mountHandle, dotGitInode, "COMMIT_EDITMSG")
- fsCreateFile(mountHandle, dotGitInode, "FETCH_HEAD")
- fsCreateFile(mountHandle, dotGitInode, "HEAD")
- fsCreateFile(mountHandle, dotGitInode, "ORIG_HEAD")
- fsCreateFile(mountHandle, dotGitInode, "index")
- dotGitHooks := fsMkDir(mountHandle, dotGitInode, "hooks")
- fsCreateFile(mountHandle, dotGitHooks, ".DS_Store")
- fsCreateFile(mountHandle, dotGitHooks, "applypatch-msg.sample")
- fsCreateFile(mountHandle, dotGitHooks, "commit-msg.sample")
- dotGitLogs := fsMkDir(mountHandle, dotGitInode, "logs")
- fsCreateFile(mountHandle, dotGitLogs, ".DS_Store")
- fsCreateFile(mountHandle, dotGitLogs, "HEAD")
- dotGitLogsRefs := fsMkDir(mountHandle, dotGitLogs, "refs")
- fsCreateFile(mountHandle, dotGitLogsRefs, ".DS_Store")
- fsCreateFile(mountHandle, dotGitLogsRefs, "stash")
- dotGitLogsRefsHeads := fsMkDir(mountHandle, dotGitLogsRefs, "heads")
- fsCreateFile(mountHandle, dotGitLogsRefsHeads, ".DS_Store")
- fsCreateFile(mountHandle, dotGitLogsRefsHeads, "development")
- fsCreateFile(mountHandle, dotGitLogsRefsHeads, "stable")
-
- aInode := fsMkDir(mountHandle, cNestedInode, "a")
- fsCreateFile(mountHandle, aInode, "b-1")
- fsCreateFile(mountHandle, aInode, "b-2")
- abInode := fsMkDir(mountHandle, aInode, "b")
- fsCreateFile(mountHandle, abInode, "c-1")
- fsCreateFile(mountHandle, abInode, "c-2")
- abcInode := fsMkDir(mountHandle, abInode, "c")
- fsCreateFile(mountHandle, abcInode, "d-1")
- fsCreateFile(mountHandle, abcInode, "d-2")
+ fsCreateFile(volumeHandle, cNestedInode, ".DS_Store")
+ dotGitInode := fsMkDir(volumeHandle, cNestedInode, ".git")
+ fsCreateFile(volumeHandle, dotGitInode, ".DS_Store")
+ fsCreateFile(volumeHandle, dotGitInode, "COMMIT_EDITMSG")
+ fsCreateFile(volumeHandle, dotGitInode, "FETCH_HEAD")
+ fsCreateFile(volumeHandle, dotGitInode, "HEAD")
+ fsCreateFile(volumeHandle, dotGitInode, "ORIG_HEAD")
+ fsCreateFile(volumeHandle, dotGitInode, "index")
+ dotGitHooks := fsMkDir(volumeHandle, dotGitInode, "hooks")
+ fsCreateFile(volumeHandle, dotGitHooks, ".DS_Store")
+ fsCreateFile(volumeHandle, dotGitHooks, "applypatch-msg.sample")
+ fsCreateFile(volumeHandle, dotGitHooks, "commit-msg.sample")
+ dotGitLogs := fsMkDir(volumeHandle, dotGitInode, "logs")
+ fsCreateFile(volumeHandle, dotGitLogs, ".DS_Store")
+ fsCreateFile(volumeHandle, dotGitLogs, "HEAD")
+ dotGitLogsRefs := fsMkDir(volumeHandle, dotGitLogs, "refs")
+ fsCreateFile(volumeHandle, dotGitLogsRefs, ".DS_Store")
+ fsCreateFile(volumeHandle, dotGitLogsRefs, "stash")
+ dotGitLogsRefsHeads := fsMkDir(volumeHandle, dotGitLogsRefs, "heads")
+ fsCreateFile(volumeHandle, dotGitLogsRefsHeads, ".DS_Store")
+ fsCreateFile(volumeHandle, dotGitLogsRefsHeads, "development")
+ fsCreateFile(volumeHandle, dotGitLogsRefsHeads, "stable")
+
+ aInode := fsMkDir(volumeHandle, cNestedInode, "a")
+ fsCreateFile(volumeHandle, aInode, "b-1")
+ fsCreateFile(volumeHandle, aInode, "b-2")
+ abInode := fsMkDir(volumeHandle, aInode, "b")
+ fsCreateFile(volumeHandle, abInode, "c-1")
+ fsCreateFile(volumeHandle, abInode, "c-2")
+ abcInode := fsMkDir(volumeHandle, abInode, "c")
+ fsCreateFile(volumeHandle, abcInode, "d-1")
+ fsCreateFile(volumeHandle, abcInode, "d-2")
// SomeVolume2 is set up for testing account listings
- mountHandle2, err := fs.MountByVolumeName("SomeVolume2", fs.MountOptions(0))
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "alpha")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "bravo")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "charlie")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "delta")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "echo")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "foxtrot")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "golf")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "hotel")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "india")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "juliet")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "kilo")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "lima")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "mancy")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "november")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "oscar")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "papa")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "quebec")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "romeo")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "sierra")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "tango")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "uniform")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "victor")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "whiskey")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "xray")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "yankee")
- _ = fsMkDir(mountHandle2, inode.RootDirInodeNumber, "zulu")
- _ = fsCreateFile(mountHandle2, inode.RootDirInodeNumber, "alice.txt")
- _ = fsCreateFile(mountHandle2, inode.RootDirInodeNumber, "bob.txt")
- _ = fsCreateFile(mountHandle2, inode.RootDirInodeNumber, "carol.txt")
+ volumeHandle2, err := fs.FetchVolumeHandleByVolumeName("SomeVolume2")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "alpha")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "bravo")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "charlie")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "delta")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "echo")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "foxtrot")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "golf")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "hotel")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "india")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "juliet")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "kilo")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "lima")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "mancy")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "november")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "oscar")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "papa")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "quebec")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "romeo")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "sierra")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "tango")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "uniform")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "victor")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "whiskey")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "xray")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "yankee")
+ _ = fsMkDir(volumeHandle2, inode.RootDirInodeNumber, "zulu")
+ _ = fsCreateFile(volumeHandle2, inode.RootDirInodeNumber, "alice.txt")
+ _ = fsCreateFile(volumeHandle2, inode.RootDirInodeNumber, "bob.txt")
+ _ = fsCreateFile(volumeHandle2, inode.RootDirInodeNumber, "carol.txt")
}
func TestMain(m *testing.M) {
@@ -1053,47 +1050,47 @@ func testRpcDelete(t *testing.T, server *Server) {
middlewareCreateContainer(t, server, testVerAccountContainerName, blunder.SuccessError)
// Create an object which is a directory and see if we can delete it via bimodal.
- _, _, _, _, mountHandle, err := mountIfNotMounted(testVerAccountName)
+ _, _, _, _, volumeHandle, err := parseVirtPath(testVerAccountName)
assert.Nil(err)
- cInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainerName)
+ cInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainerName)
assert.Nil(err)
var emptyDir string = "empty-directory"
- _ = fsMkDir(mountHandle, cInode, emptyDir)
+ _ = fsMkDir(volumeHandle, cInode, emptyDir)
err = middlewareDeleteObject(server, emptyDir)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, emptyDir)
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, emptyDir)
assert.NotNil(err)
// Now create an object which is a file and see if we can delete it via bimodal.
var emptyFile string = "empty-file"
- _ = fsCreateFile(mountHandle, cInode, emptyFile)
+ _ = fsCreateFile(volumeHandle, cInode, emptyFile)
err = middlewareDeleteObject(server, emptyFile)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, emptyFile)
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, emptyFile)
assert.NotNil(err)
// Now create a directory with one file in it and prove we can remove file and
// then directory.
var aDir string = "dir1"
- aDirInode := fsMkDir(mountHandle, cInode, aDir)
- _ = fsCreateFile(mountHandle, aDirInode, emptyFile)
+ aDirInode := fsMkDir(volumeHandle, cInode, aDir)
+ _ = fsCreateFile(volumeHandle, aDirInode, emptyFile)
err = middlewareDeleteObject(server, aDir+"/"+emptyFile)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, "/"+aDir+"/"+emptyFile)
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, "/"+aDir+"/"+emptyFile)
assert.NotNil(err)
err = middlewareDeleteObject(server, aDir)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, "/"+aDir)
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, "/"+aDir)
assert.NotNil(err)
// Now delete the container
@@ -1124,35 +1121,35 @@ func TestRpcDeleteSymlinks(t *testing.T) {
// d1/crackle
// d1/pop
// d1-symlink -> d1
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
- tlInode := fsCreateFile(mountHandle, containerInode, "top-level.txt")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, tlInode, 0, []byte("conusance-callboy"), nil)
+ tlInode := fsCreateFile(volumeHandle, containerInode, "top-level.txt")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, tlInode, 0, []byte("conusance-callboy"), nil)
- d1Inode := fsMkDir(mountHandle, containerInode, "d1")
+ d1Inode := fsMkDir(volumeHandle, containerInode, "d1")
files := map[string]string{
"snap": "contents of snap",
"crackle": "contents of crackle",
"pop": "contents of pop",
}
for fileName, fileContents := range files {
- fileInode := fsCreateFile(mountHandle, d1Inode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
+ fileInode := fsCreateFile(volumeHandle, d1Inode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileContents), nil)
if err != nil {
panic(fmt.Sprintf("failed to write file %s: %v", fileName, err))
}
}
- fsCreateSymlink(mountHandle, containerInode, "d1-symlink", "d1")
- fsCreateSymlink(mountHandle, d1Inode, "snap-symlink", "snap")
- fsCreateSymlink(mountHandle, d1Inode, "pop-symlink", "./pop")
+ fsCreateSymlink(volumeHandle, containerInode, "d1-symlink", "d1")
+ fsCreateSymlink(volumeHandle, d1Inode, "snap-symlink", "snap")
+ fsCreateSymlink(volumeHandle, d1Inode, "pop-symlink", "./pop")
- fsCreateSymlink(mountHandle, d1Inode, "dot-symlink", ".")
+ fsCreateSymlink(volumeHandle, d1Inode, "dot-symlink", ".")
// Symlinks in the directory portion of the name are followed
deleteRequest := DeleteReq{
@@ -1162,7 +1159,7 @@ func TestRpcDeleteSymlinks(t *testing.T) {
err = s.RpcDelete(&deleteRequest, &deleteResponse)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/crackle")
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/crackle")
assert.NotNil(err)
assert.True(blunder.Is(err, blunder.NotFoundError))
@@ -1175,9 +1172,9 @@ func TestRpcDeleteSymlinks(t *testing.T) {
err = s.RpcDelete(&deleteRequest, &deleteResponse)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/snap")
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/snap")
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/snap-symlink")
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/snap-symlink")
assert.NotNil(err)
assert.True(blunder.Is(err, blunder.NotFoundError))
@@ -1189,9 +1186,9 @@ func TestRpcDeleteSymlinks(t *testing.T) {
err = s.RpcDelete(&deleteRequest, &deleteResponse)
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/pop")
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/pop")
assert.Nil(err)
- _, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/pop-symlink")
+ _, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/d1/pop-symlink")
assert.NotNil(err)
assert.True(blunder.Is(err, blunder.NotFoundError))
}
@@ -1202,7 +1199,7 @@ func testRpcPost(t *testing.T, server *Server) {
// We assume that the container already exists since currently we cannot
// delete the container.
- _, _, _, _, mountHandle, err := mountIfNotMounted(testVerAccountName)
+ _, _, _, _, volumeHandle, err := parseVirtPath(testVerAccountName)
assert.Nil(err)
// POST to account with empty string for account
@@ -1242,14 +1239,14 @@ func testRpcPost(t *testing.T, server *Server) {
// Now POST to account/container/object after creating an object which
// is a directory and one which is a file.
- cInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainerName)
+ cInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, testContainerName)
assert.Nil(err)
var emptyDir string = "empty-directory"
- _ = fsMkDir(mountHandle, cInode, emptyDir)
+ _ = fsMkDir(volumeHandle, cInode, emptyDir)
var emptyFile string = "empty-file"
- _ = fsCreateFile(mountHandle, cInode, emptyFile)
+ _ = fsCreateFile(volumeHandle, cInode, emptyFile)
var emptyFileSymlink string = "empty-file-symlink"
- fsCreateSymlink(mountHandle, cInode, emptyFileSymlink, emptyFile)
+ fsCreateSymlink(volumeHandle, cInode, emptyFileSymlink, emptyFile)
virtPath = testVerAccountContainerName + "/" + emptyDir
newContMetaData = []byte("object emptyDir metadata")
@@ -1270,7 +1267,7 @@ func testRpcPost(t *testing.T, server *Server) {
err = middlewarePost(server, virtPath, newContMetaData, oldContMetaData)
assert.Nil(err)
- headResponse, err := mountHandle.MiddlewareHeadResponse(testContainerName + "/" + emptyFile)
+ headResponse, err := volumeHandle.MiddlewareHeadResponse(testContainerName + "/" + emptyFile)
assert.Nil(err)
assert.Equal(newContMetaData, headResponse.Metadata)
@@ -1302,7 +1299,7 @@ func testNameLength(t *testing.T, server *Server) {
// Tests for RpcPutLocation and RpcPutComplete together; an object PUT
// calls both
-func testPutObjectSetup(t *testing.T) (*assert.Assertions, *Server, string, fs.MountHandle) {
+func testPutObjectSetup(t *testing.T) (*assert.Assertions, *Server, string, fs.VolumeHandle) {
// Just some common setup crud
// We can't delete containers, so we grab a name and hope that it
@@ -1310,17 +1307,17 @@ func testPutObjectSetup(t *testing.T) (*assert.Assertions, *Server, string, fs.M
// almost certainly okay.)
containerName := fmt.Sprintf("mware-TestPutObject-%d", time.Now().UnixNano())
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
- fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
assert := assert.New(t)
server := &Server{}
- return assert, server, containerName, mountHandle
+ return assert, server, containerName, volumeHandle
}
// Helper function to put a file into Swift using RpcPutLocation / RpcPutComplete plus an HTTP PUT request
@@ -1374,7 +1371,7 @@ func putFileInSwift(server *Server, virtPath string, objData []byte, objMetadata
}
func TestPutObjectSimple(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "toplevel.bin"
objData := []byte("hello world\n")
@@ -1385,19 +1382,19 @@ func TestPutObjectSimple(t *testing.T) {
assert.Nil(err) // sanity check
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData, contents)
- headResponse, err := mountHandle.MiddlewareHeadResponse(containerName + "/" + objName)
+ headResponse, err := volumeHandle.MiddlewareHeadResponse(containerName + "/" + objName)
assert.Nil(err)
assert.Equal([]byte(objMetadata), headResponse.Metadata)
}
func TestPutObjectInAllNewSubdirs(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "d1/d2/d3/d4/nested.bin"
objData := []byte("hello nested world\n")
@@ -1408,23 +1405,23 @@ func TestPutObjectInAllNewSubdirs(t *testing.T) {
assert.Nil(err) // sanity check
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData, contents)
}
func TestPutObjectInSomeNewSubdirs(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
// make d1 and d1/d2, but leave creation of the rest to the RPC call
- containerInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
+ containerInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
if err != nil {
panic(err)
}
- d1Inode := fsMkDir(mountHandle, containerInode, "exists-d1")
- _ = fsMkDir(mountHandle, d1Inode, "exists-d2")
+ d1Inode := fsMkDir(volumeHandle, containerInode, "exists-d1")
+ _ = fsMkDir(volumeHandle, d1Inode, "exists-d2")
objName := "exists-d1/exists-d2/d3/d4/nested.bin"
objData := []byte("hello nested world\n")
@@ -1435,15 +1432,15 @@ func TestPutObjectInSomeNewSubdirs(t *testing.T) {
assert.Nil(err) // sanity check
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData, contents)
}
func TestPutObjectOverwriteFile(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "overwritten.bin"
objData1 := []byte("hello world 1\n")
@@ -1456,29 +1453,29 @@ func TestPutObjectOverwriteFile(t *testing.T) {
err = putFileInSwift(server, objVirtPath, objData2, objMetadata)
assert.Nil(err)
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData2, contents)
}
func TestPutObjectOverwriteDirectory(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "dir-with-stuff-in-it"
objData := []byte("irrelevant")
objMetadata := []byte("won't get written")
objVirtPath := testVerAccountName + "/" + containerName + "/" + objName
- containerInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
+ containerInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
if err != nil {
panic(err)
}
- dirInodeNumber := fsMkDir(mountHandle, containerInode, "dir-with-stuff-in-it")
+ dirInodeNumber := fsMkDir(volumeHandle, containerInode, "dir-with-stuff-in-it")
- fileInodeNumber := fsCreateFile(mountHandle, dirInodeNumber, "stuff.txt")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, 0, []byte("churches, lead, small rocks, apples"), nil)
+ fileInodeNumber := fsCreateFile(volumeHandle, dirInodeNumber, "stuff.txt")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, 0, []byte("churches, lead, small rocks, apples"), nil)
if err != nil {
panic(err)
}
@@ -1489,34 +1486,34 @@ func TestPutObjectOverwriteDirectory(t *testing.T) {
// remove the file in the directory and the put should succeed,
// replacing the directory with a file
- err = mountHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
dirInodeNumber, "stuff.txt")
assert.Nil(err)
err = putFileInSwift(server, objVirtPath, objData, objMetadata)
assert.Nil(err)
- dirInodeNumber, err = mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
+ dirInodeNumber, err = volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil,
containerName+"/"+objName)
assert.Nil(err)
- statResult, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber)
+ statResult, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, dirInodeNumber)
assert.Nil(err)
assert.Equal(statResult[fs.StatFType], uint64(inode.FileType))
}
func TestPutObjectSymlinkedDir(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
- containerInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
+ containerInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
if err != nil {
panic(err)
}
- d1Inode := fsMkDir(mountHandle, containerInode, "d1")
- d2Inode := fsMkDir(mountHandle, d1Inode, "d2")
- fsCreateSymlink(mountHandle, d1Inode, "d2-symlink", "./d2")
- fsCreateSymlink(mountHandle, d1Inode, "dot-symlink", ".")
- fsCreateSymlink(mountHandle, d2Inode, "abs-container-symlink", "/"+containerName)
+ d1Inode := fsMkDir(volumeHandle, containerInode, "d1")
+ d2Inode := fsMkDir(volumeHandle, d1Inode, "d2")
+ fsCreateSymlink(volumeHandle, d1Inode, "d2-symlink", "./d2")
+ fsCreateSymlink(volumeHandle, d1Inode, "dot-symlink", ".")
+ fsCreateSymlink(volumeHandle, d2Inode, "abs-container-symlink", "/"+containerName)
objName := "d1/d2-symlink/abs-container-symlink/d1/dot-symlink/dot-symlink/d2/d3/thing.dat"
objData := []byte("kamik-defensory")
@@ -1527,21 +1524,21 @@ func TestPutObjectSymlinkedDir(t *testing.T) {
assert.Nil(err) // sanity check
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+"d1/d2/d3/thing.dat")
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+"d1/d2/d3/thing.dat")
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData, contents)
}
func TestPutObjectOverwriteSymlink(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
- containerInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
+ containerInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
if err != nil {
panic(err)
}
- fsCreateSymlink(mountHandle, containerInode, "thing.dat", "somewhere-else")
+ fsCreateSymlink(volumeHandle, containerInode, "thing.dat", "somewhere-else")
objName := "thing.dat"
objData := []byte("cottontop-aleuroscope")
@@ -1552,30 +1549,30 @@ func TestPutObjectOverwriteSymlink(t *testing.T) {
assert.Nil(err) // sanity check
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+"somewhere-else")
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+"somewhere-else")
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal(objData, contents)
}
func TestPutObjectFileInDirPath(t *testing.T) {
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "d1/d2/actually-a-file/d3/d4/stuff.txt"
objData := []byte("irrelevant")
objMetadata := []byte("won't get written")
objVirtPath := testVerAccountName + "/" + containerName + "/" + objName
- containerInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
+ containerInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName)
if err != nil {
panic(err)
}
- d1InodeNumber := fsMkDir(mountHandle, containerInode, "d1")
- d2InodeNumber := fsMkDir(mountHandle, d1InodeNumber, "d2")
+ d1InodeNumber := fsMkDir(volumeHandle, containerInode, "d1")
+ d2InodeNumber := fsMkDir(volumeHandle, d1InodeNumber, "d2")
- fileInodeNumber := fsCreateFile(mountHandle, d2InodeNumber, "actually-a-file")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, 0, []byte("not a directory"), nil)
+ fileInodeNumber := fsCreateFile(volumeHandle, d2InodeNumber, "actually-a-file")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, 0, []byte("not a directory"), nil)
if err != nil {
panic(err)
}
@@ -1588,7 +1585,7 @@ func TestPutObjectFileInDirPath(t *testing.T) {
func TestPutObjectCompound(t *testing.T) {
// In this test, we put data into two different log segments, but
// the data is for the same file
- assert, server, containerName, mountHandle := testPutObjectSetup(t)
+ assert, server, containerName, volumeHandle := testPutObjectSetup(t)
objName := "helloworld.txt"
objMetadata := []byte("{}")
@@ -1677,20 +1674,20 @@ func TestPutObjectCompound(t *testing.T) {
}
// The file should exist now, so we can verify its attributes
- theInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
+ theInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/"+objName)
assert.Nil(err)
- contents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
+ contents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("hello world!"), contents)
assert.Equal(uint64(theInode), uint64(putCompleteResp.InodeNumber))
// 2 is the number of log segments we wrote
assert.Equal(uint64(2), putCompleteResp.NumWrites)
- headResponse, err := mountHandle.MiddlewareHeadResponse(containerName + "/" + objName)
+ headResponse, err := volumeHandle.MiddlewareHeadResponse(containerName + "/" + objName)
assert.Nil(err)
assert.Equal([]byte(objMetadata), headResponse.Metadata)
- statResult, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode)
+ statResult, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, theInode)
assert.Nil(err)
assert.Equal(statResult[fs.StatMTime], putCompleteResp.ModificationTime)
assert.Equal(statResult[fs.StatCTime], putCompleteResp.AttrChangeTime)
@@ -1724,22 +1721,22 @@ func TestRpcGetObjectMetadata(t *testing.T) {
// We're not actually going to test any read plans here; that is tested elsewhere.
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "covetingly-ahead"
- cInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
- readmeInode := fsCreateFile(mountHandle, cInode, "README")
+ cInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
+ readmeInode := fsCreateFile(volumeHandle, cInode, "README")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode, 0, []byte("unsurpassably-Rigelian"), nil)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode, 0, []byte("unsurpassably-Rigelian"), nil)
if err != nil {
panic(err)
}
- statResult, err := mountHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode)
+ statResult, err := volumeHandle.Getstat(inode.InodeRootUserID, inode.InodeGroupID(0), nil, readmeInode)
if err != nil {
panic(err)
}
@@ -1767,7 +1764,7 @@ func TestRpcGetObjectSymlinkFollowing(t *testing.T) {
// We're not actually going to test any read plans here; that is tested elsewhere.
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
@@ -1803,54 +1800,54 @@ func TestRpcGetObjectSymlinkFollowing(t *testing.T) {
// /c4/d1/symlink-d2 -> d2
// /c4/d1/d2/symlink-kitten.png -> /c1/kitten.png
- c1Inode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c1")
- c2Inode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c2")
- c3Inode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c3")
- c4Inode := fsMkDir(mountHandle, inode.RootDirInodeNumber, "c4")
+ c1Inode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c1")
+ c2Inode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c2")
+ c3Inode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c3")
+ c4Inode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "c4")
- fileInode := fsCreateFile(mountHandle, c1Inode, "kitten.png")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("if this were a real kitten, it would be cute"), nil)
+ fileInode := fsCreateFile(volumeHandle, c1Inode, "kitten.png")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("if this were a real kitten, it would be cute"), nil)
if err != nil {
panic(err)
}
- fsCreateSymlink(mountHandle, c1Inode, "symlink-1", "kitten.png")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-2", "symlink-1")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-3", "symlink-2")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-4", "symlink-3")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-5", "symlink-4")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-6", "symlink-5")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-7", "symlink-6")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-8", "symlink-7")
- fsCreateSymlink(mountHandle, c1Inode, "symlink-9", "symlink-8")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-1", "kitten.png")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-2", "symlink-1")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-3", "symlink-2")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-4", "symlink-3")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-5", "symlink-4")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-6", "symlink-5")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-7", "symlink-6")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-8", "symlink-7")
+ fsCreateSymlink(volumeHandle, c1Inode, "symlink-9", "symlink-8")
- fileInode = fsCreateFile(mountHandle, c2Inode, "10-bytes")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("abcdefghij"), nil)
+ fileInode = fsCreateFile(volumeHandle, c2Inode, "10-bytes")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("abcdefghij"), nil)
if err != nil {
panic(err)
}
- fsCreateSymlink(mountHandle, c2Inode, "symlink-10-bytes", "10-bytes")
- fsCreateSymlink(mountHandle, c2Inode, "symlink-20-bytes", "/c3/20-bytes")
- fsCreateSymlink(mountHandle, c2Inode, "symlink-20-bytes-indirect", "symlink-20-bytes")
+ fsCreateSymlink(volumeHandle, c2Inode, "symlink-10-bytes", "10-bytes")
+ fsCreateSymlink(volumeHandle, c2Inode, "symlink-20-bytes", "/c3/20-bytes")
+ fsCreateSymlink(volumeHandle, c2Inode, "symlink-20-bytes-indirect", "symlink-20-bytes")
- fileInode = fsCreateFile(mountHandle, c3Inode, "20-bytes")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("abcdefghijklmnopqrst"), nil)
+ fileInode = fsCreateFile(volumeHandle, c3Inode, "20-bytes")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("abcdefghijklmnopqrst"), nil)
if err != nil {
panic(err)
}
- fsCreateSymlink(mountHandle, c3Inode, "symlink-20-bytes-double-indirect", "/c2/symlink-20-bytes-indirect")
- fsCreateSymlink(mountHandle, c3Inode, "symlink-c2", "/c2")
- fsCreateSymlink(mountHandle, c3Inode, "cycle-a", "cycle-b")
- fsCreateSymlink(mountHandle, c3Inode, "cycle-b", "cycle-c")
- fsCreateSymlink(mountHandle, c3Inode, "cycle-c", "cycle-a")
+ fsCreateSymlink(volumeHandle, c3Inode, "symlink-20-bytes-double-indirect", "/c2/symlink-20-bytes-indirect")
+ fsCreateSymlink(volumeHandle, c3Inode, "symlink-c2", "/c2")
+ fsCreateSymlink(volumeHandle, c3Inode, "cycle-a", "cycle-b")
+ fsCreateSymlink(volumeHandle, c3Inode, "cycle-b", "cycle-c")
+ fsCreateSymlink(volumeHandle, c3Inode, "cycle-c", "cycle-a")
- c4d1Inode := fsMkDir(mountHandle, c4Inode, "d1")
- c4d1d2Inode := fsMkDir(mountHandle, c4d1Inode, "d2")
- fsCreateSymlink(mountHandle, c4Inode, "symlink-d1", "d1")
- fsCreateSymlink(mountHandle, c4d1Inode, "symlink-d2", "d2")
- fsCreateSymlink(mountHandle, c4d1d2Inode, "symlink-kitten.png", "/c1/kitten.png")
+ c4d1Inode := fsMkDir(volumeHandle, c4Inode, "d1")
+ c4d1d2Inode := fsMkDir(volumeHandle, c4d1Inode, "d2")
+ fsCreateSymlink(volumeHandle, c4Inode, "symlink-d1", "d1")
+ fsCreateSymlink(volumeHandle, c4d1Inode, "symlink-d2", "d2")
+ fsCreateSymlink(volumeHandle, c4d1d2Inode, "symlink-kitten.png", "/c1/kitten.png")
// Test setup complete
// Test following a single symlink to a file in the same directory
@@ -1923,7 +1920,7 @@ func TestRpcGetObjectSymlinkFollowing(t *testing.T) {
func TestRpcPutContainer(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- _, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ _, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
@@ -1985,7 +1982,7 @@ func TestRpcPutContainer(t *testing.T) {
func TestRpcPutContainerTooLong(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- _, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ _, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
@@ -2007,13 +2004,13 @@ func TestRpcPutContainerTooLong(t *testing.T) {
func TestRpcMiddlewareMkdir(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-middleware-mkdir-container"
- fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
dirName := "rpc-middleware-mkdir-test"
dirPath := testVerAccountName + "/" + containerName + "/" + dirName
dirMetadata := []byte("some metadata b5fdbc4a0f1484225fcb7aa64b1e6b94")
@@ -2051,13 +2048,13 @@ func TestRpcMiddlewareMkdir(t *testing.T) {
func TestRpcMiddlewareMkdirNested(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-middleware-mkdir-container-nested"
- fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
dirName := "some/deeply/nested/dir"
dirPath := testVerAccountName + "/" + containerName + "/" + dirName
dirMetadata := []byte("some metadata eeef146ba9e5875cb52b047ba4f03660")
@@ -2084,7 +2081,7 @@ func TestRpcMiddlewareMkdirNested(t *testing.T) {
func TestRpcCoalesce(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
@@ -2095,15 +2092,15 @@ func TestRpcCoalesce(t *testing.T) {
destinationPath := containerAPath + "/" + "combined-file"
- containerAInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerAName)
- containerBInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerBName)
+ containerAInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerAName)
+ containerBInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerBName)
- containerADir1Inode := fsMkDir(mountHandle, containerAInode, "dir1")
- containerADir1Dir2Inode := fsMkDir(mountHandle, containerADir1Inode, "dir2")
+ containerADir1Inode := fsMkDir(volumeHandle, containerAInode, "dir1")
+ containerADir1Dir2Inode := fsMkDir(volumeHandle, containerADir1Inode, "dir2")
fileA1Path := "/" + containerAName + "/dir1/dir2/a1"
- fileA1Inode := fsCreateFile(mountHandle, containerADir1Dir2Inode, "a1")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileA1Inode, 0, []byte("red "), nil)
+ fileA1Inode := fsCreateFile(volumeHandle, containerADir1Dir2Inode, "a1")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileA1Inode, 0, []byte("red "), nil)
if err != nil {
panic(err)
}
@@ -2111,15 +2108,15 @@ func TestRpcCoalesce(t *testing.T) {
// Element paths are relative to the account, but the destination path is absolute. It's a little weird, but it
// means we don't have to worry about element paths pointing to different accounts.
fileA2Path := "/" + containerAName + "/dir1/dir2/a2"
- fileA2Inode := fsCreateFile(mountHandle, containerADir1Dir2Inode, "a2")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileA2Inode, 0, []byte("orange "), nil)
+ fileA2Inode := fsCreateFile(volumeHandle, containerADir1Dir2Inode, "a2")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileA2Inode, 0, []byte("orange "), nil)
if err != nil {
panic(err)
}
fileBPath := "/" + containerBName + "/b"
- fileBInode := fsCreateFile(mountHandle, containerBInode, "b")
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileBInode, 0, []byte("yellow"), nil)
+ fileBInode := fsCreateFile(volumeHandle, containerBInode, "b")
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileBInode, 0, []byte("yellow"), nil)
if err != nil {
panic(err)
}
@@ -2138,7 +2135,7 @@ func TestRpcCoalesce(t *testing.T) {
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- combinedInode, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerAName+"/combined-file")
+ combinedInode, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerAName+"/combined-file")
assert.Nil(err)
assert.Equal(uint64(combinedInode), uint64(coalesceReply.InodeNumber))
assert.True(coalesceReply.NumWrites > 0)
@@ -2146,7 +2143,7 @@ func TestRpcCoalesce(t *testing.T) {
assert.True(coalesceReply.ModificationTime > timeBeforeRequest)
assert.True(coalesceReply.ModificationTime == coalesceReply.AttrChangeTime)
- combinedContents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, combinedInode, 0, 99999, nil)
+ combinedContents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, combinedInode, 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("red orange yellow"), combinedContents)
}
@@ -2154,21 +2151,21 @@ func TestRpcCoalesce(t *testing.T) {
func TestRpcCoalesceOverwrite(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-Callynteria-sapor"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/" + "combined"
filesToWrite := []string{"red", "orange", "yellow", "green", "blue", "indigo", "violet"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
@@ -2186,7 +2183,7 @@ func TestRpcCoalesceOverwrite(t *testing.T) {
coalesceReply := CoalesceReply{}
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- combinedContents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(coalesceReply.InodeNumber), 0, 99999, nil)
+ combinedContents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(coalesceReply.InodeNumber), 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("red orange yellow "), combinedContents) // sanity check
@@ -2204,7 +2201,7 @@ func TestRpcCoalesceOverwrite(t *testing.T) {
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- combinedContents, err = mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(coalesceReply.InodeNumber), 0, 99999, nil)
+ combinedContents, err = volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.InodeNumber(coalesceReply.InodeNumber), 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("green blue indigo violet "), combinedContents)
@@ -2213,27 +2210,27 @@ func TestRpcCoalesceOverwrite(t *testing.T) {
func TestRpcCoalesceOverwriteDir(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-speller-spinally"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/" + "combined"
filesToWrite := []string{"red", "orange", "yellow", "green", "blue", "indigo", "violet"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
}
- combinedInode := fsMkDir(mountHandle, containerInode, "combined")
+ combinedInode := fsMkDir(volumeHandle, containerInode, "combined")
// Create the file
coalesceRequest := CoalesceReq{
@@ -2249,33 +2246,33 @@ func TestRpcCoalesceOverwriteDir(t *testing.T) {
assert.NotNil(err)
// The old dir is still there
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/combined")
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/combined")
assert.Equal(combinedInode, ino)
}
func TestRpcCoalesceMakesDirs(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-subsaturation-rowy"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/a/b/c/d/e/f/combined"
// The directory structure partially exists, but not totally
- aInode := fsMkDir(mountHandle, containerInode, "a")
- bInode := fsMkDir(mountHandle, aInode, "b")
- fsMkDir(mountHandle, bInode, "c")
+ aInode := fsMkDir(volumeHandle, containerInode, "a")
+ bInode := fsMkDir(volumeHandle, aInode, "b")
+ fsMkDir(volumeHandle, bInode, "c")
filesToWrite := []string{"red", "orange", "yellow", "green", "blue", "indigo", "violet"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
@@ -2294,11 +2291,11 @@ func TestRpcCoalesceMakesDirs(t *testing.T) {
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/b/c/d/e/f/combined")
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/b/c/d/e/f/combined")
assert.Nil(err)
assert.Equal(inode.InodeNumber(coalesceReply.InodeNumber), ino)
- combinedContents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
+ combinedContents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("red orange yellow "), combinedContents) // sanity check
}
@@ -2306,28 +2303,28 @@ func TestRpcCoalesceMakesDirs(t *testing.T) {
func TestRpcCoalesceSymlinks(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-salpingian-utilizer"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/a/b-sl/abs-a-sl/b-sl/c/combined"
// The directory structure partially exists, but not totally
- aInode := fsMkDir(mountHandle, containerInode, "a")
- fsCreateSymlink(mountHandle, aInode, "b-sl", "b")
- bInode := fsMkDir(mountHandle, aInode, "b")
- fsCreateSymlink(mountHandle, bInode, "abs-a-sl", "/"+containerName+"/a")
- fsMkDir(mountHandle, bInode, "c")
+ aInode := fsMkDir(volumeHandle, containerInode, "a")
+ fsCreateSymlink(volumeHandle, aInode, "b-sl", "b")
+ bInode := fsMkDir(volumeHandle, aInode, "b")
+ fsCreateSymlink(volumeHandle, bInode, "abs-a-sl", "/"+containerName+"/a")
+ fsMkDir(volumeHandle, bInode, "c")
filesToWrite := []string{"red", "orange", "yellow", "green", "blue", "indigo", "violet"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
@@ -2346,11 +2343,11 @@ func TestRpcCoalesceSymlinks(t *testing.T) {
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/b/c/combined")
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/b/c/combined")
assert.Nil(err)
assert.Equal(inode.InodeNumber(coalesceReply.InodeNumber), ino)
- combinedContents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
+ combinedContents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("red orange yellow "), combinedContents) // sanity check
}
@@ -2358,25 +2355,25 @@ func TestRpcCoalesceSymlinks(t *testing.T) {
func TestRpcCoalesceBrokenSymlink(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-Clathrus-playmonger"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/a/busted/c/combined"
// The directory structure partially exists, but not totally
- aInode := fsMkDir(mountHandle, containerInode, "a")
- fsCreateSymlink(mountHandle, aInode, "busted", "this-symlink-is-broken")
+ aInode := fsMkDir(volumeHandle, containerInode, "a")
+ fsCreateSymlink(volumeHandle, aInode, "busted", "this-symlink-is-broken")
filesToWrite := []string{"red", "orange", "yellow", "green", "blue", "indigo", "violet"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
@@ -2395,11 +2392,11 @@ func TestRpcCoalesceBrokenSymlink(t *testing.T) {
err = server.RpcCoalesce(&coalesceRequest, &coalesceReply)
assert.Nil(err)
- ino, err := mountHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/this-symlink-is-broken/c/combined")
+ ino, err := volumeHandle.LookupPath(inode.InodeRootUserID, inode.InodeGroupID(0), nil, containerName+"/a/this-symlink-is-broken/c/combined")
assert.Nil(err)
assert.Equal(inode.InodeNumber(coalesceReply.InodeNumber), ino)
- combinedContents, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
+ combinedContents, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, ino, 0, 99999, nil)
assert.Nil(err)
assert.Equal([]byte("red orange yellow "), combinedContents) // sanity check
}
@@ -2407,25 +2404,25 @@ func TestRpcCoalesceBrokenSymlink(t *testing.T) {
func TestRpcCoalesceSubdirOfAFile(t *testing.T) {
server := &Server{}
assert := assert.New(t)
- mountHandle, err := fs.MountByVolumeName("SomeVolume", fs.MountOptions(0))
+ volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
if nil != err {
panic(fmt.Sprintf("failed to mount SomeVolume: %v", err))
}
containerName := "rpc-coalesce-fanam-outswim"
containerPath := testVerAccountName + "/" + containerName
- containerInode := fsMkDir(mountHandle, inode.RootDirInodeNumber, containerName)
+ containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, containerName)
destinationPath := containerPath + "/a/b-is-a-file/c/combined"
// The directory structure partially exists, but not totally
- aInode := fsMkDir(mountHandle, containerInode, "a")
- fsCreateFile(mountHandle, aInode, "b-is-a-file")
+ aInode := fsMkDir(volumeHandle, containerInode, "a")
+ fsCreateFile(volumeHandle, aInode, "b-is-a-file")
filesToWrite := []string{"red", "orange", "yellow"}
for _, fileName := range filesToWrite {
- fileInode := fsCreateFile(mountHandle, containerInode, fileName)
- _, err = mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
+ fileInode := fsCreateFile(volumeHandle, containerInode, fileName)
+ _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte(fileName+" "), nil)
if err != nil {
panic(err)
}
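
The middleware test changes above all follow one pattern: fs.MountByVolumeName()/mountHandle is replaced by fs.FetchVolumeHandleByVolumeName()/volumeHandle, with the surrounding call sites otherwise unchanged. A minimal sketch of the new test-setup idiom, assuming the fsMkDir/fsCreateFile helpers already defined in these jrpcfs middleware tests:

    // Sketch only: the renamed volume-handle idiom used throughout these tests.
    volumeHandle, err := fs.FetchVolumeHandleByVolumeName("SomeVolume")
    if nil != err {
        panic(fmt.Sprintf("failed to fetch volume handle for SomeVolume: %v", err))
    }
    containerInode := fsMkDir(volumeHandle, inode.RootDirInodeNumber, "example-container")
    fileInode := fsCreateFile(volumeHandle, containerInode, "example-file")
    _, err = volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInode, 0, []byte("payload"), nil)
    if nil != err {
        panic(err)
    }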
diff --git a/jrpcfs/retryrpc.go b/jrpcfs/retryrpc.go
index 89646573c..12cd11816 100644
--- a/jrpcfs/retryrpc.go
+++ b/jrpcfs/retryrpc.go
@@ -9,7 +9,7 @@ import (
func retryRPCServerUp(jserver *Server, publicIPAddr string, retryRPCPort uint16,
retryRPCTTLCompleted time.Duration, retryRPCAckTrim time.Duration,
- retryRPCDeadlineIO time.Duration, retryRPCKEEPALIVEPeriod time.Duration) {
+ retryRPCDeadlineIO time.Duration, retryRPCKeepAlivePeriod time.Duration) {
var err error
@@ -19,7 +19,7 @@ func retryRPCServerUp(jserver *Server, publicIPAddr string, retryRPCPort uint16,
// Create a new RetryRPC Server.
retryConfig := &retryrpc.ServerConfig{LongTrim: retryRPCTTLCompleted, ShortTrim: retryRPCAckTrim, IPAddr: publicIPAddr,
- Port: int(retryRPCPort), DeadlineIO: retryRPCDeadlineIO, KEEPALIVEPeriod: retryRPCKEEPALIVEPeriod}
+ Port: int(retryRPCPort), DeadlineIO: retryRPCDeadlineIO, KeepAlivePeriod: retryRPCKeepAlivePeriod}
rrSvr := retryrpc.NewServer(retryConfig)
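
For context, a hypothetical invocation of retryRPCServerUp() after the rename; the address and duration values below are illustrative only:

    // Only the final parameter name changed (retryRPCKEEPALIVEPeriod ->
    // retryRPCKeepAlivePeriod), matching the retryrpc.ServerConfig field
    // rename to KeepAlivePeriod.
    retryRPCServerUp(jserver, "192.168.0.1", 32356,
        10*time.Minute,       // retryRPCTTLCompleted
        100*time.Millisecond, // retryRPCAckTrim
        60*time.Second,       // retryRPCDeadlineIO
        60*time.Second)       // retryRPCKeepAlivePeriod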
diff --git a/liveness/api.go b/liveness/api.go
index 2d62cd39a..add7a61d9 100644
--- a/liveness/api.go
+++ b/liveness/api.go
@@ -30,9 +30,15 @@ type ServingPeerStruct struct {
VolumeGroup []*VolumeGroupStruct
}
+type ReconEndpointStruct struct {
+ IPAddrPort string
+ MaxDiskUsagePercentage uint8
+}
+
type ObservingPeerStruct struct {
- Name string
- ServingPeer []*ServingPeerStruct
+ Name string
+ ServingPeer []*ServingPeerStruct
+ ReconEndpoint []*ReconEndpointStruct
}
type LivenessReportStruct struct {
diff --git a/liveness/api_internal.go b/liveness/api_internal.go
index b23250e29..262578a32 100644
--- a/liveness/api_internal.go
+++ b/liveness/api_internal.go
@@ -5,23 +5,6 @@ import (
"sync"
)
-/*
-type FetchLivenessReportRequestStruct struct {
- MsgType MsgType // == MsgTypeFetchLivenessReportRequest
- // Used to request Liveness Report from who we think is the Leader
- MsgTag uint64 // Used for matching this FetchLivenessReportRequestStruct to a subsequent FetchLivenessReportResponseStruct
- CurrentTerm uint64
-}
-type FetchLivenessReportResponseStruct struct {
- MsgType MsgType // == MsgTypeFetchLivenessReportResponse
- MsgTag uint64 // Used for matching this FetchLivenessReportResponseStruct to a previous FetchLivenessReportRequestStruct
- CurrentTerm uint64 // == LeaderTerm if Success === true (by definition)
- CurrentLeader string // If Success == false, this is who should actually be contacted for this (if known)
- Success bool // == true if Leader is responding; == false if we are not the Leader
- LivenessReport *LivenessReportStruct // Liveness Report as collected by Leader
-}
-func sendRequest(peer *peerStruct, msgTag uint64, requestContext interface{}, requestMsg interface{}, callback func(request *requestStruct)) (err error) {
-*/
func fetchLivenessReport() (livenessReport *LivenessReportStruct) {
var (
err error
@@ -133,12 +116,14 @@ func convertExternalToInternalLivenessReport(externalLivenessReport *LivenessRep
func convertInternalToExternalObservingPeerReport(internalObservingPeerReport *internalObservingPeerReportStruct) (externalObservingPeer *ObservingPeerStruct) {
var (
- internalServingPeerReport *internalServingPeerReportStruct
- internalVolumeGroupReport *internalVolumeGroupReportStruct
- internalVolumeReport *internalVolumeReportStruct
- servingPeer *ServingPeerStruct
- volume *VolumeStruct
- volumeGroup *VolumeGroupStruct
+ internalReconEndpointReport *internalReconEndpointReportStruct
+ internalServingPeerReport *internalServingPeerReportStruct
+ internalVolumeGroupReport *internalVolumeGroupReportStruct
+ internalVolumeReport *internalVolumeReportStruct
+ reconEndpoint *ReconEndpointStruct
+ servingPeer *ServingPeerStruct
+ volume *VolumeStruct
+ volumeGroup *VolumeGroupStruct
)
if nil == internalObservingPeerReport {
@@ -147,8 +132,9 @@ func convertInternalToExternalObservingPeerReport(internalObservingPeerReport *i
}
externalObservingPeer = &ObservingPeerStruct{
- Name: internalObservingPeerReport.name,
- ServingPeer: make([]*ServingPeerStruct, 0, len(internalObservingPeerReport.servingPeer)),
+ Name: internalObservingPeerReport.name,
+ ServingPeer: make([]*ServingPeerStruct, 0, len(internalObservingPeerReport.servingPeer)),
+ ReconEndpoint: make([]*ReconEndpointStruct, 0, len(internalObservingPeerReport.reconEndpoint)),
}
for _, internalServingPeerReport = range internalObservingPeerReport.servingPeer {
@@ -183,17 +169,28 @@ func convertInternalToExternalObservingPeerReport(internalObservingPeerReport *i
externalObservingPeer.ServingPeer = append(externalObservingPeer.ServingPeer, servingPeer)
}
+ for _, internalReconEndpointReport = range internalObservingPeerReport.reconEndpoint {
+ reconEndpoint = &ReconEndpointStruct{
+ IPAddrPort: internalReconEndpointReport.ipAddrPort,
+ MaxDiskUsagePercentage: internalReconEndpointReport.maxDiskUsagePercentage,
+ }
+
+ externalObservingPeer.ReconEndpoint = append(externalObservingPeer.ReconEndpoint, reconEndpoint)
+ }
+
return
}
func convertExternalToInternalObservingPeerReport(externalObservingPeer *ObservingPeerStruct) (internalObservingPeerReport *internalObservingPeerReportStruct) {
var (
- internalServingPeerReport *internalServingPeerReportStruct
- internalVolumeGroupReport *internalVolumeGroupReportStruct
- internalVolumeReport *internalVolumeReportStruct
- servingPeer *ServingPeerStruct
- volume *VolumeStruct
- volumeGroup *VolumeGroupStruct
+ internalReconEndpointReport *internalReconEndpointReportStruct
+ internalServingPeerReport *internalServingPeerReportStruct
+ internalVolumeGroupReport *internalVolumeGroupReportStruct
+ internalVolumeReport *internalVolumeReportStruct
+ reconEndpoint *ReconEndpointStruct
+ servingPeer *ServingPeerStruct
+ volume *VolumeStruct
+ volumeGroup *VolumeGroupStruct
)
if nil == externalObservingPeer {
@@ -202,8 +199,9 @@ func convertExternalToInternalObservingPeerReport(externalObservingPeer *Observi
}
internalObservingPeerReport = &internalObservingPeerReportStruct{
- name: externalObservingPeer.Name,
- servingPeer: make(map[string]*internalServingPeerReportStruct),
+ name: externalObservingPeer.Name,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
}
for _, servingPeer = range externalObservingPeer.ServingPeer {
@@ -241,5 +239,15 @@ func convertExternalToInternalObservingPeerReport(externalObservingPeer *Observi
internalObservingPeerReport.servingPeer[internalServingPeerReport.name] = internalServingPeerReport
}
+ for _, reconEndpoint = range externalObservingPeer.ReconEndpoint {
+ internalReconEndpointReport = &internalReconEndpointReportStruct{
+ observingPeer: internalObservingPeerReport,
+ ipAddrPort: reconEndpoint.IPAddrPort,
+ maxDiskUsagePercentage: reconEndpoint.MaxDiskUsagePercentage,
+ }
+
+ internalObservingPeerReport.reconEndpoint[internalReconEndpointReport.ipAddrPort] = internalReconEndpointReport
+ }
+
return
}
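
A small in-package sketch (illustration only; names and numbers are made up) of the expanded external report shape and the round trip through the converters above:

    observed := &ObservingPeerStruct{
        Name:        "PeerA",
        ServingPeer: make([]*ServingPeerStruct, 0),
        ReconEndpoint: []*ReconEndpointStruct{
            {IPAddrPort: "192.168.0.1:6200", MaxDiskUsagePercentage: 87},
        },
    }
    internalReport := convertExternalToInternalObservingPeerReport(observed)
    roundTripped := convertInternalToExternalObservingPeerReport(internalReport)
    _ = roundTripped // Name, ServingPeer, and ReconEndpoint all survive the round trip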
diff --git a/liveness/config.go b/liveness/config.go
index 4c078b048..48a41e824 100644
--- a/liveness/config.go
+++ b/liveness/config.go
@@ -11,6 +11,7 @@ import (
"time"
"github.com/swiftstack/ProxyFS/conf"
+ "github.com/swiftstack/ProxyFS/inode"
"github.com/swiftstack/ProxyFS/logger"
"github.com/swiftstack/ProxyFS/trackedlock"
"github.com/swiftstack/ProxyFS/transitions"
@@ -47,6 +48,11 @@ const (
LogLevelMax = uint64(4)
LogLevelDefault = LogLevelNone
+
+ DefaultSwiftReconNoWriteThreshold = 80
+ DefaultSwiftReconReadOnlyThreshold = 90
+ DefaultSwiftConfDir = "/etc/swift"
+ DefaultSwiftReconChecksPerConfCheck = 10
)
type volumeStruct struct {
@@ -104,9 +110,16 @@ type internalServingPeerReportStruct struct {
volumeGroup map[string]*internalVolumeGroupReportStruct // Key = internalVolumeGroupReportStruct.name
}
+type internalReconEndpointReportStruct struct {
+ observingPeer *internalObservingPeerReportStruct
+ ipAddrPort string
+ maxDiskUsagePercentage uint8
+}
+
type internalObservingPeerReportStruct struct {
- name string
- servingPeer map[string]*internalServingPeerReportStruct // Key = internalServingPeerReportStruct.name
+ name string
+ servingPeer map[string]*internalServingPeerReportStruct // Key = internalServingPeerReportStruct.name
+ reconEndpoint map[string]*internalReconEndpointReportStruct // Key = internalReconEndpointReportStruct.ipAddrPort
}
type internalLivenessReportStruct struct {
@@ -115,46 +128,53 @@ type internalLivenessReportStruct struct {
type globalsStruct struct {
trackedlock.Mutex
- active bool
- whoAmI string
- myPublicIPAddr net.IP
- myPrivateIPAddr net.IP
- myUDPAddr *net.UDPAddr
- myUDPConn *net.UDPConn
- myVolumeGroupMap map[string]*volumeGroupStruct // Key == volumeGroupStruct.name
- peersByName map[string]*peerStruct // Key == peerStruct.name
- peersByTuple map[string]*peerStruct // Key == peerStruct.udpAddr.String() (~= peerStruct.tuple)
- udpPacketSendSize uint64
- udpPacketSendPayloadSize uint64
- udpPacketRecvSize uint64
- udpPacketRecvPayloadSize uint64
- udpPacketCapPerMessage uint8
- sendMsgMessageSizeMax uint64
- heartbeatDuration time.Duration
- heartbeatMissLimit uint64
- heartbeatMissDuration time.Duration
- messageQueueDepthPerPeer uint64
- maxRequestDuration time.Duration
- livenessCheckRedundancy uint64
- logLevel uint64
- jsonRPCServerPort uint16
- crc64ECMATable *crc64.Table
- nextNonce uint64 // Randomly initialized... skips 0
- recvMsgsDoneChan chan struct{}
- recvMsgQueue *list.List // FIFO ordered
- recvMsgChan chan struct{}
- requestsByExpirationTime *list.List // FIFO ordered
- requestsByMsgTag map[uint64]*requestStruct // Key == requestStruct.msgTag
- requestExpirerStartChan chan struct{} // Signaled when inserting the first element of requestsByExpirationTime
- requestExpirerStopChan chan struct{} // Signaled when asking requestExpirer() to halt
- requestExpirerDone sync.WaitGroup // Signaled when requestExpirer() has exited
- currentLeader *peerStruct
- currentVote *peerStruct
- currentTerm uint64
- nextState func()
- stateMachineStopChan chan struct{}
- stateMachineDone sync.WaitGroup
- livenessCheckerControlChan chan bool // Send true to trigger livenessChecker() to recompute polling schedule
+ active bool
+ whoAmI string
+ myPublicIPAddr net.IP
+ myPrivateIPAddr net.IP
+ myUDPAddr *net.UDPAddr
+ myUDPConn *net.UDPConn
+ myVolumeGroupMap map[string]*volumeGroupStruct // Key == volumeGroupStruct.name
+ peersByName map[string]*peerStruct // Key == peerStruct.name
+ peersByTuple map[string]*peerStruct // Key == peerStruct.udpAddr.String() (~= peerStruct.tuple)
+ udpPacketSendSize uint64
+ udpPacketSendPayloadSize uint64
+ udpPacketRecvSize uint64
+ udpPacketRecvPayloadSize uint64
+ udpPacketCapPerMessage uint8
+ sendMsgMessageSizeMax uint64
+ heartbeatDuration time.Duration
+ heartbeatMissLimit uint64
+ heartbeatMissDuration time.Duration
+ messageQueueDepthPerPeer uint64
+ maxRequestDuration time.Duration
+ livenessCheckRedundancy uint64
+ logLevel uint64
+ jsonRPCServerPort uint16
+ swiftReconNoWriteThreshold uint8
+ swiftReconReadOnlyThreshold uint8
+ swiftConfDir string
+ swiftReconChecksPerConfCheck uint64
+ swiftReconChecksUntilConfCheck uint64
+ swiftConfFileMap map[string]time.Time // Key == os.FileInfo.Name(); Value == os.FileInfo.ModTime()
+ swiftReconEndpointSet map[string]struct{} // Key == IPAddrPort of ReconEndpoint
+ crc64ECMATable *crc64.Table
+ nextNonce uint64 // Randomly initialized... skips 0
+ recvMsgsDoneChan chan struct{}
+ recvMsgQueue *list.List // FIFO ordered
+ recvMsgChan chan struct{}
+ requestsByExpirationTime *list.List // FIFO ordered
+ requestsByMsgTag map[uint64]*requestStruct // Key == requestStruct.msgTag
+ requestExpirerStartChan chan struct{} // Signaled when inserting the first element of requestsByExpirationTime
+ requestExpirerStopChan chan struct{} // Signaled when asking requestExpirer() to halt
+ requestExpirerDone sync.WaitGroup // Signaled when requestExpirer() has exited
+ currentLeader *peerStruct
+ currentVote *peerStruct
+ currentTerm uint64
+ nextState func()
+ stateMachineStopChan chan struct{}
+ stateMachineDone sync.WaitGroup
+ livenessCheckerControlChan chan bool // Send true to trigger livenessChecker() to recompute polling schedule
// Send false to trigger livenessChecker() to exit
livenessCheckerWG sync.WaitGroup
volumeToCheckList []*volumeStruct
@@ -162,6 +182,7 @@ type globalsStruct struct {
emptyServingPeerToCheckSet map[string]struct{} // List (in "set" form) of ServingPeers (by name) with no VolumeGroups
myObservingPeerReport *internalObservingPeerReportStruct
livenessReport *internalLivenessReportStruct
+ curRWMode inode.RWModeType
}
var globals globalsStruct
@@ -199,6 +220,9 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) {
globals.requestExpirerStartChan = make(chan struct{}, 1)
globals.requestExpirerStopChan = make(chan struct{}, 1)
+ globals.curRWMode = inode.RWModeNormal
+ inode.SetRWMode(globals.curRWMode)
+
globals.requestExpirerDone.Add(1)
go requestExpirer()
@@ -232,6 +256,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
// SignaledStart will be used to halt the cluster leadership process. This is to support
// SIGHUP handling so that all confMap changes are incorporated... not just during a restart.

@@ -273,6 +300,13 @@ func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
}
}
+ // Clear out Swift recon settings and computed details
+
+ globals.swiftReconNoWriteThreshold = 101 // Never enforce NoWrite Mode
+ globals.swiftReconReadOnlyThreshold = 101 // Never enforce ReadOnly Mode
+ globals.swiftConfDir = ""
+ globals.swiftReconChecksPerConfCheck = 0 // Disabled
+
// Free up remaining allocated resources
globals.myVolumeGroupMap = nil
@@ -708,15 +742,73 @@ func (dummy *globalsStruct) SignaledFinish(confMap conf.ConfMap) (err error) {
}
}
- // Fetch remaining ConfMap data
+ // Fetch JSON RPC Port to be used when polling Peers
globals.jsonRPCServerPort, err = confMap.FetchOptionValueUint16("JSONRPCServer", "TCPPort")
if nil != err {
return
}
+ // Fetch Swift recon settings
+
+ err = confMap.VerifyOptionIsMissing("SwiftClient", "SwiftReconChecksPerConfCheck")
+ if nil == err {
+ logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconChecksPerConfCheck... defaulting to %d", DefaultSwiftReconChecksPerConfCheck)
+ globals.swiftReconChecksPerConfCheck = DefaultSwiftReconChecksPerConfCheck
+ } else {
+ globals.swiftReconChecksPerConfCheck, err = confMap.FetchOptionValueUint64("SwiftClient", "SwiftReconChecksPerConfCheck")
+ if nil != err {
+ logger.ErrorfWithError(err, "Unable to parse [SwiftClient]SwiftReconChecksPerConfCheck")
+ return
+ }
+ }
+
+ if 0 == globals.swiftReconChecksPerConfCheck {
+ logger.Warnf("[SwiftClient]SwiftReconChecksPerConfCheck == 0... disabling recon checks")
+ } else {
+ globals.swiftReconNoWriteThreshold, err = confMap.FetchOptionValueUint8("SwiftClient", "SwiftReconNoWriteThreshold")
+ if nil == err {
+ if 100 < globals.swiftReconNoWriteThreshold {
+ err = fmt.Errorf("[SwiftClient]SwiftReconNoWriteThreshold cannot be greater than 100")
+ return
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconNoWriteThreshold... defaulting to %d", DefaultSwiftReconNoWriteThreshold)
+ globals.swiftReconNoWriteThreshold = DefaultSwiftReconNoWriteThreshold
+ }
+
+ globals.swiftReconReadOnlyThreshold, err = confMap.FetchOptionValueUint8("SwiftClient", "SwiftReconReadOnlyThreshold")
+ if nil == err {
+ if 100 < globals.swiftReconReadOnlyThreshold {
+ err = fmt.Errorf("[SwiftClient]SwiftReconReadOnlyThreshold cannot be greater than 100")
+ return
+ }
+ if globals.swiftReconReadOnlyThreshold < globals.swiftReconNoWriteThreshold {
+ err = fmt.Errorf("[SwiftClient]SwiftReconReadOnlyThreshold cannot be less than [SwiftClient]SwiftReconNoWriteThreshold")
+ return
+ }
+ } else {
+ if globals.swiftReconNoWriteThreshold > DefaultSwiftReconReadOnlyThreshold {
+				logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconReadOnlyThreshold... defaulting to %d", globals.swiftReconNoWriteThreshold)
+				globals.swiftReconReadOnlyThreshold = globals.swiftReconNoWriteThreshold
+			} else {
+				logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftReconReadOnlyThreshold... defaulting to %d", DefaultSwiftReconReadOnlyThreshold)
+ globals.swiftReconReadOnlyThreshold = DefaultSwiftReconReadOnlyThreshold
+ }
+ }
+
+ globals.swiftConfDir, err = confMap.FetchOptionValueString("SwiftClient", "SwiftConfDir")
+ if nil != err {
+ logger.WarnfWithError(err, "Unable to fetch [SwiftClient]SwiftConfDir... defaulting to %s", DefaultSwiftConfDir)
+ globals.swiftConfDir = DefaultSwiftConfDir
+ }
+ }
+
// Initialize remaining globals
+ globals.swiftReconChecksUntilConfCheck = 0 // First ReconCheck will trigger a ConfCheck
+ globals.swiftConfFileMap = make(map[string]time.Time)
+
globals.recvMsgQueue = list.New()
globals.recvMsgChan = make(chan struct{}, 1)
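
The nested fetch/default logic for the two recon thresholds reduces to a few rules; a condensed restatement as a hypothetical helper (illustration only, not part of the patch):

    // Rules enforced above for the [SwiftClient] recon thresholds:
    //   * both thresholds are percentages and may not exceed 100
    //   * SwiftReconReadOnlyThreshold may not be below SwiftReconNoWriteThreshold
    //   * when SwiftReconReadOnlyThreshold cannot be fetched, it defaults to the larger
    //     of SwiftReconNoWriteThreshold and DefaultSwiftReconReadOnlyThreshold (90)
    func resolveReadOnlyThreshold(noWrite uint8, fetched *uint8) (uint8, error) {
        if nil != fetched {
            if *fetched > 100 {
                return 0, fmt.Errorf("SwiftReconReadOnlyThreshold cannot be greater than 100")
            }
            if *fetched < noWrite {
                return 0, fmt.Errorf("SwiftReconReadOnlyThreshold cannot be less than SwiftReconNoWriteThreshold")
            }
            return *fetched, nil
        }
        if noWrite > DefaultSwiftReconReadOnlyThreshold {
            return noWrite, nil
        }
        return DefaultSwiftReconReadOnlyThreshold, nil
    }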
diff --git a/liveness/messages.go b/liveness/messages.go
index f928e4d74..67bc5bc81 100644
--- a/liveness/messages.go
+++ b/liveness/messages.go
@@ -9,6 +9,7 @@ import (
"reflect"
"time"
+ "github.com/swiftstack/ProxyFS/inode"
"github.com/swiftstack/ProxyFS/logger"
)
@@ -42,6 +43,7 @@ type HeartBeatRequestStruct struct {
MsgType MsgType // == MsgTypeHeartBeatRequest
MsgTag uint64 // Used for matching this HeartBeatRequestStruct to a subsequent HeartBeatResponseStruct
LeaderTerm uint64
+ NewRWMode inode.RWModeType // One of inode.RWModeNormal, inode.RWModeNoWrite, or inode.RWModeReadOnly
ToObserve *ObservingPeerStruct // VolumeStruct.State & VolumeStruct.LastCheckTime are ignored
}
diff --git a/liveness/polling.go b/liveness/polling.go
index ce81fb653..ab630ec50 100644
--- a/liveness/polling.go
+++ b/liveness/polling.go
@@ -1,11 +1,19 @@
package liveness
import (
+ "bytes"
+ "compress/gzip"
"container/list"
+ "encoding/binary"
"encoding/json"
"fmt"
+ "io/ioutil"
+ "math/big"
"net"
+ "net/http"
+ "os"
"reflect"
+ "regexp"
"time"
"github.com/swiftstack/ProxyFS/jrpcfs"
@@ -26,6 +34,20 @@ type pingReplyStruct struct {
Error string `json:"error"`
}
+type ringFilePayloadJSONDevStruct struct {
+ IP string `json:"ip"`
+ Port uint16 `json:"port"`
+}
+
+type ringFilePayloadJSONStruct struct {
+ Devs []*ringFilePayloadJSONDevStruct `json:"devs"`
+}
+
+type reconDevReportStruct struct {
+ Size int64 `json:"size"`
+ Used int64 `json:"used"`
+}
+
const maxRPCReplySize = 4096
func livenessChecker() {
@@ -37,6 +59,7 @@ func livenessChecker() {
err error
livenessCheckerControlChanValue bool
myObservingPeerReport *internalObservingPeerReportStruct
+ reconEndpoint *internalReconEndpointReportStruct
servingPeer *internalServingPeerReportStruct
volume *internalVolumeReportStruct
volumeGroup *internalVolumeGroupReportStruct
@@ -78,29 +101,23 @@ func livenessChecker() {
checkEntityList = list.New()
for _, servingPeer = range myObservingPeerReport.servingPeer {
- if utils.FetchRandomBool() {
- _ = checkEntityList.PushFront(servingPeer)
- } else {
- _ = checkEntityList.PushBack(servingPeer)
- }
+ _ = checkEntityList.PushBack(servingPeer)
for _, volumeGroup = range servingPeer.volumeGroup {
- if utils.FetchRandomBool() {
- _ = checkEntityList.PushFront(volumeGroup)
- } else {
- _ = checkEntityList.PushBack(volumeGroup)
- }
+ _ = checkEntityList.PushBack(volumeGroup)
for _, volume = range volumeGroup.volume {
- if utils.FetchRandomBool() {
- _ = checkEntityList.PushFront(volume)
- } else {
- _ = checkEntityList.PushBack(volume)
- }
+ _ = checkEntityList.PushBack(volume)
}
}
}
+ for _, reconEndpoint = range myObservingPeerReport.reconEndpoint {
+ _ = checkEntityList.PushBack(reconEndpoint)
+ }
+
+ utils.RandomizeList(checkEntityList)
+
// Compute number of entities to check & time between each check
// Allow for one extra time slice to hopefully get all entities checked
@@ -120,6 +137,8 @@ func livenessChecker() {
livenessCheckVolumeGroup(entityToCheck.Value.(*internalVolumeGroupReportStruct))
case reflect.TypeOf(volume):
livenessCheckVolume(entityToCheck.Value.(*internalVolumeReportStruct))
+ case reflect.TypeOf(reconEndpoint):
+ livenessCheckReconEndpoint(entityToCheck.Value.(*internalReconEndpointReportStruct))
default:
err = fmt.Errorf("Unrecognized reflect.TypeOf(entityToCheck.Value): %v", reflect.TypeOf(entityToCheck.Value))
panic(err)
@@ -291,15 +310,85 @@ func livenessCheckVolume(volume *internalVolumeReportStruct) {
// TODO: Implement livenessCheckVolume()
}
+func livenessCheckReconEndpoint(reconEndpoint *internalReconEndpointReportStruct) {
+ var (
+ bigDividend *big.Int
+ bigDivisor *big.Int
+ bigQuotient *big.Int
+ bigRemainder *big.Int
+ devUtilization uint8
+ err error
+ quotient int64
+ reconDevReport *reconDevReportStruct
+ reconDevReportSlice []*reconDevReportStruct
+ reconResp *http.Response
+ reconRespBody []byte
+ remainder int64
+ url string
+ )
+
+ reconEndpoint.maxDiskUsagePercentage = 0
+
+ url = fmt.Sprintf("http://%s/recon/diskusage", reconEndpoint.ipAddrPort)
+
+ reconResp, err = http.Get(url)
+ if nil == err {
+ reconRespBody, err = ioutil.ReadAll(reconResp.Body)
+ if nil == err {
+ if http.StatusOK == reconResp.StatusCode {
+ reconDevReportSlice = make([]*reconDevReportStruct, 0)
+ err = json.Unmarshal(reconRespBody, &reconDevReportSlice)
+ if nil == err {
+ for _, reconDevReport = range reconDevReportSlice {
+ if (reconDevReport.Used > 0) && (reconDevReport.Size > 0) && (reconDevReport.Used <= reconDevReport.Size) {
+ bigDividend = new(big.Int).Mul(big.NewInt(100), big.NewInt(reconDevReport.Used))
+ bigDivisor = big.NewInt(reconDevReport.Size)
+ bigQuotient = new(big.Int).Quo(bigDividend, bigDivisor)
+ bigRemainder = new(big.Int).Rem(bigDividend, bigDivisor)
+ quotient = bigQuotient.Int64()
+ remainder = bigRemainder.Int64()
+ if 0 == remainder {
+ devUtilization = uint8(quotient)
+ } else {
+ devUtilization = uint8(quotient) + 1
+ }
+ if devUtilization > reconEndpoint.maxDiskUsagePercentage {
+ reconEndpoint.maxDiskUsagePercentage = devUtilization
+ }
+ } else {
+ logger.Warnf("livenessCheckReconEndpoint() GET to %s got responseBody with unreasonable used and size values", url)
+ }
+ }
+ } else {
+ logger.WarnfWithError(err, "livenessCheckReconEndpoint() GET to %s got response.Body with invalid JSON", url)
+ }
+ } else {
+ logger.WarnfWithError(err, "livenessCheckReconEndpoint() GET to %s got bad status: %s", url, reconResp.Status)
+ }
+ } else {
+ logger.WarnfWithError(err, "livenessCheckReconEndpoint() GET to %s response.Body() read failed", url)
+ }
+ err = reconResp.Body.Close()
+ if nil != err {
+ logger.WarnfWithError(err, "livenessCheckReconEndpoint() GET to %s response.Body.Close() failed", url)
+ }
+ } else {
+ logger.WarnfWithError(err, "livenessCheckReconEndpoint() failed to issue GET to %s", url)
+ }
+}
+
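
livenessCheckReconEndpoint() rounds each device's utilization up to the next whole percent; the math/big arithmetic exists only to keep 100*Used from overflowing on very large devices. The same rounding with plain integers (sketch only, hypothetical helper):

    // e.g. used=893, size=1000: 100*893/1000 = 89 remainder 300, so report 90%, not 89%.
    func utilizationPercentage(used, size int64) uint8 {
        quotient := (100 * used) / size
        if (100*used)%size == 0 {
            return uint8(quotient)
        }
        return uint8(quotient) + 1
    }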
// computeLivenessCheckAssignments takes a list of ObservingPeer and produces a
// template internalLivenessReport that is to be filled in by this collection of peers.
-// While the elements of the resultant internalLivenessReport have State and LastCheckTime
-// fields, these are ignored as they will ultimately be filled in by each ObservingPeer.
-// The livenessCheckRedundancy is used to ensure that each ServingPeer, VolumeGroup,
-// and Volume is adequately covered. As every Volume is part of a VolumeGroup and every
-// VolumeGroup is assigned to a single ServingPeer, this amounts to just dolling out
-// the Volumes to ObervingPeers with the required livenessCheckRedundancy. That said,
-// it is a bit misleading for an ObservingPeer to report that a VolumeGroup is "alive"
+// While the elements of the resultant internalLivenessReport have State, LastCheckTime,
+// and MaxDiskUsagePercentage fields, these are ignored as they will ultimately be filled
+// in by each ObservingPeer. The livenessCheckRedundancy is used to ensure that each
+// ServingPeer, VolumeGroup, Volume, and ReconEndpoint is adequately covered. As every
+// Volume is part of a VolumeGroup and every VolumeGroup is assigned to a single ServingPeer,
+// this amounts to just doling out the Volumes to ObservingPeers with the required
+// livenessCheckRedundancy. Similarly, the ReconEndpoints are doled out with this
+// same livenessCheckRedundancy.
+//
+// It is a bit misleading for an ObservingPeer to report that a VolumeGroup is "alive"
// when not all of that VolumeGroup's Volumes have been checked. Similarly, it is a
// bit misleading for an ObservingPeer to report that a ServingPeer is "alive" when
// not all of that ServingPeer's VolumeGroups have been checked. Therefore, to get an
@@ -310,19 +399,42 @@ func livenessCheckVolume(volume *internalVolumeReportStruct) {
// VolumeGroups assigned will still be in the resultant internalLivenessReport.
func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLivenessReport *internalLivenessReportStruct) {
var (
+ alreadyInSwiftReconEndpointIAddrSet bool
+ curSwiftConfFileMap map[string]time.Time
effectiveLivenessCheckRedundancy uint64
effectiveLivenessCheckRedundancyIndex uint64
err error
+ fileInfo os.FileInfo
+ fileInfoSlice []os.FileInfo
+ fileInfoModTime time.Time
+ fileInfoName string
+ inSwiftConfFileMap bool
internalObservingPeerReport *internalObservingPeerReportStruct
+ internalReconEndpointReport *internalReconEndpointReportStruct
internalServingPeerReport *internalServingPeerReportStruct
internalVolumeGroupReport *internalVolumeGroupReportStruct
internalVolumeReport *internalVolumeReportStruct
+ matchedRingFilename bool
+ needToUpdateSwiftConfFileMap bool
notYetAdded bool
observingPeerIndex uint64
observingPeerName string
ok bool
+ prevFileInfoModTime time.Time
+ ringFileData []byte
+ ringFileName string
+ ringFileMagic []byte
+ ringFilePayload []byte
+ ringFilePayloadJSON *ringFilePayloadJSONStruct
+ ringFilePayloadJSONDev *ringFilePayloadJSONDevStruct
+ ringFilePayloadLen int32
+ ringFileReader *gzip.Reader
+ ringFileReadLen int
+ ringFileVersion uint16
servingPeer *peerStruct
servingPeerName string
+ swiftReconEndpoint string
+ swiftReconEndpointIPAddrSet map[string]struct{}
volumeGroup *volumeGroupStruct
volumeGroupName string
volumeName string
@@ -334,6 +446,138 @@ func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLi
panic(err)
}
+ // Determine reconEndpoints
+
+ if 0 == globals.swiftReconChecksPerConfCheck {
+ globals.swiftReconEndpointSet = make(map[string]struct{})
+ } else {
+ if 0 == globals.swiftReconChecksUntilConfCheck {
+ // Time to potentially refresh globals.swiftConfFileMap & globals.swiftReconEndpointSet
+
+ globals.swiftReconChecksUntilConfCheck = globals.swiftReconChecksPerConfCheck
+
+ fileInfoSlice, err = ioutil.ReadDir(globals.swiftConfDir)
+ if nil != err {
+ logger.FatalfWithError(err, "Unable to read [SwiftClient]SwiftConfDir (%s)", globals.swiftConfDir)
+ }
+
+ curSwiftConfFileMap = make(map[string]time.Time)
+
+ for _, fileInfo = range fileInfoSlice {
+ fileInfoName = fileInfo.Name()
+ switch fileInfoName {
+ case "account.ring.gz":
+ matchedRingFilename = true
+ case "container.ring.gz":
+ matchedRingFilename = true
+ default:
+ matchedRingFilename, err = regexp.MatchString("^object.*\\.ring\\.gz$", fileInfoName)
+ if nil != err {
+ logger.FatalfWithError(err, "Unexpected failure calling regexp.MatchString()")
+ }
+ }
+
+ if matchedRingFilename {
+ curSwiftConfFileMap[fileInfoName] = fileInfo.ModTime()
+ }
+ }
+
+ if len(globals.swiftConfFileMap) != len(curSwiftConfFileMap) {
+ needToUpdateSwiftConfFileMap = true
+ } else {
+ needToUpdateSwiftConfFileMap = false
+ for fileInfoName, fileInfoModTime = range curSwiftConfFileMap {
+ prevFileInfoModTime, inSwiftConfFileMap = globals.swiftConfFileMap[fileInfoName]
+ if !inSwiftConfFileMap || (fileInfoModTime != prevFileInfoModTime) {
+ needToUpdateSwiftConfFileMap = true
+ }
+ }
+ }
+
+ if needToUpdateSwiftConfFileMap {
+ // We must refresh globals.swiftConfFileMap & globals.swiftReconEndpointSet
+
+ globals.swiftConfFileMap = curSwiftConfFileMap
+
+ swiftReconEndpointIPAddrSet = make(map[string]struct{})
+ globals.swiftReconEndpointSet = make(map[string]struct{})
+
+ for ringFileName = range globals.swiftConfFileMap {
+ ringFileData, err = ioutil.ReadFile(globals.swiftConfDir + "/" + ringFileName)
+ if nil == err {
+ ringFileReader, err = gzip.NewReader(bytes.NewReader(ringFileData))
+ if nil == err {
+ ringFileMagic = make([]byte, 4)
+ ringFileReadLen, err = ringFileReader.Read(ringFileMagic)
+ if nil == err {
+ if ringFileReadLen == len(ringFileMagic) {
+ if bytes.Equal([]byte("R1NG"), ringFileMagic) {
+ err = binary.Read(ringFileReader, binary.BigEndian, &ringFileVersion)
+ if nil == err {
+ if 1 == ringFileVersion {
+ err = binary.Read(ringFileReader, binary.BigEndian, &ringFilePayloadLen)
+ if nil == err {
+ ringFilePayload = make([]byte, ringFilePayloadLen)
+ ringFileReadLen, err = ringFileReader.Read(ringFilePayload)
+ if nil == err {
+ if ringFileReadLen == len(ringFilePayload) {
+ ringFilePayloadJSON = &ringFilePayloadJSONStruct{}
+ err = json.Unmarshal(ringFilePayload, ringFilePayloadJSON)
+ if nil == err {
+ for _, ringFilePayloadJSONDev = range ringFilePayloadJSON.Devs {
+ _, alreadyInSwiftReconEndpointIAddrSet = swiftReconEndpointIPAddrSet[ringFilePayloadJSONDev.IP]
+ if !alreadyInSwiftReconEndpointIAddrSet {
+ swiftReconEndpointIPAddrSet[ringFilePayloadJSONDev.IP] = struct{}{}
+ swiftReconEndpoint = fmt.Sprintf("%s:%d", ringFilePayloadJSONDev.IP, ringFilePayloadJSONDev.Port)
+ globals.swiftReconEndpointSet[swiftReconEndpoint] = struct{}{}
+ }
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to json.Unmarshal ringFilePayload from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.Warnf("Misread of ringFilePayload from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to read ringFilePayload from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to read ringFilePayloadLen from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.Warnf("Value of ringFileVersion unexpected from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to read ringFileVersion from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.Warnf("Value of ringFileMagic unexpected from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.Warnf("Misread of ringFileMagic from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to read ringFileMagic from ring file %s", fileInfoName)
+ }
+ err = ringFileReader.Close()
+ if nil != err {
+ logger.WarnfWithError(err, "Unable to close gzip.Reader from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to create gzip.Reader from ring file %s", fileInfoName)
+ }
+ } else {
+ logger.WarnfWithError(err, "Unable to read ring file %s", fileInfoName)
+ }
+ }
+ }
+ } else {
+ globals.swiftReconChecksUntilConfCheck--
+ }
+ }
+
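
The ring-file scan above implies the following layout for the gunzipped stream (an assumption inferred from this parsing code, not from a ring-format specification):

    //   bytes 0..3   magic "R1NG"
    //   bytes 4..5   big-endian uint16 ring file version (only version 1 is accepted)
    //   bytes 6..9   big-endian int32 length of the JSON payload that follows
    //   remainder    JSON payload, e.g. {"devs": [{"ip": "192.168.0.1", "port": 6200}, ...]}
    //
    // Each distinct device IP contributes one "ip:port" entry to
    // globals.swiftReconEndpointSet; additional devices on the same IP are skipped.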
+ // Prepare fresh internalLivenessReport
+
internalLivenessReport = &internalLivenessReportStruct{
observingPeer: make(map[string]*internalObservingPeerReportStruct),
}
@@ -372,8 +616,9 @@ func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLi
internalObservingPeerReport, ok = internalLivenessReport.observingPeer[observingPeerName]
if !ok {
internalObservingPeerReport = &internalObservingPeerReportStruct{
- name: observingPeerName,
- servingPeer: make(map[string]*internalServingPeerReportStruct),
+ name: observingPeerName,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
}
internalLivenessReport.observingPeer[observingPeerName] = internalObservingPeerReport
}
@@ -445,8 +690,9 @@ func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLi
internalObservingPeerReport, ok = internalLivenessReport.observingPeer[observingPeerName]
if !ok {
internalObservingPeerReport = &internalObservingPeerReportStruct{
- name: observingPeerName,
- servingPeer: make(map[string]*internalServingPeerReportStruct),
+ name: observingPeerName,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
}
internalLivenessReport.observingPeer[observingPeerName] = internalObservingPeerReport
}
@@ -507,8 +753,9 @@ func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLi
internalObservingPeerReport, ok = internalLivenessReport.observingPeer[observingPeerName]
if !ok {
internalObservingPeerReport = &internalObservingPeerReportStruct{
- name: observingPeerName,
- servingPeer: make(map[string]*internalServingPeerReportStruct),
+ name: observingPeerName,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
}
internalLivenessReport.observingPeer[observingPeerName] = internalObservingPeerReport
}
@@ -543,6 +790,55 @@ func computeLivenessCheckAssignments(observingPeerNameList []string) (internalLi
}
}
+ // Iterate through observingPeerNameList effectiveLivenessCheckRedundancy times scheduling ReconEndpoints
+
+ for effectiveLivenessCheckRedundancyIndex = 0; effectiveLivenessCheckRedundancyIndex < effectiveLivenessCheckRedundancy; effectiveLivenessCheckRedundancyIndex++ {
+ for swiftReconEndpoint = range globals.swiftReconEndpointSet {
+			// Add swiftReconEndpoint to currently indexed ObservingPeer
+
+ notYetAdded = true // Avoid duplicate assignments
+
+ for notYetAdded {
+ observingPeerName = observingPeerNameList[observingPeerIndex]
+
+ internalObservingPeerReport, ok = internalLivenessReport.observingPeer[observingPeerName]
+ if !ok {
+ internalObservingPeerReport = &internalObservingPeerReportStruct{
+ name: observingPeerName,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
+ }
+ internalLivenessReport.observingPeer[observingPeerName] = internalObservingPeerReport
+ }
+
+ _, ok = internalObservingPeerReport.reconEndpoint[swiftReconEndpoint]
+
+ if ok {
+ // Need to step to the next ObservingPeer because this one is already watching this ReconEndpoint
+ } else {
+ // New ReconEndpoint for this ObservingPeer... so add it
+
+ internalReconEndpointReport = &internalReconEndpointReportStruct{
+ observingPeer: internalObservingPeerReport,
+ ipAddrPort: swiftReconEndpoint,
+ maxDiskUsagePercentage: 0,
+ }
+
+ internalObservingPeerReport.reconEndpoint[swiftReconEndpoint] = internalReconEndpointReport
+
+ notYetAdded = false
+ }
+
+ // Cycle to next ObservingPeer
+
+ observingPeerIndex++
+ if observingPeerIndex == uint64(len(observingPeerNameList)) {
+ observingPeerIndex = 0
+ }
+ }
+ }
+ }
+
return
}
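
The assignment loops above all share the same round-robin shape: walk observingPeerNameList effectiveLivenessCheckRedundancy times, handing each entity to the next peer that is not already covering it. A simplified sketch for the ReconEndpoint case (hypothetical helper; it omits the duplicate-assignment skip and the report bookkeeping used above):

    func assignReconEndpoints(peers []string, endpoints []string, redundancy uint64) map[string][]string {
        assignments := make(map[string][]string) // observing peer name -> recon endpoints to poll
        peerIndex := 0
        for i := uint64(0); i < redundancy; i++ {
            for _, endpoint := range endpoints {
                assignments[peers[peerIndex]] = append(assignments[peers[peerIndex]], endpoint)
                peerIndex = (peerIndex + 1) % len(peers)
            }
        }
        return assignments
    }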
@@ -561,17 +857,19 @@ func mergeObservingPeerReportIntoLivenessReport(internalObservingPeerReport *int
func updateMyObservingPeerReportWhileLocked(internalObservingPeerReport *internalObservingPeerReportStruct) {
var (
- ok bool
- servingPeerName string
- servingPeerNameSet map[string]struct{}
- servingPeerNew *internalServingPeerReportStruct
- servingPeerOld *internalServingPeerReportStruct
- volumeGroupName string
- volumeGroupNameSet map[string]struct{}
- volumeGroupNew *internalVolumeGroupReportStruct
- volumeGroupOld *internalVolumeGroupReportStruct
- volumeName string
- volumeNameSet map[string]struct{}
+ ok bool
+ reconEndpointIPAddrPort string
+ reconEndpointIPAddrPortSet map[string]struct{}
+ servingPeerName string
+ servingPeerNameSet map[string]struct{}
+ servingPeerNew *internalServingPeerReportStruct
+ servingPeerOld *internalServingPeerReportStruct
+ volumeGroupName string
+ volumeGroupNameSet map[string]struct{}
+ volumeGroupNew *internalVolumeGroupReportStruct
+ volumeGroupOld *internalVolumeGroupReportStruct
+ volumeName string
+ volumeNameSet map[string]struct{}
)
if (nil == globals.myObservingPeerReport) || (nil == internalObservingPeerReport) {
@@ -679,4 +977,32 @@ func updateMyObservingPeerReportWhileLocked(internalObservingPeerReport *interna
}
}
}
+
+ // Remove any ReconEndpoints from globals.myObservingPeerReport missing from internalObservingPeerReport
+
+ reconEndpointIPAddrPortSet = make(map[string]struct{})
+
+ for reconEndpointIPAddrPort = range globals.myObservingPeerReport.reconEndpoint {
+ _, ok = internalObservingPeerReport.reconEndpoint[reconEndpointIPAddrPort]
+ if !ok {
+ reconEndpointIPAddrPortSet[reconEndpointIPAddrPort] = struct{}{}
+ }
+ }
+
+ for reconEndpointIPAddrPort = range reconEndpointIPAddrPortSet {
+ delete(globals.myObservingPeerReport.reconEndpoint, reconEndpointIPAddrPort)
+ }
+
+ // Add any ReconEndpoints from internalObservingPeerReport missing from globals.myObservingPeerReport
+
+ for reconEndpointIPAddrPort = range internalObservingPeerReport.reconEndpoint {
+ _, ok = globals.myObservingPeerReport.reconEndpoint[reconEndpointIPAddrPort]
+ if !ok {
+ globals.myObservingPeerReport.reconEndpoint[reconEndpointIPAddrPort] = &internalReconEndpointReportStruct{
+ observingPeer: globals.myObservingPeerReport,
+ ipAddrPort: reconEndpointIPAddrPort,
+ maxDiskUsagePercentage: 0,
+ }
+ }
+ }
}
diff --git a/liveness/states.go b/liveness/states.go
index 1e6bf40c9..0bdd0c6cc 100644
--- a/liveness/states.go
+++ b/liveness/states.go
@@ -7,6 +7,7 @@ import (
"runtime"
"time"
+ "github.com/swiftstack/ProxyFS/inode"
"github.com/swiftstack/ProxyFS/logger"
)
@@ -347,6 +348,12 @@ func doFollower() {
// In case this is the first, record .currentLeader
globals.currentLeader = peer
globals.currentVote = nil
+ // Update RWMode
+ globals.curRWMode = msgAsHeartBeatRequest.NewRWMode
+ err = inode.SetRWMode(globals.curRWMode)
+ if nil != err {
+ logger.FatalfWithError(err, "inode.SetRWMode(%d) failed", globals.curRWMode)
+ }
// Compute msgAsHeartBeatResponse.Observed & reset globals.myObservingPeerReport
globals.Lock()
observedPeerReport = convertInternalToExternalObservingPeerReport(globals.myObservingPeerReport)
@@ -372,6 +379,12 @@ func doFollower() {
// We missed out on Leader election, so record .currentLeader
globals.currentLeader = peer
globals.currentVote = nil
+ // Update RWMode
+ globals.curRWMode = msgAsHeartBeatRequest.NewRWMode
+ err = inode.SetRWMode(globals.curRWMode)
+ if nil != err {
+ logger.FatalfWithError(err, "inode.SetRWMode(%d) failed", globals.curRWMode)
+ }
// Compute msgAsHeartBeatResponse.Observed & reset globals.myObservingPeerReport
globals.Lock()
observedPeerReport = convertInternalToExternalObservingPeerReport(globals.myObservingPeerReport)
@@ -544,6 +557,7 @@ func doLeader() {
heartbeatSuccessfulResponses uint64
heartbeatSuccessfulResponsesRequiredForQuorum uint64
livenessReportThisHeartBeat *internalLivenessReportStruct
+ maxDiskUsagePercentage uint8
msgAsFetchLivenessReportRequest *FetchLivenessReportRequestStruct
msgAsFetchLivenessReportResponse *FetchLivenessReportResponseStruct
msgAsHeartBeatRequest *HeartBeatRequestStruct
@@ -555,6 +569,7 @@ func doLeader() {
observingPeerReport *internalObservingPeerReportStruct
quorumMembersLastHeartBeat []string
quorumMembersThisHeartBeat []string
+ reconEndpointReport *internalReconEndpointReportStruct
recvMsgQueueElement *recvMsgQueueElementStruct
timeNow time.Time
)
@@ -569,8 +584,9 @@ func doLeader() {
globals.Lock()
globals.myObservingPeerReport = &internalObservingPeerReportStruct{
- name: globals.whoAmI,
- servingPeer: make(map[string]*internalServingPeerReportStruct),
+ name: globals.whoAmI,
+ servingPeer: make(map[string]*internalServingPeerReportStruct),
+ reconEndpoint: make(map[string]*internalReconEndpointReportStruct),
}
globals.Unlock()
globals.livenessCheckerControlChan <- true
@@ -612,6 +628,7 @@ func doLeader() {
MsgType: MsgTypeHeartBeatRequest,
MsgTag: heartbeatMsgTag,
LeaderTerm: globals.currentTerm,
+ NewRWMode: globals.curRWMode,
ToObserve: convertInternalToExternalObservingPeerReport(livenessReportThisHeartBeat.observingPeer[peer.name]),
}
@@ -799,7 +816,32 @@ func doLeader() {
}
case <-time.After(heartbeatDurationRemaining):
if heartbeatSuccessfulResponses >= heartbeatSuccessfulResponsesRequiredForQuorum {
- // Just loop back and issue a fresh HeartBeat
+ // Compute new RWMode
+
+ maxDiskUsagePercentage = 0
+
+ for _, observingPeerReport = range globals.livenessReport.observingPeer {
+ for _, reconEndpointReport = range observingPeerReport.reconEndpoint {
+ if reconEndpointReport.maxDiskUsagePercentage > maxDiskUsagePercentage {
+ maxDiskUsagePercentage = reconEndpointReport.maxDiskUsagePercentage
+ }
+ }
+ }
+
+ if maxDiskUsagePercentage >= globals.swiftReconReadOnlyThreshold {
+ globals.curRWMode = inode.RWModeReadOnly
+ } else if maxDiskUsagePercentage >= globals.swiftReconNoWriteThreshold {
+ globals.curRWMode = inode.RWModeNoWrite
+ } else {
+ globals.curRWMode = inode.RWModeNormal
+ }
+
+ err = inode.SetRWMode(globals.curRWMode)
+ if nil != err {
+ logger.FatalfWithError(err, "inode.SetRWMode(%d) failed", globals.curRWMode)
+ }
+
+ // Now just loop back and issue a fresh HeartBeat
} else {
// Quorum lost... convert to Candidate state
globals.nextState = doCandidate
diff --git a/logger/config.go b/logger/config.go
index ca592444a..348dff985 100644
--- a/logger/config.go
+++ b/logger/config.go
@@ -133,19 +133,6 @@ func Up(confMap conf.ConfMap) (err error) {
return nil
}
-func Down(confMap conf.ConfMap) (err error) {
- // We open and close our own logfile
- if logFile != nil {
- // Sync() flushes data cached in the kernel to disk, which is
- // really only useful if the OS were to crash soon
- logFile.Sync()
- logFile.Close()
- }
- logTargets.Clear()
- err = nil
- return
-}
-
func SignaledStart(confMap conf.ConfMap) (err error) {
err = nil
return
@@ -159,6 +146,19 @@ func SignaledFinish(confMap conf.ConfMap) (err error) {
return
}
+func Down(confMap conf.ConfMap) (err error) {
+ // We open and close our own logfile
+ if logFile != nil {
+ // Sync() flushes data cached in the kernel to disk, which is
+ // really only useful if the OS were to crash soon
+ logFile.Sync()
+ logFile.Close()
+ }
+ logTargets.Clear()
+ err = nil
+ return
+}
+
// Parse a log entry captured via LogTarget return the fields as key value pairs
// in a map.
//
diff --git a/pfs-jrpc/Makefile b/pfs-jrpc/Makefile
new file mode 100644
index 000000000..fc62e96f9
--- /dev/null
+++ b/pfs-jrpc/Makefile
@@ -0,0 +1,3 @@
+gosubdir := github.com/swiftstack/ProxyFS/pfs-jrpc
+
+include ../GoMakefile
diff --git a/pfs-jrpc/dummy_test.go b/pfs-jrpc/dummy_test.go
new file mode 100644
index 000000000..cc697532a
--- /dev/null
+++ b/pfs-jrpc/dummy_test.go
@@ -0,0 +1,8 @@
+package main
+
+import (
+ "testing"
+)
+
+func TestDummy(t *testing.T) {
+}
diff --git a/pfs-jrpc/main.go b/pfs-jrpc/main.go
new file mode 100644
index 000000000..eb37af121
--- /dev/null
+++ b/pfs-jrpc/main.go
@@ -0,0 +1,529 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/swiftstack/ProxyFS/conf"
+ "github.com/swiftstack/ProxyFS/jrpcfs"
+)
+
+type jrpcRequestStruct struct {
+ JSONrpc string `json:"jsonrpc"`
+ Method string `json:"method"`
+ ID uint64 `json:"id"`
+ Params [1]interface{} `json:"params"`
+}
+
+type jrpcRequestEmptyParamStruct struct{}
+
+type jrpcResponseIDAndErrorStruct struct {
+ ID uint64 `json:"id"`
+ Error string `json:"error"`
+}
+
+type jrpcResponseNoErrorStruct struct {
+ ID uint64 `json:"id"`
+ Result interface{} `json:"result"`
+}
+
+type envStruct struct {
+ AuthToken string
+ StorageURL string
+ LastMessageID uint64
+ MountID jrpcfs.MountIDAsString
+}
+
+var (
+ authKey string
+ authURL string
+ authUser string
+ envPath string
+ verbose bool
+
+ env *envStruct
+)
+
+func main() {
+ var (
+ args []string
+ cmd string
+ confFilePath string
+ confMap conf.ConfMap
+ err error
+ )
+
+ // Parse arguments
+
+ args = os.Args[1:]
+
+ if 2 > len(args) {
+ doHelp()
+ log.Fatalf("Must specify a .conf and a command")
+ }
+
+ confFilePath = args[0]
+
+ confMap, err = conf.MakeConfMapFromFile(confFilePath)
+ if nil != err {
+ log.Fatalf("Failed to load %s: %v", confFilePath, err)
+ }
+
+ authURL, err = confMap.FetchOptionValueString("JRPCTool", "AuthURL")
+ if nil != err {
+ log.Fatalf("Failed to parse %s value for JRPCTool.AuthURL: %v", confFilePath, err)
+ }
+ if !strings.HasPrefix(strings.ToLower(authURL), "http:") && !strings.HasPrefix(strings.ToLower(authURL), "https:") {
+ log.Fatalf("JRPCTool.AuthURL (\"%s\") must start with either \"http:\" or \"https:\"", authURL)
+ }
+ authUser, err = confMap.FetchOptionValueString("JRPCTool", "AuthUser")
+ if nil != err {
+ log.Fatalf("Failed to parse %s value for JRPCTool.AuthUser: %v", confFilePath, err)
+ }
+ authKey, err = confMap.FetchOptionValueString("JRPCTool", "AuthKey")
+ if nil != err {
+ log.Fatalf("Failed to parse %s value for JRPCTool.AuthKey: %v", confFilePath, err)
+ }
+ envPath, err = confMap.FetchOptionValueString("JRPCTool", "EnvPath")
+ if nil != err {
+ log.Fatalf("Failed to parse %s value for JRPCTool.EnvPath: %v", confFilePath, err)
+ }
+ verbose, err = confMap.FetchOptionValueBool("JRPCTool", "Verbose")
+ if nil != err {
+ log.Fatalf("Failed to parse %s value for JRPCTool.Verbose: %v", confFilePath, err)
+ }
+
+ cmd = args[1]
+
+ switch cmd {
+ case "a":
+ doAuth()
+ case "m":
+ doMount()
+ case "r":
+ doJRPC(args[2:]...)
+ case "c":
+ doClean()
+ default:
+ doHelp()
+ log.Fatalf("Could not understand command \"%s\"", cmd)
+ }
+}
+
+func doHelp() {
+ log.Printf("pfs-jrpc - commandline ProxyFS JSON RPC tool")
+ log.Printf("")
+ log.Printf("Usage: pfs-jrpc <.conf> a")
+ log.Printf(" pfs-jrpc <.conf> m")
+ log.Printf(" pfs-jrpc <.conf> r Server.RpcXXX ['\"Key\":[,\"Key\":]*']")
+ log.Printf(" pfs-jrpc <.conf> c")
+ log.Printf("")
+ log.Printf("Commands:")
+ log.Printf("")
+ log.Printf(" a pass AuthUser/AuthKey to AuthURL to fetch AuthToken/StorageURL")
+ log.Printf(" m perform Server.RpcMountByAccountName of AuthUser's Account")
+ log.Printf(" r Server.RpcXXX perform selected Server.RpcXXX passing JSON RPC params (if any)")
+ log.Printf(" c clean up EnvPath")
+ log.Printf("")
+ log.Printf("Example:")
+ log.Printf("")
+ log.Printf("$ cat pfs-jrpc.conf")
+ log.Printf("")
+ log.Printf("[JRPCTool]")
+ log.Printf("AuthURL: http://localhost:8080/auth/v1.0")
+ log.Printf("AuthUser: test:tester")
+ log.Printf("AuthKey: testing")
+ log.Printf("EnvPath: ./pfs-jrpc.env")
+ log.Printf("Verbose: true")
+ log.Printf("")
+ log.Printf("$ pfs-jrpc pfs-jrpc.conf a")
+ log.Printf("")
+ log.Printf("Auth Request:")
+ log.Printf(" httpRequest.URL: http://localhost:8080/auth/v1.0")
+ log.Printf(" httpRequest.Header: map[X-Auth-Key:[testing] X-Auth-User:[test:tester]]")
+ log.Printf("Auth Response:")
+ log.Printf(" env.AuthToken: AUTH_tk928c2374f62c4de3bfffe4c62bce2e5f")
+ log.Printf(" env.StorageURL: http://localhost:8080/v1/AUTH_test")
+ log.Printf("")
+ log.Printf("$ pfs-jrpc pfs-jrpc.conf m")
+ log.Printf("")
+ log.Printf("jrpcRequestBuf:")
+ log.Printf("{")
+ log.Printf(" \"jsonrpc\": \"2.0\",")
+ log.Printf(" \"method\": \"Server.RpcMountByAccountName\",")
+ log.Printf(" \"id\": 1,")
+ log.Printf(" \"params\": [")
+ log.Printf(" {")
+ log.Printf(" \"AccountName\": \"AUTH_test\",")
+ log.Printf(" \"MountOptions\": 0,")
+ log.Printf(" \"AuthUserID\": 0,")
+ log.Printf(" \"AuthGroupID\": 0")
+ log.Printf(" }")
+ log.Printf(" ]")
+ log.Printf("}")
+ log.Printf("httpResponseBody:")
+ log.Printf("{")
+ log.Printf(" \"error\": null,")
+ log.Printf(" \"id\": 1,")
+ log.Printf(" \"result\": {")
+ log.Printf(" \"RootCAx509CertificatePEM\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJVVENDQVFPZ0F3SUJBZ0lSQVB3eGNWNGtmOGk5RkxhODViQlg0amt3QlFZREsyVndNQ3d4R0RBV0JnTlYKQkFvVEQwTkJJRTl5WjJGdWFYcGhkR2x2YmpFUU1BNEdBMVVFQXhNSFVtOXZkQ0JEUVRBZ0Z3MHhPVEF4TURFdwpNREF3TURCYUdBOHlNVEU1TURFd01UQXdNREF3TUZvd0xERVlNQllHQTFVRUNoTVBRMEVnVDNKbllXNXBlbUYwCmFXOXVNUkF3RGdZRFZRUURFd2RTYjI5MElFTkJNQ293QlFZREsyVndBeUVBUjlrZ1ZNaFpoaHpDMTJCa0RUMkQKUHE3OTJoRThEVVRLd0ZvbjNKcVhwbTZqT0RBMk1BNEdBMVVkRHdFQi93UUVBd0lDaERBVEJnTlZIU1VFRERBSwpCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQVVHQXl0bGNBTkJBT0RQWUl4aVRoZ3RhY3l2Clg0SlhINGNDWGxWU3g4NUk4MjA0YkN2Zy8xeVorL3VNWXFVNGtDUW14ejZxM0h0eDh5aEV2YlpQa1V0QkI5b3cKWkVyaE93TT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=\",")
+ log.Printf(" \"RetryRPCPublicIPAddr\": \"0.0.0.0\",")
+ log.Printf(" \"RootDirInodeNumber\": 1,")
+ log.Printf(" \"RetryRPCPort\": 32356,")
+ log.Printf(" \"MountID\": \"W3g9ItrLBkdYCoSlKQjSnA==\"")
+ log.Printf(" }")
+ log.Printf("}")
+ log.Printf("")
+ log.Printf("$ pfs-jrpc pfs-jrpc.conf r Server.RpcPing '\"Message\":\"Hi\"'")
+ log.Printf("")
+ log.Printf("jrpcRequestBuf:")
+ log.Printf("{")
+ log.Printf(" \"jsonrpc\": \"2.0\",")
+ log.Printf(" \"method\": \"Server.RpcPing\",")
+ log.Printf(" \"id\": 2,")
+ log.Printf(" \"params\": [")
+ log.Printf(" {")
+ log.Printf(" \"MountID\": \"W3g9ItrLBkdYCoSlKQjSnA==\",")
+ log.Printf(" \"Message\": \"Hi\"")
+ log.Printf(" }")
+ log.Printf(" ]")
+ log.Printf("}")
+ log.Printf("httpResponseBody:")
+ log.Printf("{")
+ log.Printf(" \"error\": null,")
+ log.Printf(" \"id\": 2,")
+ log.Printf(" \"result\": {")
+ log.Printf(" \"Message\": \"pong 2 bytes\"")
+ log.Printf(" }")
+ log.Printf("}")
+ log.Printf("")
+ log.Printf("$ pfs-jrpc pfs-jrpc.conf c")
+ log.Printf("")
+}
+
+func doAuth() {
+ var (
+ err error
+ httpClient *http.Client
+ httpRequest *http.Request
+ httpResponse *http.Response
+ )
+
+ httpRequest, err = http.NewRequest("GET", authURL, nil)
+ if nil != err {
+ log.Fatalf("Failed to create GET of authURL==\"%s\": %v", authURL, err)
+ }
+
+ httpRequest.Header["X-Auth-User"] = []string{authUser}
+ httpRequest.Header["X-Auth-Key"] = []string{authKey}
+
+ if verbose {
+ fmt.Printf("Auth Request:\n")
+ fmt.Println(" httpRequest.URL: ", httpRequest.URL)
+ fmt.Println(" httpRequest.Header:", httpRequest.Header)
+ }
+
+ httpClient = &http.Client{}
+
+ httpResponse, err = httpClient.Do(httpRequest)
+ if nil != err {
+ log.Fatalf("Failed to issue GET of authURL==\"%s\": %v", authURL, err)
+ }
+ if http.StatusOK != httpResponse.StatusCode {
+ log.Fatalf("Received unexpected HTTP Status for Get of authURL==\"%s\": %s", authURL, httpResponse.Status)
+ }
+
+ env = &envStruct{
+ AuthToken: httpResponse.Header.Get("X-Auth-Token"),
+ StorageURL: httpResponse.Header.Get("X-Storage-Url"),
+ LastMessageID: 0,
+ MountID: "",
+ }
+
+ if verbose {
+ fmt.Printf("Auth Response:\n")
+ fmt.Printf(" env.AuthToken: %s\n", env.AuthToken)
+ fmt.Printf(" env.StorageURL: %s\n", env.StorageURL)
+ }
+
+ if strings.HasPrefix(env.StorageURL, "http:") && strings.HasPrefix(httpRequest.URL.String(), "https:") {
+ env.StorageURL = strings.Replace(env.StorageURL, "http:", "https:", 1)
+
+ if verbose {
+ fmt.Printf("Auth Response (proxy-corrected):\n")
+ fmt.Printf(" env.AuthToken: %s\n", env.AuthToken)
+ fmt.Printf(" env.StorageURL: %s\n", env.StorageURL)
+ }
+ }
+
+ writeEnv()
+}
+
+func doMount() {
+ var (
+ accountName string
+ err error
+ httpClient *http.Client
+ httpRequest *http.Request
+ httpResponse *http.Response
+ httpResponseBody []byte
+ httpResponseBodyBytesBuffer bytes.Buffer
+ jrpcRequest *jrpcRequestStruct
+ jrpcRequestBuf []byte
+ jrpcRequestBytesBuffer bytes.Buffer
+ jrpcResponseIDAndError *jrpcResponseIDAndErrorStruct
+ jrpcResponseNoError *jrpcResponseNoErrorStruct
+ mountReply *jrpcfs.MountByAccountNameReply
+ mountRequest *jrpcfs.MountByAccountNameRequest
+ storageURLSplit []string
+ thisMessageID uint64
+ )
+
+ readEnv()
+
+ thisMessageID = env.LastMessageID + 1
+ env.LastMessageID = thisMessageID
+
+ writeEnv()
+
+ storageURLSplit = strings.Split(env.StorageURL, "/")
+ if 0 == len(storageURLSplit) {
+ log.Fatalf("Attempt to compute accountName from strings.Split(env.StorageURL, \"/\") failed")
+ }
+
+ accountName = storageURLSplit[len(storageURLSplit)-1]
+
+ mountRequest = &jrpcfs.MountByAccountNameRequest{
+ AccountName: accountName,
+ MountOptions: 0,
+ AuthUserID: 0,
+ AuthGroupID: 0,
+ }
+
+ jrpcRequest = &jrpcRequestStruct{
+ JSONrpc: "2.0",
+ Method: "Server.RpcMountByAccountName",
+ ID: thisMessageID,
+ Params: [1]interface{}{mountRequest},
+ }
+
+ jrpcRequestBuf, err = json.Marshal(jrpcRequest)
+ if nil != err {
+ log.Fatalf("Attempt to marshal jrpcRequest(mount) failed: %v", err)
+ }
+
+ if verbose {
+ _ = json.Indent(&jrpcRequestBytesBuffer, jrpcRequestBuf, "", " ")
+ fmt.Printf("jrpcRequestBuf:\n%s\n", jrpcRequestBytesBuffer.Bytes())
+ }
+
+ httpRequest, err = http.NewRequest("PROXYFS", env.StorageURL, bytes.NewReader(jrpcRequestBuf))
+ if nil != err {
+ log.Fatalf("Failed to create httpRequest for mount: %v", err)
+ }
+
+ httpRequest.Header["X-Auth-Token"] = []string{env.AuthToken}
+ httpRequest.Header["Content-Type"] = []string{"application/json"}
+
+ httpClient = &http.Client{}
+
+ httpResponse, err = httpClient.Do(httpRequest)
+ if nil != err {
+ log.Fatalf("Failed to issue PROXYFS Mount: %v", err)
+ }
+ if http.StatusOK != httpResponse.StatusCode {
+ log.Fatalf("Received unexpected HTTP Status for PROXYFS Mount: %s", httpResponse.Status)
+ }
+
+ httpResponseBody, err = ioutil.ReadAll(httpResponse.Body)
+ if nil != err {
+ log.Fatalf("Failed to read httpResponse.Body: %v", err)
+ }
+ err = httpResponse.Body.Close()
+ if nil != err {
+ log.Fatalf("Failed to close httpResponse.Body: %v", err)
+ }
+
+ if verbose {
+ _ = json.Indent(&httpResponseBodyBytesBuffer, httpResponseBody, "", " ")
+ fmt.Printf("httpResponseBody:\n%s\n", httpResponseBodyBytesBuffer.Bytes())
+ }
+
+ jrpcResponseIDAndError = &jrpcResponseIDAndErrorStruct{}
+
+ err = json.Unmarshal(httpResponseBody, jrpcResponseIDAndError)
+ if nil != err {
+ log.Fatalf("Failed to json.Unmarshal(httpResponseBody) [Case 1]: %v", err)
+ }
+ if thisMessageID != jrpcResponseIDAndError.ID {
+ log.Fatalf("Got unexpected MessageID in httpResponseBody [Case 1]")
+ }
+ if "" != jrpcResponseIDAndError.Error {
+ log.Fatalf("Got JRPC Failure on PROXYFS Mount: %s", jrpcResponseIDAndError.Error)
+ }
+
+ mountReply = &jrpcfs.MountByAccountNameReply{}
+ jrpcResponseNoError = &jrpcResponseNoErrorStruct{Result: mountReply}
+
+ err = json.Unmarshal(httpResponseBody, jrpcResponseNoError)
+ if nil != err {
+ log.Fatalf("Failed to json.Unmarshal(httpResponseBody) [Case 2]: %v", err)
+ }
+	if thisMessageID != jrpcResponseNoError.ID {
+ log.Fatalf("Got unexpected MessageID in httpResponseBody [Case 2]")
+ }
+
+ env.MountID = mountReply.MountID
+
+ writeEnv()
+}
+
+func doJRPC(s ...string) {
+ var (
+ arbitraryRequestMethod string
+ arbitraryRequestParam string
+ err error
+ httpClient *http.Client
+ httpRequest *http.Request
+ httpResponse *http.Response
+ httpResponseBody []byte
+ httpResponseBodyBytesBuffer bytes.Buffer
+ jrpcRequest *jrpcRequestStruct
+ jrpcRequestBuf []byte
+ jrpcRequestBytesBuffer bytes.Buffer
+ jrpcResponseIDAndError *jrpcResponseIDAndErrorStruct
+ thisMessageID uint64
+ )
+
+ readEnv()
+
+ if "" == env.MountID {
+ log.Fatalf("Attempt to issue JSON RPC to unmounted volume")
+ }
+
+ thisMessageID = env.LastMessageID + 1
+ env.LastMessageID = thisMessageID
+
+ writeEnv()
+
+ arbitraryRequestMethod = s[0]
+
+ switch len(s) {
+ case 1:
+ arbitraryRequestParam = "\"MountID\":\"" + string(env.MountID) + "\""
+ case 2:
+ arbitraryRequestParam = "\"MountID\":\"" + string(env.MountID) + "\"," + s[1]
+ default:
+ log.Fatalf("JSON RPC must be either [Method] or [Method Param]... not %v", s)
+ }
+
+ jrpcRequest = &jrpcRequestStruct{
+ JSONrpc: "2.0",
+ Method: arbitraryRequestMethod,
+ ID: thisMessageID,
+ Params: [1]interface{}{&jrpcRequestEmptyParamStruct{}},
+ }
+
+ jrpcRequestBuf, err = json.Marshal(jrpcRequest)
+ if nil != err {
+ log.Fatalf("Attempt to marshal jrpcRequest failed: %v", err)
+ }
+
+ jrpcRequestBuf = append(jrpcRequestBuf[:len(jrpcRequestBuf)-3], arbitraryRequestParam...)
+ jrpcRequestBuf = append(jrpcRequestBuf, "}]}"...)
+
+ if verbose {
+ _ = json.Indent(&jrpcRequestBytesBuffer, jrpcRequestBuf, "", " ")
+ fmt.Printf("jrpcRequestBuf:\n%s\n", jrpcRequestBytesBuffer.Bytes())
+ }
+
+ httpRequest, err = http.NewRequest("PROXYFS", env.StorageURL, bytes.NewReader(jrpcRequestBuf))
+ if nil != err {
+ log.Fatalf("Failed to create httpRequest: %v", err)
+ }
+
+ httpRequest.Header["X-Auth-Token"] = []string{env.AuthToken}
+ httpRequest.Header["Content-Type"] = []string{"application/json"}
+
+ httpClient = &http.Client{}
+
+ httpResponse, err = httpClient.Do(httpRequest)
+ if nil != err {
+ log.Fatalf("Failed to issue PROXYFS: %v", err)
+ }
+ if http.StatusOK != httpResponse.StatusCode {
+ log.Fatalf("Received unexpected HTTP Status: %s", httpResponse.Status)
+ }
+
+ httpResponseBody, err = ioutil.ReadAll(httpResponse.Body)
+ if nil != err {
+ log.Fatalf("Failed to read httpResponse.Body: %v", err)
+ }
+ err = httpResponse.Body.Close()
+ if nil != err {
+ log.Fatalf("Failed to close httpResponse.Body: %v", err)
+ }
+
+ if verbose {
+ _ = json.Indent(&httpResponseBodyBytesBuffer, httpResponseBody, "", " ")
+ fmt.Printf("httpResponseBody:\n%s\n", httpResponseBodyBytesBuffer.Bytes())
+ }
+
+ jrpcResponseIDAndError = &jrpcResponseIDAndErrorStruct{}
+
+ err = json.Unmarshal(httpResponseBody, jrpcResponseIDAndError)
+ if nil != err {
+ log.Fatalf("Failed to json.Unmarshal(httpResponseBody) [Case 2]: %v", err)
+ }
+ if thisMessageID != jrpcResponseIDAndError.ID {
+ log.Fatalf("Got unexpected MessageID in httpResponseBody [Case 2]")
+ }
+ if "" != jrpcResponseIDAndError.Error {
+ log.Fatalf("Got JRPC Failure: %s", jrpcResponseIDAndError.Error)
+ }
+}
+
+func doClean() {
+ _ = os.RemoveAll(envPath)
+}
+
+func writeEnv() {
+ var (
+ envBuf []byte
+ err error
+ )
+
+ envBuf, err = json.Marshal(env)
+ if nil != err {
+ log.Fatalf("Failed to json.Marshal(env): %v", err)
+ }
+
+ err = ioutil.WriteFile(envPath, envBuf, 0644)
+ if nil != err {
+ log.Fatalf("Failed to persist ENV to envPath==\"%s\": %v", envPath, err)
+ }
+}
+
+func readEnv() {
+ var (
+ envBuf []byte
+ err error
+ )
+
+ env = &envStruct{}
+ envBuf, err = ioutil.ReadFile(envPath)
+ if nil != err {
+ log.Fatalf("Failed to recover ENV from envPath==\"%s\": %v", envPath, err)
+ }
+
+ err = json.Unmarshal(envBuf, env)
+ if nil != err {
+ log.Fatalf("Failed to json.Unmarshal(envBuf): %v", err)
+ }
+}
diff --git a/pfs-jrpc/pfs-jrpc.conf b/pfs-jrpc/pfs-jrpc.conf
new file mode 100644
index 000000000..ed1aaccbc
--- /dev/null
+++ b/pfs-jrpc/pfs-jrpc.conf
@@ -0,0 +1,6 @@
+[JRPCTool]
+AuthURL: http://localhost:8080/auth/v1.0
+AuthUser: test:tester
+AuthKey: testing
+EnvPath: ./pfs-jrpc.env
+Verbose: true
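+
+# Typical invocation sequence (mirroring the examples in main.go's doHelp()):
+#
+#   pfs-jrpc pfs-jrpc.conf a                                   # authenticate
+#   pfs-jrpc pfs-jrpc.conf m                                   # mount by account name
+#   pfs-jrpc pfs-jrpc.conf r Server.RpcPing '"Message":"Hi"'   # issue an RPC
+#   pfs-jrpc pfs-jrpc.conf c                                   # clean up EnvPath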
diff --git a/pfs_middleware/pfs_middleware/rpc.py b/pfs_middleware/pfs_middleware/rpc.py
index c5d47e8cf..4d624e746 100644
--- a/pfs_middleware/pfs_middleware/rpc.py
+++ b/pfs_middleware/pfs_middleware/rpc.py
@@ -31,6 +31,7 @@
"Server.RpcGetObject",
"Server.RpcGetStat",
"Server.RpcGetXAttr",
+ "Server.RpcLease",
"Server.RpcListXAttr",
"Server.RpcLookup",
"Server.RpcLookupPlus",
@@ -43,6 +44,7 @@
"Server.RpcReaddir",
"Server.RpcStatVFS",
"Server.RpcType",
+ "Server.RpcUnmount",
}
allow_read_write = {
@@ -59,6 +61,7 @@
"Server.RpcGetObject",
"Server.RpcGetStat",
"Server.RpcGetXAttr",
+ "Server.RpcLease",
"Server.RpcLink",
"Server.RpcListXAttr",
"Server.RpcLog",
@@ -94,6 +97,7 @@
"Server.RpcSymlink",
"Server.RpcType",
"Server.RpcUnlink",
+ "Server.RpcUnmount",
"Server.RpcWrote",
}
diff --git a/pfs_middleware/tox.ini b/pfs_middleware/tox.ini
index d308f280c..2921b5cc6 100644
--- a/pfs_middleware/tox.ini
+++ b/pfs_middleware/tox.ini
@@ -7,7 +7,7 @@ usedevelop = True
deps =
lint: flake8
!lint: -r{toxinidir}/test-requirements.txt
- release: git+git://github.com/swiftstack/swift.git@ss-release-2.24.0.3
+ release: git+git://github.com/swiftstack/swift.git@ss-release-2.25.0.4
minver: http://tarballs.openstack.org/swift/swift-2.9.0.tar.gz
master: http://tarballs.openstack.org/swift/swift-master.tar.gz
commands = python -m unittest discover
diff --git a/pfsagentd/README.md b/pfsagentd/README.md
index e3e247692..c60b4ae8d 100644
--- a/pfsagentd/README.md
+++ b/pfsagentd/README.md
@@ -71,7 +71,7 @@ FUSEMaxBackground: 100
FUSECongestionThreshhold: 0
FUSEMaxWrite: 131072
RetryRPCDeadlineIO: 60s
-RetryRPCKEEPALIVEPeriod: 60s
+RetryRPCKeepAlivePeriod: 60s
```
In the above example, some important fields are as follows:
diff --git a/pfsagentd/globals.go b/pfsagentd/globals.go
index 97b5b0c1f..058b6f87d 100644
--- a/pfsagentd/globals.go
+++ b/pfsagentd/globals.go
@@ -64,7 +64,7 @@ type configStruct struct {
FUSECongestionThreshhold uint16
FUSEMaxWrite uint32
RetryRPCDeadlineIO time.Duration
- RetryRPCKEEPALIVEPeriod time.Duration
+ RetryRPCKeepAlivePeriod time.Duration
}
type retryDelayElementStruct struct {
@@ -613,7 +613,7 @@ func initializeGlobals(confMap conf.ConfMap) {
logFatal(err)
}
- globals.config.RetryRPCKEEPALIVEPeriod, err = confMap.FetchOptionValueDuration("Agent", "RetryRPCKEEPALIVEPeriod")
+ globals.config.RetryRPCKeepAlivePeriod, err = confMap.FetchOptionValueDuration("Agent", "RetryRPCKeepAlivePeriod")
if nil != err {
logFatal(err)
}
diff --git a/pfsagentd/pfsagent.conf b/pfsagentd/pfsagent.conf
index 0eec2f0fd..a2ceaf21a 100644
--- a/pfsagentd/pfsagent.conf
+++ b/pfsagentd/pfsagent.conf
@@ -37,4 +37,4 @@ FUSEMaxBackground: 100
FUSECongestionThreshhold: 0
FUSEMaxWrite: 131072
RetryRPCDeadlineIO: 60s
-RetryRPCKEEPALIVEPeriod: 60s
+RetryRPCKeepAlivePeriod: 60s
diff --git a/pfsagentd/request.go b/pfsagentd/request.go
index 2734340d1..6a2163268 100644
--- a/pfsagentd/request.go
+++ b/pfsagentd/request.go
@@ -39,6 +39,9 @@ func doMountProxyFS() {
)
swiftStorageURL = fetchStorageURL()
+ if "" == swiftStorageURL {
+ logFatalf("unable to fetchStorageURL()")
+ }
swiftStorageURLSplit = strings.Split(swiftStorageURL, "/")
@@ -66,7 +69,7 @@ func doMountProxyFS() {
retryrpcConfig := &retryrpc.ClientConfig{MyUniqueID: string(globals.mountID), IPAddr: globals.retryRPCPublicIPAddr, Port: int(globals.retryRPCPort),
RootCAx509CertificatePEM: globals.rootCAx509CertificatePEM, DeadlineIO: globals.config.RetryRPCDeadlineIO,
- KEEPALIVEPeriod: globals.config.RetryRPCKEEPALIVEPeriod}
+ KeepAlivePeriod: globals.config.RetryRPCKeepAlivePeriod}
globals.retryRPCClient, err = retryrpc.NewClient(retryrpcConfig)
if nil != err {
logFatalf("unable to retryRPCClient.NewClient(%v,%v): Volume: %s (Account: %s) err: %v", globals.retryRPCPublicIPAddr, globals.retryRPCPort, globals.config.FUSEVolumeName, accountName, err)
@@ -74,10 +77,27 @@ func doMountProxyFS() {
}
func doUnmountProxyFS() {
+ var (
+ err error
+ unmountReply *jrpcfs.Reply
+ unmountRequest *jrpcfs.UnmountRequest
+ )
+
// TODO: Flush outstanding FileInode's
// TODO: Tell ProxyFS we are releasing all leases
// TODO: Tell ProxyFS we are unmounting
+ unmountRequest = &jrpcfs.UnmountRequest{
+ MountID: globals.mountID,
+ }
+
+ unmountReply = &jrpcfs.Reply{}
+
+ err = doJRPCRequest("Server.RpcUnmount", unmountRequest, unmountReply)
+ if nil != err {
+ logFatalf("unable to unmount Volume %s: %v", globals.config.FUSEVolumeName, err)
+ }
+
globals.retryRPCClient.Close()
}
@@ -100,6 +120,9 @@ func doJRPCRequest(jrpcMethod string, jrpcParam interface{}, jrpcResult interfac
}
swiftStorageURL = fetchStorageURL()
+ if "" == swiftStorageURL {
+ logFatalf("unable to fetchStorageURL()")
+ }
httpRequest, httpErr = http.NewRequest("PROXYFS", swiftStorageURL, bytes.NewReader(jrpcRequest))
if nil != httpErr {
diff --git a/pfsagentd/setup_teardown_test.go b/pfsagentd/setup_teardown_test.go
index 128fa82d7..44a09c1da 100644
--- a/pfsagentd/setup_teardown_test.go
+++ b/pfsagentd/setup_teardown_test.go
@@ -117,7 +117,7 @@ func testSetup(t *testing.T) {
"Agent.FUSECongestionThreshhold=0",
"Agent.FUSEMaxWrite=131072", // Linux max... 128KiB is good enough for testing
"Agent.RetryRPCDeadlineIO=60s",
- "Agent.RetryRPCKEEPALIVEPeriod=60s",
+ "Agent.RetryRPCKeepAlivePeriod=60s",
"Stats.IPAddr=localhost",
"Stats.UDPPort=54324",
@@ -160,6 +160,7 @@ func testSetup(t *testing.T) {
"SwiftClient.RetryExpBackoffObject=2.0",
"SwiftClient.ChunkedConnectionPoolSize=64",
"SwiftClient.NonChunkedConnectionPoolSize=32",
+ "SwiftClient.SwiftReconChecksPerConfCheck=0",
"PhysicalContainerLayout:PhysicalContainerLayoutReplicated3Way.ContainerStoragePolicy=silver",
"PhysicalContainerLayout:PhysicalContainerLayoutReplicated3Way.ContainerNamePrefix=Replicated3Way_",
@@ -229,6 +230,9 @@ func testSetup(t *testing.T) {
"JSONRPCServer.RetryRPCTTLCompleted=10s",
"JSONRPCServer.RetryRPCAckTrim=10ms",
"JSONRPCServer.DataPathLogging=false",
+ "JSONRPCServer.MinLeaseDuration=250ms",
+ "JSONRPCServer.LeaseInterruptInterval=250ms",
+ "JSONRPCServer.LeaseInterruptLimit=20",
}
testConfStrings = append(testConfStrings, "RamSwiftInfo.MaxAccountNameLength="+strconv.FormatUint(testMaxAccountNameLength, 10))
diff --git a/pfsworkout/main.go b/pfsworkout/main.go
index d2d6d3a69..61996e351 100644
--- a/pfsworkout/main.go
+++ b/pfsworkout/main.go
@@ -34,7 +34,7 @@ type rwSizeEachStruct struct {
fsTimes rwTimesStruct
inodeTimes rwTimesStruct
swiftclientTimes rwTimesStruct
- MountHandle fs.MountHandle // Only used if all threads use same file
+ VolumeHandle fs.VolumeHandle // Only used if all threads use same file
FileInodeNumber inode.InodeNumber // Only used if all threads use same file
ObjectPath string // Only used if all threads use same object
}
@@ -394,7 +394,7 @@ func main() {
// If we are doing the operations on the same file for all threads, create the file now.
if doSameFile {
// Save off MountHandle and FileInodeNumber in rwSizeEach since all threads need this
- err, rwSizeEach.MountHandle, rwSizeEach.FileInodeNumber, fileName = createFsFile()
+ err, rwSizeEach.VolumeHandle, rwSizeEach.FileInodeNumber, fileName = createFsFile()
if nil != err {
// In an error, no point in continuing. Just break from this for loop.
break
@@ -452,7 +452,7 @@ func main() {
// Remove file if all threads used same file
if doSameFile {
- _ = unlinkFsFile(rwSizeEach.MountHandle, fileName)
+ _ = unlinkFsFile(rwSizeEach.VolumeHandle, fileName)
}
rwSizeEach.fsTimes.writeDuration = timeAfterWrites.Sub(timeBeforeWrites)
@@ -825,10 +825,10 @@ func fuseWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64) {
stepErrChan <- nil
}
-func createFsFile() (err error, mountHandle fs.MountHandle, fileInodeNumber inode.InodeNumber, fileName string) {
- mountHandle, err = fs.MountByVolumeName(volumeName, fs.MountOptions(0))
+func createFsFile() (err error, volumeHandle fs.VolumeHandle, fileInodeNumber inode.InodeNumber, fileName string) {
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName(volumeName)
if nil != err {
- stepErrChan <- fmt.Errorf("fs.MountByVolumeName(\"%v\", fs.MountOptions(0), \"\") failed: %v\n", volumeName, err)
+ stepErrChan <- fmt.Errorf("fs.FetchVolumeHandleByVolumeName(\"%v\") failed: %v\n", volumeName, err)
return
}
@@ -836,7 +836,7 @@ func createFsFile() (err error, mountHandle fs.MountHandle, fileInodeNumber inod
fileName = fmt.Sprintf("%s%016X", basenamePrefix, nonce)
- fileInodeNumber, err = mountHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName, inode.PosixModePerm)
+ fileInodeNumber, err = volumeHandle.Create(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName, inode.PosixModePerm)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Create(,,,, fileName==\"%s\", inode.PosixModePerm) failed: %v\n", fileName, err)
return
@@ -844,8 +844,8 @@ func createFsFile() (err error, mountHandle fs.MountHandle, fileInodeNumber inod
return
}
-func unlinkFsFile(mountHandle fs.MountHandle, fileName string) (err error) {
- err = mountHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName)
+func unlinkFsFile(volumeHandle fs.VolumeHandle, fileName string) (err error) {
+ err = volumeHandle.Unlink(inode.InodeRootUserID, inode.InodeGroupID(0), nil, inode.RootDirInodeNumber, fileName)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Unlink(,,,, rootInodeNumber, \"%v\") failed: %v\n", fileName, err)
return
@@ -854,20 +854,22 @@ func unlinkFsFile(mountHandle fs.MountHandle, fileName string) (err error) {
}
func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool, doRandomIO bool) {
- var err error
- var mountHandle fs.MountHandle
- var fileInodeNumber inode.InodeNumber
- var fileName string
+ var (
+ err error
+ fileInodeNumber inode.InodeNumber
+ fileName string
+ volumeHandle fs.VolumeHandle
+ )
if !doSameFile {
// Create the file for this thread
- err, mountHandle, fileInodeNumber, fileName = createFsFile()
+ err, volumeHandle, fileInodeNumber, fileName = createFsFile()
if nil != err {
return
}
} else {
// File was already created during main()
- mountHandle = rwSizeEach.MountHandle
+ volumeHandle = rwSizeEach.VolumeHandle
fileInodeNumber = rwSizeEach.FileInodeNumber
}
rwSizeRequested := rwSizeEach.KiB * 1024
@@ -896,7 +898,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
// to make sure we do not go past end of file.
rwOffset = rand.Int63n(int64(rwSizeTotal - rwSizeRequested))
}
- rwSizeDelivered, err := mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, uint64(rwOffset), bufWritten, nil)
+ rwSizeDelivered, err := volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, uint64(rwOffset), bufWritten, nil)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err)
return
@@ -909,7 +911,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
} else {
for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested {
- rwSizeDelivered, err := mountHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, bufWritten, nil)
+ rwSizeDelivered, err := volumeHandle.Write(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, bufWritten, nil)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Write(,,,, fileInodeNumber, rwOffset, bufWritten) failed: %v\n", err)
return
@@ -921,7 +923,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
}
}
- err = mountHandle.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber)
+ err = volumeHandle.Flush(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Flush(,,,, fileInodeNumber) failed: %v\n", err)
return
@@ -938,7 +940,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
// Calculate random offset
rwOffset := uint64(rand.Int63n(int64(rwSizeTotal - rwSizeRequested)))
- bufRead, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil)
+ bufRead, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err)
return
@@ -950,7 +952,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
}
} else {
for rwOffset := uint64(0); rwOffset < rwSizeTotal; rwOffset += rwSizeRequested {
- bufRead, err := mountHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil)
+ bufRead, err := volumeHandle.Read(inode.InodeRootUserID, inode.InodeGroupID(0), nil, fileInodeNumber, rwOffset, rwSizeRequested, nil)
if nil != err {
stepErrChan <- fmt.Errorf("fs.Read(,,,, fileInodeNumber, rwOffset, rwSizeRequested) failed: %v\n", err)
return
@@ -966,7 +968,7 @@ func fsWorkout(rwSizeEach *rwSizeEachStruct, threadIndex uint64, doSameFile bool
_ = <-doNextStepChan
if !doSameFile {
- err = unlinkFsFile(mountHandle, fileName)
+ err = unlinkFsFile(volumeHandle, fileName)
if nil != err {
return
}
diff --git a/proxyfsd/daemon.go b/proxyfsd/daemon.go
index b664d0ca4..41252e915 100644
--- a/proxyfsd/daemon.go
+++ b/proxyfsd/daemon.go
@@ -144,6 +144,7 @@ func Daemon(confFile string, confStrings []string, errChan chan error, wg *sync.
if signalReceived != unix.SIGTERM && signalReceived != unix.SIGINT {
logger.Errorf("proxyfsd received unexpected signal: %v", signalReceived)
}
+
return
}
diff --git a/proxyfsd/daemon_test.go b/proxyfsd/daemon_test.go
index 41c980f43..6ab256cb3 100644
--- a/proxyfsd/daemon_test.go
+++ b/proxyfsd/daemon_test.go
@@ -32,7 +32,6 @@ func TestDaemon(t *testing.T) {
err error
errChan chan error
execArgs []string
- mountHandle fs.MountHandle
ramswiftDoneChan chan bool
ramswiftSignalHandlerIsArmedWG sync.WaitGroup
readData []byte
@@ -40,6 +39,7 @@ func TestDaemon(t *testing.T) {
testVersionConfFile *os.File
testVersionConfFileName string
toReadFileInodeNumber inode.InodeNumber
+ volumeHandle fs.VolumeHandle
wg sync.WaitGroup
)
@@ -152,6 +152,9 @@ func TestDaemon(t *testing.T) {
"JSONRPCServer.TCPPort=12346", // 12346 instead of 12345 so that test can run if proxyfsd is already running
"JSONRPCServer.FastTCPPort=32346", // ...and similarly here...
"JSONRPCServer.DataPathLogging=false",
+ "JSONRPCServer.MinLeaseDuration=250ms",
+ "JSONRPCServer.LeaseInterruptInterval=250ms",
+ "JSONRPCServer.LeaseInterruptLimit=20",
"RamSwiftInfo.MaxAccountNameLength=256",
"RamSwiftInfo.MaxContainerNameLength=256",
@@ -203,12 +206,12 @@ func TestDaemon(t *testing.T) {
// Write to the volume (with no flush so that only time-based/restart flush is performed)
- mountHandle, err = fs.MountByVolumeName("CommonVolume", fs.MountOptions(0))
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName("CommonVolume")
if nil != err {
- t.Fatalf("fs.MountByVolumeName() failed [case 1]: %v", err)
+ t.Fatalf("fs.FetchVolumeHandleByVolumeName() failed [case 1]: %v", err)
}
- createdFileInodeNumber, err = mountHandle.Create(
+ createdFileInodeNumber, err = volumeHandle.Create(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
@@ -220,7 +223,7 @@ func TestDaemon(t *testing.T) {
t.Fatalf("fs.Create() failed: %v", err)
}
- bytesWritten, err = mountHandle.Write(
+ bytesWritten, err = volumeHandle.Write(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
@@ -238,7 +241,7 @@ func TestDaemon(t *testing.T) {
// Verify written data before restart
- toReadFileInodeNumber, err = mountHandle.Lookup(
+ toReadFileInodeNumber, err = volumeHandle.Lookup(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
@@ -249,7 +252,7 @@ func TestDaemon(t *testing.T) {
t.Fatalf("fs.Lookup() failed [case 1]: %v", err)
}
- readData, err = mountHandle.Read(
+ readData, err = volumeHandle.Read(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
@@ -292,12 +295,12 @@ func TestDaemon(t *testing.T) {
// Verify written data after restart
- mountHandle, err = fs.MountByVolumeName("CommonVolume", fs.MountOptions(0))
+ volumeHandle, err = fs.FetchVolumeHandleByVolumeName("CommonVolume")
if nil != err {
- t.Fatalf("fs.MountByVolumeName() failed [case 2]: %v", err)
+ t.Fatalf("fs.FetchVolumeHandleByVolumeName() failed [case 2]: %v", err)
}
- toReadFileInodeNumber, err = mountHandle.Lookup(
+ toReadFileInodeNumber, err = volumeHandle.Lookup(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
@@ -308,7 +311,7 @@ func TestDaemon(t *testing.T) {
t.Fatalf("fs.Lookup() failed [case 2]: %v", err)
}
- readData, err = mountHandle.Read(
+ readData, err = volumeHandle.Read(
inode.InodeRootUserID,
inode.InodeGroupID(0),
nil,
diff --git a/proxyfsd/default.conf b/proxyfsd/default.conf
index d36784c6b..4972c049a 100644
--- a/proxyfsd/default.conf
+++ b/proxyfsd/default.conf
@@ -33,16 +33,25 @@ LogLevel: 0
[SwiftClient]
NoAuthIPAddr: 127.0.0.1
NoAuthTCPPort: 8090
-Timeout: 10s
-RetryLimit: 10
-RetryLimitObject: 6
+
RetryDelay: 1s
+RetryExpBackoff: 1.5
+RetryLimit: 11
+
RetryDelayObject: 1s
-RetryExpBackoff: 1.4
-RetryExpBackoffObject: 2.0
+RetryExpBackoffObject: 1.95
+RetryLimitObject: 8
+
ChunkedConnectionPoolSize: 512
NonChunkedConnectionPoolSize: 128
+SwiftReconNoWriteThreshold: 80
+SwiftReconNoWriteErrno: ENOSPC
+SwiftReconReadOnlyThreshold: 90
+SwiftReconReadOnlyErrno: EROFS
+SwiftConfDir: /etc/swift
+SwiftReconChecksPerConfCheck: 10
+
# A storage policy into which the chunks of files and directories will go
[PhysicalContainerLayout:CommonVolumePhysicalContainerLayoutReplicated3Way]
ContainerStoragePolicy: silver
@@ -207,7 +216,10 @@ RetryRPCPort: 32356
RetryRPCTTLCompleted: 10m
RetryRPCAckTrim: 100ms
RetryRPCDeadlineIO: 60s
-RetryRPCKEEPALIVEPeriod: 60s
+RetryRPCKeepAlivePeriod: 60s
+MinLeaseDuration: 250ms
+LeaseInterruptInterval: 250ms
+LeaseInterruptLimit: 20
# Log reporting parameters
[Logging]
diff --git a/proxyfsd/rpc_server.conf b/proxyfsd/rpc_server.conf
index e86daab21..b44f8531e 100644
--- a/proxyfsd/rpc_server.conf
+++ b/proxyfsd/rpc_server.conf
@@ -9,4 +9,7 @@ RetryRPCPort: 32356
RetryRPCTTLCompleted: 10m
RetryRPCAckTrim: 100ms
RetryRPCDeadlineIO: 60s
-RetryRPCKEEPALIVEPeriod: 60s
+RetryRPCKeepAlivePeriod: 60s
+MinLeaseDuration: 250ms
+LeaseInterruptInterval: 250ms
+LeaseInterruptLimit: 20
diff --git a/proxyfsd/swift_client.conf b/proxyfsd/swift_client.conf
index 4c15c569d..857ddc2f4 100644
--- a/proxyfsd/swift_client.conf
+++ b/proxyfsd/swift_client.conf
@@ -24,3 +24,10 @@ RetryLimitObject: 8
ChunkedConnectionPoolSize: 512
NonChunkedConnectionPoolSize: 128
+
+SwiftReconNoWriteThreshold: 80
+SwiftReconNoWriteErrno: ENOSPC
+SwiftReconReadOnlyThreshold: 90
+SwiftReconReadOnlyErrno: EROFS
+SwiftConfDir: /etc/swift
+SwiftReconChecksPerConfCheck: 10
diff --git a/ramswift/macramswift0.conf b/ramswift/macramswift0.conf
index 164003ce7..c0895f901 100644
--- a/ramswift/macramswift0.conf
+++ b/ramswift/macramswift0.conf
@@ -5,3 +5,6 @@
.include ./chaos_settings.conf
.include ./swift_info.conf
+
+[SwiftClient]
+SwiftReconChecksPerConfCheck: 0
diff --git a/ramswift/ramswift0.conf b/ramswift/ramswift0.conf
index 280c6313f..ec97388da 100644
--- a/ramswift/ramswift0.conf
+++ b/ramswift/ramswift0.conf
@@ -5,3 +5,6 @@
.include ./chaos_settings.conf
.include ./swift_info.conf
+
+[SwiftClient]
+SwiftReconChecksPerConfCheck: 0
diff --git a/ramswift/saioramswift0.conf b/ramswift/saioramswift0.conf
index 25f3a282c..152f9f112 100644
--- a/ramswift/saioramswift0.conf
+++ b/ramswift/saioramswift0.conf
@@ -5,3 +5,6 @@
.include ./chaos_settings.conf
.include ./swift_info.conf
+
+[SwiftClient]
+SwiftReconChecksPerConfCheck: 0
diff --git a/release_notes.md b/release_notes.md
index 1dcebf9bb..e2c912b05 100644
--- a/release_notes.md
+++ b/release_notes.md
@@ -1,5 +1,40 @@
# ProxyFS Release Notes
+## 1.16.2 (July 1, 2020)
+
+### Bug Fixes:
+
+A memory leak in ProxyFS when PFSAgent is in use has been resolved.
+
+PFSAgent's Swift Auth PlugIn may now be located in the provisioned
+SAIO search path of both the root and vagrant users.
+
+### Features:
+
+Detection of available space on each Swift device (Account, Container,
+and Object) is now enabled. If any device's utilization crosses either
+of two thresholds, access to all Volumes is restricted. The first
+threshold prevents writes to files in the file system. Since many more
+operations actually consume device space (even deletes), exceeding the
+second threshold makes all Volumes read-only. The "/liveness" URL (in
+the embedded HTTPServer in ProxyFS) now also reports the disk space
+utilization of each Swift device.
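+
+For reference, these thresholds are controlled by new `[SwiftClient]`
+configuration keys, shown below with the default values used in this
+release:
+
+```
+SwiftReconNoWriteThreshold:   80
+SwiftReconNoWriteErrno:       ENOSPC
+SwiftReconReadOnlyThreshold:  90
+SwiftReconReadOnlyErrno:      EROFS
+SwiftConfDir:                 /etc/swift
+SwiftReconChecksPerConfCheck: 10
+```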
+
+In addition to the one-node SAIO VM already present, a new three-node
+SAIT ("Swift All In Three") set of VMs is now provided. This enables
+testing of ProxyFS Cluster functionality.
+
+### Notes:
+
+This release also includes initial work in support of the Lease Management
+feature that will ultimately enable multiple PFSAgent instances to safely
+share read/write access to a common ProxyFS Volume. The new RPC is simply
+called "Lease" (i.e. Server.RpcLease) but remains non-functional at this
+point. This functionality also includes an "upcall" mechanism by which
+ProxyFS may inform a PFSAgent instance of important events requiring a
+response (e.g. a conflicting Lease is needed by another PFSAgent instance,
+or the Volume is leaving and requires the PFSAgent to unmount it).
+
## 1.16.1 (May 26, 2020)
### Bug Fixes:
diff --git a/retryrpc/api.go b/retryrpc/api.go
index 4e61afd49..1a9a7c5fa 100644
--- a/retryrpc/api.go
+++ b/retryrpc/api.go
@@ -19,6 +19,7 @@ import (
"time"
"github.com/google/btree"
+ "github.com/swiftstack/ProxyFS/bucketstats"
"github.com/swiftstack/ProxyFS/logger"
)
@@ -53,18 +54,20 @@ type Server struct {
completedLongTicker *time.Ticker // Longer ~10 minute timer to trim
completedShortTicker *time.Ticker // Shorter ~100ms timer to trim known completed
deadlineIO time.Duration
- keepalivePeriod time.Duration
+ keepAlivePeriod time.Duration
completedDoneWG sync.WaitGroup
+ dontStartTrimmers bool // Used for testing
}
// ServerConfig is used to configure a retryrpc Server
type ServerConfig struct {
- LongTrim time.Duration // How long the results of an RPC are stored on a Server before removed
- ShortTrim time.Duration // How frequently completed and ACKed RPCs results are removed from Server
- IPAddr string // IP Address that Server uses to listen
- Port int // Port that Server uses to listen
- DeadlineIO time.Duration // How long I/Os on sockets wait even if idle
- KEEPALIVEPeriod time.Duration // How frequently a KEEPALIVE is sent
+ LongTrim time.Duration // How long the results of an RPC are stored on a Server before removed
+ ShortTrim time.Duration // How frequently completed and ACKed RPCs results are removed from Server
+ IPAddr string // IP Address that Server uses to listen
+ Port int // Port that Server uses to listen
+ DeadlineIO time.Duration // How long I/Os on sockets wait even if idle
+ KeepAlivePeriod time.Duration // How frequently a KEEPALIVE is sent
+ dontStartTrimmers bool // Used for testing
}
// NewServer creates the Server object
@@ -74,7 +77,7 @@ func NewServer(config *ServerConfig) *Server {
)
server := &Server{ipaddr: config.IPAddr, port: config.Port, completedLongTTL: config.LongTrim,
completedAckTrim: config.ShortTrim, deadlineIO: config.DeadlineIO,
- keepalivePeriod: config.KEEPALIVEPeriod}
+ keepAlivePeriod: config.KeepAlivePeriod, dontStartTrimmers: config.dontStartTrimmers}
server.svrMap = make(map[string]*methodArgs)
server.perClientInfo = make(map[string]*clientInfo)
server.completedTickerDone = make(chan bool)
@@ -106,7 +109,7 @@ func (server *Server) Start() (err error) {
Certificates: []tls.Certificate{server.Creds.serverTLSCertificate},
}
- listenConfig := &net.ListenConfig{KeepAlive: server.keepalivePeriod}
+ listenConfig := &net.ListenConfig{KeepAlive: server.keepAlivePeriod}
server.netListener, err = listenConfig.Listen(context.Background(), "tcp", hostPortStr)
if nil != err {
err = fmt.Errorf("tls.Listen() failed: %v", err)
@@ -117,24 +120,39 @@ func (server *Server) Start() (err error) {
server.listenersWG.Add(1)
- // Start ticker which removes older completedRequests
- server.completedLongTicker = time.NewTicker(server.completedLongTTL)
- // Start ticker which removes requests already ACKed by client
- server.completedShortTicker = time.NewTicker(server.completedAckTrim)
+ // Some of the unit tests disable starting trimmers
+ if !server.dontStartTrimmers {
+ // Start ticker which removes older completedRequests
+ server.completedLongTicker = time.NewTicker(server.completedLongTTL)
+ // Start ticker which removes requests already ACKed by client
+ server.completedShortTicker = time.NewTicker(server.completedAckTrim)
+ }
server.completedDoneWG.Add(1)
- go func() {
- for {
- select {
- case <-server.completedTickerDone:
- server.completedDoneWG.Done()
- return
- case tl := <-server.completedLongTicker.C:
- server.trimCompleted(tl, true)
- case ts := <-server.completedShortTicker.C:
- server.trimCompleted(ts, false)
+ if !server.dontStartTrimmers {
+ go func() {
+ for {
+ select {
+ case <-server.completedTickerDone:
+ server.completedDoneWG.Done()
+ return
+ case tl := <-server.completedLongTicker.C:
+ server.trimCompleted(tl, true)
+ case ts := <-server.completedShortTicker.C:
+ server.trimCompleted(ts, false)
+ }
+ }
+ }()
+ } else {
+ go func() {
+ for {
+ select {
+ case <-server.completedTickerDone:
+ server.completedDoneWG.Done()
+ return
+ }
}
- }
- }()
+ }()
+ }
return err
}
@@ -197,10 +215,20 @@ func (server *Server) Close() {
// Now close the client sockets to wakeup them up
server.closeClientConn()
- server.completedLongTicker.Stop()
- server.completedShortTicker.Stop()
+ if !server.dontStartTrimmers {
+ server.completedLongTicker.Stop()
+ server.completedShortTicker.Stop()
+ }
server.completedTickerDone <- true
server.completedDoneWG.Wait()
+
+ // Cleanup bucketstats so that unit tests can run
+ for _, ci := range server.perClientInfo {
+ ci.Lock()
+ bucketstats.UnRegister("proxyfs.retryrpc", ci.myUniqueID)
+ ci.Unlock()
+
+ }
}
// CloseClientConn - This is debug code to cause some connections to be closed
@@ -266,7 +294,7 @@ type Client struct {
myUniqueID string // Unique ID across all clients
cb interface{} // Callbacks to client
deadlineIO time.Duration
- keepalivePeriod time.Duration
+ keepAlivePeriod time.Duration
outstandingRequest map[requestID]*reqCtx // Map of outstanding requests sent
// or to be sent to server. Key is assigned from currentRequestID
highestConsecutive requestID // Highest requestID that can be
@@ -289,7 +317,7 @@ type ClientConfig struct {
RootCAx509CertificatePEM []byte // Root certificate
Callbacks interface{} // Structure implementing ClientCallbacks
DeadlineIO time.Duration // How long I/Os on sockets wait even if idle
- KEEPALIVEPeriod time.Duration // How frequently a KEEPALIVE is sent
+ KeepAlivePeriod time.Duration // How frequently a KEEPALIVE is sent
}
// TODO - pass loggers to Cient and Server objects
@@ -308,7 +336,7 @@ type ClientConfig struct {
func NewClient(config *ClientConfig) (client *Client, err error) {
client = &Client{myUniqueID: config.MyUniqueID, cb: config.Callbacks,
- keepalivePeriod: config.KEEPALIVEPeriod, deadlineIO: config.DeadlineIO}
+ keepAlivePeriod: config.KeepAlivePeriod, deadlineIO: config.DeadlineIO}
portStr := fmt.Sprintf("%d", config.Port)
client.connection.state = INITIAL
client.connection.hostPortStr = net.JoinHostPort(config.IPAddr, portStr)
diff --git a/retryrpc/api_internal.go b/retryrpc/api_internal.go
index 3b9ba1e4c..555ef2fe7 100644
--- a/retryrpc/api_internal.go
+++ b/retryrpc/api_internal.go
@@ -20,6 +20,7 @@ import (
"sync"
"time"
+ "github.com/swiftstack/ProxyFS/bucketstats"
"github.com/swiftstack/ProxyFS/logger"
)
@@ -37,6 +38,21 @@ const (
type requestID uint64
+// Useful stats for the clientInfo instance
+type statsInfo struct {
+ AddCompleted bucketstats.Total // Number added to completed list
+ RmCompleted bucketstats.Total // Number removed from completed list
+ RPCLenUsec bucketstats.BucketLog2Round // Tracks length of RPCs
+ ReplySize bucketstats.BucketLog2Round // Tracks completed RPC reply size
+ longestRPC time.Duration // Time of longest RPC
+ longestRPCMethod string // Method of longest RPC
+ largestReplySize uint64 // Tracks largest RPC reply size
+ largestReplySizeMethod string // Method of largest RPC reply size completed
+ RPCattempted bucketstats.Total // Number of RPCs attempted - may be completed or in process
+ RPCcompleted bucketstats.Total // Number of RPCs which completed - incremented after call returns
+ RPCretried bucketstats.Total // Number of RPCs which were just pulled from completed list
+}
+
// Server side data structure storing per client information
// such as completed requests, etc
type clientInfo struct {
@@ -47,6 +63,7 @@ type clientInfo struct {
completedRequestLRU *list.List // LRU used to remove completed request in ticker
highestReplySeen requestID // Highest consectutive requestID client has seen
previousHighestReplySeen requestID // Previous highest consectutive requestID client has seen
+ stats statsInfo
}
type completedEntry struct {
diff --git a/retryrpc/client.go b/retryrpc/client.go
index ebaab205e..94dd4ae41 100644
--- a/retryrpc/client.go
+++ b/retryrpc/client.go
@@ -13,6 +13,12 @@ import (
"github.com/swiftstack/ProxyFS/logger"
)
+const (
+ ConnectionRetryDelayMultiplier = 2
+ ConnectionRetryInitialDelay = 100 * time.Millisecond
+ ConnectionRetryLimit = 8
+)
+
// TODO - what if RPC was completed on Server1 and before response,
// proxyfsd fails over to Server2? Client will resend - not idempotent
// This is outside of our initial requirements but something we should
@@ -27,19 +33,35 @@ import (
// 4. readResponses goroutine will read response on socket
// and call a goroutine to do unmarshalling and notification
func (client *Client) send(method string, rpcRequest interface{}, rpcReply interface{}) (err error) {
- var crID requestID
+ var (
+ connectionRetryCount int
+ connectionRetryDelay time.Duration
+ crID requestID
+ )
client.Lock()
if client.connection.state == INITIAL {
+ connectionRetryCount = 0
+ connectionRetryDelay = ConnectionRetryInitialDelay
+
for {
err = client.dial()
if err == nil {
break
}
client.Unlock()
- time.Sleep(100 * time.Millisecond)
+ connectionRetryCount++
+ if connectionRetryCount > ConnectionRetryLimit {
+ err = fmt.Errorf("In send(), ConnectionRetryLimit (%v) on calling dial() exceeded", ConnectionRetryLimit)
+ logger.PanicfWithError(err, "")
+ }
+ time.Sleep(connectionRetryDelay)
+ connectionRetryDelay *= ConnectionRetryDelayMultiplier
client.Lock()
+ if client.connection.state != INITIAL {
+ break
+ }
}
}
@@ -278,6 +300,11 @@ func (client *Client) readReplies(callingGenNum uint64, tlsConn *tls.Conn) {
// retransmit is called when a socket related error occurs on the
// connection to the server.
func (client *Client) retransmit(genNum uint64) {
+ var (
+ connectionRetryCount int
+ connectionRetryDelay time.Duration
+ )
+
client.Lock()
// Check if we are already processing the socket error via
@@ -297,9 +324,12 @@ func (client *Client) retransmit(genNum uint64) {
// We are the first goroutine to notice the error on the
// socket - close the connection and start trying to reconnect.
- client.connection.tlsConn.Close()
+ _ = client.connection.tlsConn.Close()
client.connection.state = RETRANSMITTING
+ connectionRetryCount = 0
+ connectionRetryDelay = ConnectionRetryInitialDelay
+
for {
err := client.dial()
// If we were able to connect then break - otherwise retry
@@ -308,8 +338,13 @@ func (client *Client) retransmit(genNum uint64) {
break
}
client.Unlock()
- time.Sleep(100 * time.Millisecond)
-
+ connectionRetryCount++
+ if connectionRetryCount > ConnectionRetryLimit {
+ err = fmt.Errorf("In retransmit(), ConnectionRetryLimit (%v) on calling dial() exceeded", ConnectionRetryLimit)
+ logger.PanicfWithError(err, "")
+ }
+ time.Sleep(connectionRetryDelay)
+ connectionRetryDelay *= ConnectionRetryDelayMultiplier
client.Lock()
// While the lock was dropped we may be halting....
if client.halting == true {
@@ -318,8 +353,6 @@ func (client *Client) retransmit(genNum uint64) {
}
}
- client.connection.state = CONNECTED
-
for crID, ctx := range client.outstandingRequest {
// Note that we are holding the lock so these
// goroutines will block until we release it.
@@ -374,13 +407,14 @@ func (client *Client) sendMyInfo(tlsConn *tls.Conn) (err error) {
//
// NOTE: Client lock is held
func (client *Client) dial() (err error) {
+ var entryState = client.connection.state
client.connection.tlsConfig = &tls.Config{
RootCAs: client.connection.x509CertPool,
}
// Now dial the server
- d := &net.Dialer{KeepAlive: client.keepalivePeriod}
+ d := &net.Dialer{KeepAlive: client.keepAlivePeriod}
tlsConn, dialErr := tls.DialWithDialer(d, "tcp", client.connection.hostPortStr, client.connection.tlsConfig)
if dialErr != nil {
err = fmt.Errorf("tls.Dial() failed: %v", dialErr)
@@ -391,6 +425,7 @@ func (client *Client) dial() (err error) {
client.connection.tlsConn.Close()
client.connection.tlsConn = nil
}
+
client.connection.tlsConn = tlsConn
client.connection.state = CONNECTED
client.connection.genNum++
@@ -399,6 +434,9 @@ func (client *Client) dial() (err error) {
// be retried.
err = client.sendMyInfo(tlsConn)
if err != nil {
+ _ = client.connection.tlsConn.Close()
+ client.connection.tlsConn = nil
+ client.connection.state = entryState
return
}
diff --git a/retryrpc/retryrpc_test.go b/retryrpc/retryrpc_test.go
index 101291924..41843286d 100644
--- a/retryrpc/retryrpc_test.go
+++ b/retryrpc/retryrpc_test.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
+ "github.com/swiftstack/ProxyFS/bucketstats"
"github.com/swiftstack/ProxyFS/retryrpc/rpctest"
)
@@ -16,6 +17,7 @@ func TestRetryRPC(t *testing.T) {
testServer(t)
testBtree(t)
+ testStatsAndBucketstats(t)
}
type MyType struct {
@@ -39,13 +41,13 @@ func (m *MyType) unexportedFunction(i int) {
m.field1 = i
}
-func getNewServer(lt time.Duration) (rrSvr *Server, ip string, p int) {
+func getNewServer(lt time.Duration, dontStartTrimmers bool) (rrSvr *Server, ip string, p int) {
var (
ipaddr = "127.0.0.1"
port = 24456
)
config := &ServerConfig{LongTrim: lt, ShortTrim: 100 * time.Millisecond, IPAddr: "127.0.0.1",
- Port: 24456, DeadlineIO: 5 * time.Second}
+ Port: 24456, DeadlineIO: 5 * time.Second, dontStartTrimmers: dontStartTrimmers}
// Create a new RetryRPC Server. Completed request will live on
// completedRequests for 10 seconds.
@@ -65,7 +67,7 @@ func testServer(t *testing.T) {
// RPCs
myJrpcfs := rpctest.NewServer()
- rrSvr, ipaddr, port := getNewServer(10 * time.Second)
+ rrSvr, ipaddr, port := getNewServer(10*time.Second, false)
assert.NotNil(rrSvr)
// Register the Server - sets up the methods supported by the
@@ -121,7 +123,7 @@ func testServer(t *testing.T) {
func testBtree(t *testing.T) {
assert := assert.New(t)
- rrSvr, ipaddr, port := getNewServer(10 * time.Second)
+ rrSvr, ipaddr, port := getNewServer(10*time.Second, false)
assert.NotNil(rrSvr)
// Setup a client - we only will be targeting the btree
@@ -161,3 +163,66 @@ func testBtree(t *testing.T) {
assert.Equal(int(1), client.bt.Len())
assert.Equal(requestID(11), client.highestConsecutive)
}
+
+// Per pfsagent statistics
+type clientStats struct {
+ AddCompleted bucketstats.Total // Number added to completed list
+ RmCompleted bucketstats.Total // Number removed from completed list
+ RPCLenUsec bucketstats.BucketLog2Round // Average times of RPCs
+ LongestRPCMethod string // Method of longest RPC
+ ReplySize bucketstats.BucketLog2Round // Largest RPC reply size completed
+ LargestReplySizeMethod string // Method of largest RPC reply size completed
+ RPCcompleted bucketstats.Total // Number of RPCs which completed - incremented after call returns
+ RPCretried bucketstats.Total // Number of RPCs which were just pulled from completed list
+ RPCattempted bucketstats.Total // Number of RPCs attempted - may be completed or in process
+ RPCinprocess bucketstats.Total // Number of RPCs presently calling RPC - decremented when completed
+}
+
+// Test use of bucketstats package
+func testStatsAndBucketstats(t *testing.T) {
+ var (
+ myClient1 clientStats
+ myUniqueClient1 = "1111111"
+
+ myClient2 clientStats
+ myUniqueClient2 = "2222222"
+ )
+
+	// Register with bucketstats for pfsagent #1
+ bucketstats.Register("proxyfs.retryrpc", myUniqueClient1, &myClient1)
+
+	// Register with bucketstats for pfsagent #2
+ bucketstats.Register("proxyfs.retryrpc", myUniqueClient2, &myClient2)
+
+ // Completed list stats
+ myClient1.AddCompleted.Add(1)
+ myClient1.RmCompleted.Add(1)
+
+ // RPC counts
+ myClient1.RPCcompleted.Add(1)
+ myClient1.RPCretried.Add(1)
+ myClient1.RPCattempted.Add(1)
+ myClient1.RPCinprocess.Add(1)
+
+ // Track duration of all RPCs in a graph
+ start := time.Now()
+ time.Sleep(10 * time.Millisecond)
+ myClient1.RPCLenUsec.Add(uint64(time.Since(start) / time.Microsecond))
+ myClient1.ReplySize.Add(8192)
+
+ // Example of pfsagent #2
+ myClient2.RPCcompleted.Add(1)
+ myClient2.RPCretried.Add(1)
+ myClient2.RPCattempted.Add(1)
+ myClient2.RPCinprocess.Add(1)
+
+ // Dump stats
+ /* DEBUG ONLY -
+ fmt.Printf("pfsagent #1: %s\n", bucketstats.SprintStats(bucketstats.StatFormatParsable1, "proxyfs.retryrpc", myUniqueClient1))
+ fmt.Printf("pfsagent #2: %s\n", bucketstats.SprintStats(bucketstats.StatFormatParsable1, "proxyfs.retryrpc", myUniqueClient2))
+ */
+
+ // Unregister clients from bucketstats
+ bucketstats.UnRegister("proxyfs.retryrpc", myUniqueClient1)
+ bucketstats.UnRegister("proxyfs.retryrpc", myUniqueClient2)
+}
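
testStatsAndBucketstats() above doubles as a template for instrumenting a caller with the bucketstats package: Register a struct of stat fields under a (package, instance) pair, Add to them, optionally dump with SprintStats, then UnRegister. A condensed sketch using the same API, with illustrative stat names:

    package sketch

    import (
    	"fmt"
    	"time"

    	"github.com/swiftstack/ProxyFS/bucketstats"
    )

    // opStats mirrors the pattern used in testStatsAndBucketstats: plain struct
    // fields of bucketstats types, registered under a (package, instance) pair.
    type opStats struct {
    	Calls   bucketstats.Total           // count of operations performed
    	OpUsecs bucketstats.BucketLog2Round // log2 histogram of operation latency
    }

    func example() {
    	var stats opStats

    	// Register the struct under a (package, instance) pair, as the test does.
    	bucketstats.Register("proxyfs.sketch", "instance-1", &stats)
    	defer bucketstats.UnRegister("proxyfs.sketch", "instance-1")

    	start := time.Now()
    	time.Sleep(2 * time.Millisecond) // stand-in for real work
    	stats.Calls.Add(1)
    	stats.OpUsecs.Add(uint64(time.Since(start) / time.Microsecond))

    	// Dump everything registered under this (package, instance) pair.
    	fmt.Printf("%s\n", bucketstats.SprintStats(bucketstats.StatFormatParsable1,
    		"proxyfs.sketch", "instance-1"))
    }

The (package, instance) pair is what lets several pfsagent-style clients register the same struct type side by side, which is exactly what the test exercises with myUniqueClient1 and myUniqueClient2.
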
diff --git a/retryrpc/rpctest/ping.go b/retryrpc/rpctest/ping.go
index 354362f9d..e915a6df9 100644
--- a/retryrpc/rpctest/ping.go
+++ b/retryrpc/rpctest/ping.go
@@ -2,6 +2,7 @@ package rpctest
// Simple ping for testing the RPC layer
import (
+ "bytes"
"fmt"
"github.com/swiftstack/ProxyFS/blunder"
@@ -20,6 +21,23 @@ func (s *Server) RpcPing(in *PingReq, reply *PingReply) (err error) {
return nil
}
+var largeStr string = "111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"
+
+// RpcPingLarge does a len() on the message and returns the result along
+// with a large reply buffer.
+func (s *Server) RpcPingLarge(in *PingReq, reply *PingReply) (err error) {
+
+ buf := bytes.Buffer{}
+ p := fmt.Sprintf("pong %d bytes", len(in.Message))
+ buf.WriteString(p)
+ for i := 0; i < 1000; i++ {
+ buf.WriteString(largeStr)
+ }
+
+ reply.Message = fmt.Sprintf("%v", buf.String())
+ return nil
+}
+
// RpcPingWithError returns an error
func (s *Server) RpcPingWithError(in *PingReq, reply *PingReply) (err error) {
err = blunder.AddError(err, blunder.NotFoundError)
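
On the client side, the new RpcPingLarge method is reached the same way as RpcPing; the stress test later in this change invokes it via client.Send(). A trimmed sketch of that call site, assuming it lives alongside the tests in package retryrpc (pingLargeOnce is illustrative, not new API):

    package retryrpc

    import (
    	"fmt"

    	"github.com/swiftstack/ProxyFS/retryrpc/rpctest"
    )

    // pingLargeOnce sends a single RpcPingLarge request and returns the (large)
    // reply for the caller to inspect; it mirrors the stress test's call site.
    func pingLargeOnce(client *Client, i int) (*rpctest.PingReply, error) {
    	pingRequest := &rpctest.PingReq{Message: fmt.Sprintf("Ping Me - %v", i)}
    	pingReply := &rpctest.PingReply{}

    	// "RpcPingLarge" is resolved against the methods registered via Register().
    	err := client.Send("RpcPingLarge", pingRequest, pingReply)
    	return pingReply, err
    }
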
diff --git a/retryrpc/server.go b/retryrpc/server.go
index e47752875..ca58c5374 100644
--- a/retryrpc/server.go
+++ b/retryrpc/server.go
@@ -11,6 +11,7 @@ import (
"sync"
"time"
+ "github.com/swiftstack/ProxyFS/bucketstats"
"github.com/swiftstack/ProxyFS/logger"
"golang.org/x/sys/unix"
)
@@ -76,9 +77,15 @@ func (server *Server) run() {
go func(myConn net.Conn, myElm *list.Element) {
defer server.goroutineWG.Done()
+ logger.Infof("Servicing client: %v address: %v", ci.myUniqueID, myConn.RemoteAddr())
server.serviceClient(ci, cCtx)
+ logger.Infof("Closing client: %v address: %v", ci.myUniqueID, myConn.RemoteAddr())
server.closeClient(conn, elm)
+
+ // The clientInfo for this client will first be trimmed and then later
+ // deleted from the list of server.perClientInfo by the TTL trimmer.
+
}(conn, elm)
}
}
@@ -108,6 +115,7 @@ func (server *Server) processRequest(ci *clientInfo, myConnCtx *connCtx, buf []b
if jReq.HighestReplySeen > ci.highestReplySeen {
ci.highestReplySeen = jReq.HighestReplySeen
}
+ ci.stats.RPCattempted.Add(1)
// First check if we already completed this request by looking at
// completed queue.
@@ -119,6 +127,7 @@ func (server *Server) processRequest(ci *clientInfo, myConnCtx *connCtx, buf []b
// Just return the results.
setupHdrReply(ce.reply, RPC)
localIOR = *ce.reply
+ ci.stats.RPCretried.Add(1)
ci.Unlock()
} else {
@@ -129,17 +138,32 @@ func (server *Server) processRequest(ci *clientInfo, myConnCtx *connCtx, buf []b
// We pass buf to the call because the request will have to
// be unmarshaled again to retrieve the parameters specific to
// the RPC.
+ startRPC := time.Now()
ior := server.callRPCAndFormatReply(buf, &jReq)
+ ci.stats.RPCLenUsec.Add(uint64(time.Since(startRPC) / time.Microsecond))
+ ci.stats.RPCcompleted.Add(1)
// We had to drop the lock before calling the RPC since it
// could block.
ci.Lock()
+ dur := time.Since(startRPC)
+ if dur > ci.stats.longestRPC {
+ ci.stats.longestRPCMethod = jReq.Method
+ ci.stats.longestRPC = dur
+ }
// Update completed queue
ce := &completedEntry{reply: ior}
ci.completedRequest[rID] = ce
+ ci.stats.AddCompleted.Add(1)
setupHdrReply(ce.reply, RPC)
localIOR = *ce.reply
+ sz := uint64(len(ior.JResult))
+ if sz > ci.stats.largestReplySize {
+ ci.stats.largestReplySizeMethod = jReq.Method
+ ci.stats.largestReplySize = sz
+ }
+ ci.stats.ReplySize.Add(sz)
lruEntry := completedLRUEntry{requestID: rID, timeCompleted: time.Now()}
le := ci.completedRequestLRU.PushBack(lruEntry)
ce.lruElem = le
@@ -204,6 +228,8 @@ func (server *Server) getClientIDAndWait(cCtx *connCtx) (ci *clientInfo, err err
cCtx.Lock()
cCtx.ci = ci
cCtx.Unlock()
+
+ bucketstats.Register("proxyfs.retryrpc", c.myUniqueID, &c.stats)
} else {
server.Unlock()
ci = lci
@@ -363,7 +389,11 @@ func (server *Server) returnResults(ior *ioReply, cCtx *connCtx) {
//
// In error case - Conn will be closed when serviceClient() returns
cCtx.conn.SetDeadline(time.Now().Add(server.deadlineIO))
- _, _ = cCtx.conn.Write(ior.JResult)
+ cnt, e := cCtx.conn.Write(ior.JResult)
+ if e != nil {
+ logger.Infof("returnResults() returned err: %v cnt: %v length of JResult: %v", e, cnt, len(ior.JResult))
+ }
+
cCtx.Unlock()
}
@@ -387,15 +417,15 @@ func (server *Server) trimCompleted(t time.Time, long bool) {
server.Lock()
if long {
l := list.New()
- for k, v := range server.perClientInfo {
- n := server.trimTLLBased(k, v, t)
+ for k, ci := range server.perClientInfo {
+ n := server.trimTLLBased(ci, t)
totalItems += n
- v.Lock()
- if v.isEmpty() {
+ ci.Lock()
+ if ci.isEmpty() {
l.PushBack(k)
}
- v.Unlock()
+ ci.Unlock()
}
@@ -405,18 +435,22 @@ func (server *Server) trimCompleted(t time.Time, long bool) {
// lock.
for e := l.Front(); e != nil; e = e.Next() {
key := e.Value.(string)
- v := server.perClientInfo[key]
+ ci := server.perClientInfo[key]
- v.Lock()
- if v.isEmpty() {
+ ci.Lock()
+ ci.cCtx.Lock()
+ if ci.isEmpty() && ci.cCtx.serviceClientExited == true {
+ bucketstats.UnRegister("proxyfs.retryrpc", ci.myUniqueID)
delete(server.perClientInfo, key)
+ logger.Infof("Trim - DELETE inactive clientInfo with ID: %v", ci.myUniqueID)
}
- v.Unlock()
+ ci.cCtx.Unlock()
+ ci.Unlock()
}
logger.Infof("Trimmed completed RetryRpcs - Total: %v", totalItems)
} else {
- for k, v := range server.perClientInfo {
- n := server.trimAClientBasedACK(k, v)
+ for k, ci := range server.perClientInfo {
+ n := server.trimAClientBasedACK(k, ci)
totalItems += n
}
}
@@ -437,6 +471,7 @@ func (server *Server) trimAClientBasedACK(uniqueID string, ci *clientInfo) (numI
if ok {
ci.completedRequestLRU.Remove(v.lruElem)
delete(ci.completedRequest, h)
+ ci.stats.RmCompleted.Add(1)
numItems++
}
}
@@ -452,32 +487,27 @@ func (server *Server) trimAClientBasedACK(uniqueID string, ci *clientInfo) (numI
// This gets called every ~10 minutes to clean out older entries.
//
// NOTE: We assume Server Lock is held
-func (server *Server) trimTLLBased(uniqueID string, ci *clientInfo, t time.Time) (numItems int) {
-
- l := list.New()
-
+func (server *Server) trimTLLBased(ci *clientInfo, t time.Time) (numItems int) {
ci.Lock()
- for e := ci.completedRequestLRU.Front(); e != nil; e = e.Next() {
+ for e := ci.completedRequestLRU.Front(); e != nil; {
eTime := e.Value.(completedLRUEntry).timeCompleted.Add(server.completedLongTTL)
if eTime.Before(t) {
delete(ci.completedRequest, e.Value.(completedLRUEntry).requestID)
+ ci.stats.RmCompleted.Add(1)
- // Push on local list so don't delete while iterating
- l.PushBack(e)
+ eTmp := e
+ e = e.Next()
+ _ = ci.completedRequestLRU.Remove(eTmp)
+ numItems++
} else {
// Oldest is in front so just break
break
}
}
+ s := ci.stats
+ logger.Infof("ID: %v largestReplySize: %v largestReplySizeMethod: %v longest RPC: %v longest RPC Method: %v",
+ ci.myUniqueID, s.largestReplySize, s.largestReplySizeMethod, s.longestRPC, s.longestRPCMethod)
- numItems = l.Len()
-
- // Now delete from LRU using the local list
- for e2 := l.Front(); e2 != nil; e2 = e2.Next() {
- tmpE := ci.completedRequestLRU.Front()
- _ = ci.completedRequestLRU.Remove(tmpE)
-
- }
ci.Unlock()
return
}
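
The rewritten trimTLLBased() drops the side list and uses the standard container/list idiom for deleting while iterating: remember the element, advance the iterator, then Remove. A standalone sketch of that idiom (the helper name and callback are illustrative):

    package sketch

    import "container/list"

    // removeExpired walks l from the front (oldest first) and removes every
    // element for which expired() reports true, stopping at the first live entry.
    // Advancing e before calling Remove keeps the iteration valid.
    func removeExpired(l *list.List, expired func(v interface{}) bool) (removed int) {
    	for e := l.Front(); e != nil; {
    		if !expired(e.Value) {
    			break // entries are ordered oldest-first, so nothing later can be expired
    		}
    		next := e.Next()
    		l.Remove(e)
    		e = next
    		removed++
    	}
    	return
    }

This is the same shape as the loop above, minus the per-client locking and stats bookkeeping.
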
diff --git a/retryrpc/stress_test.go b/retryrpc/stress_test.go
index a1bf49009..efa6cfba2 100644
--- a/retryrpc/stress_test.go
+++ b/retryrpc/stress_test.go
@@ -7,13 +7,28 @@ import (
"testing"
"time"
+ /* DEBUG for pprof
+ _ "net/http/pprof"
+ */
+
"github.com/stretchr/testify/assert"
"github.com/swiftstack/ProxyFS/retryrpc/rpctest"
)
func TestStress(t *testing.T) {
+ /*
+ * DEBUG - used to debug memory leaks
+ * Run " go tool pprof http://localhost:12123/debug/pprof/heap"
+	 * to look at memory in use
+	 // Start the web server that listens for pprof requests
+ go http.ListenAndServe("localhost:12123", nil)
+ */
+
testLoop(t)
+ testLoopClientAckTrim(t)
+ testLoopTTLTrim(t)
+ testSendLargeRPC(t)
}
func testLoop(t *testing.T) {
@@ -29,7 +44,7 @@ func testLoop(t *testing.T) {
// RPCs
myJrpcfs := rpctest.NewServer()
- rrSvr, ipAddr, port := getNewServer(65 * time.Second)
+ rrSvr, ipAddr, port := getNewServer(65*time.Second, false)
assert.NotNil(rrSvr)
// Register the Server - sets up the methods supported by the
@@ -45,15 +60,211 @@ func testLoop(t *testing.T) {
rrSvr.Run()
// Start up the agents
- parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, sendCount, rrSvr.Creds.RootCAx509CertificatePEM)
+ parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)
rrSvr.Close()
}
-func sendIt(t *testing.T, client *Client, i int, sendWg *sync.WaitGroup, agentID uint64) {
+// testLoopClientAckTrim tests that we are correctly trimming messages
+// based on the shorter term trimmer. The shorter term trimmer relies
+// on the client code saying "this is the highest consecutive sqn we have
+// seen". Then the server can throw away messages up to and including the
+// highest consecutive sqn.
+func testLoopClientAckTrim(t *testing.T) {
+ var (
+ agentCount = 15
+ sendCount = 250
+ )
assert := assert.New(t)
- defer sendWg.Done()
+ zero := 0
+ assert.Equal(0, zero)
+
+ // Create new rpctest server - needed for calling
+ // RPCs
+ myJrpcfs := rpctest.NewServer()
+
+ whenTTL := 10 * time.Millisecond
+ rrSvr, ipAddr, port := getNewServer(whenTTL, true)
+ assert.NotNil(rrSvr)
+
+ // Register the Server - sets up the methods supported by the
+ // server
+ err := rrSvr.Register(myJrpcfs)
+ assert.Nil(err)
+
+ // Start listening for requests on the ipaddr/port
+ startErr := rrSvr.Start()
+ assert.Nil(startErr, "startErr is not nil")
+
+ // Tell server to start accepting and processing requests
+ rrSvr.Run()
+
+ // Start up the agents
+ parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)
+
+	// Now run both trimmers
+ tm := time.Now()
+
+	// First the 100ms trimmer - this will leave 1 entry on the completed request queue
+	// for each agent since there is no later client request to acknowledge it.
+ //
+ // We need the TTL timer to clean up the last entry
+ rrSvr.trimCompleted(tm, false)
+ assert.Equal(agentCount, cntNotTrimmed(rrSvr), "Should have agentCount messages remaining")
+
+	// Make sure the queued messages will be old enough to be trimmed
+ time.Sleep(whenTTL)
+
+	// Now run the TTL trimmer to clean up the last entry
+ tmTTL := time.Now()
+ rrSvr.trimCompleted(tmTTL, true)
+
+ // All messages should be trimmed at this point
+ assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")
+
+ /*
+ * DEBUG - allows user to use pprof to check for memory leaks
+ // The caller of this test will block and we can check for memory leaks with pprof
+ fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
+ time.Sleep(5 * time.Minute)
+ */
+
+ rrSvr.Close()
+}
+
+func testLoopTTLTrim(t *testing.T) {
+ var (
+ agentCount = 15
+ sendCount = 250
+ )
+ assert := assert.New(t)
+ zero := 0
+ assert.Equal(0, zero)
+
+ // Create new rpctest server - needed for calling
+ // RPCs
+ myJrpcfs := rpctest.NewServer()
+
+ whenTTL := 10 * time.Millisecond
+ rrSvr, ipAddr, port := getNewServer(whenTTL, true)
+ assert.NotNil(rrSvr)
+
+ // Register the Server - sets up the methods supported by the
+ // server
+ err := rrSvr.Register(myJrpcfs)
+ assert.Nil(err)
+
+ // Start listening for requests on the ipaddr/port
+ startErr := rrSvr.Start()
+ assert.Nil(startErr, "startErr is not nil")
+
+ // Tell server to start accepting and processing requests
+ rrSvr.Run()
+
+ // Start up the agents
+ parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPing", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)
+
+	// Use the TTL trimmer to remove all messages after guaranteeing we are
+	// past the time when they should be removed
+ time.Sleep(whenTTL)
+ tmTTL := time.Now()
+ rrSvr.trimCompleted(tmTTL, true)
+
+ assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")
+
+ /*
+	 * DEBUG - allow time for the pprof tool to be used to track down memory leaks
+ // The caller of this test will block and we can check for memory leaks with pprof
+ fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
+ time.Sleep(5 * time.Minute)
+ */
+ rrSvr.Close()
+}
+
+func testSendLargeRPC(t *testing.T) {
+ var (
+ agentCount = 15
+ sendCount = 250
+ )
+ assert := assert.New(t)
+ zero := 0
+ assert.Equal(0, zero)
+
+ // Create new rpctest server - needed for calling
+ // RPCs
+ myJrpcfs := rpctest.NewServer()
+
+ whenTTL := 10 * time.Millisecond
+ rrSvr, ipAddr, port := getNewServer(whenTTL, true)
+ assert.NotNil(rrSvr)
+
+ // Register the Server - sets up the methods supported by the
+ // server
+ err := rrSvr.Register(myJrpcfs)
+ assert.Nil(err)
+
+ // Start listening for requests on the ipaddr/port
+ startErr := rrSvr.Start()
+ assert.Nil(startErr, "startErr is not nil")
+
+ // Tell server to start accepting and processing requests
+ rrSvr.Run()
+
+ // Start up the agents
+ parallelAgentSenders(t, rrSvr, ipAddr, port, agentCount, "RpcPingLarge", sendCount, rrSvr.Creds.RootCAx509CertificatePEM)
+
+	// Now run both trimmers
+ tm := time.Now()
+
+	// First the 100ms trimmer - this will leave 1 entry on the completed request queue
+	// for each agent since there is no later client request to acknowledge it.
+ //
+ // We need the TTL timer to clean up the last entry
+ rrSvr.trimCompleted(tm, false)
+ assert.Equal(agentCount, cntNotTrimmed(rrSvr), "Should have agentCount messages remaining")
+
+	// Make sure the queued messages will be old enough to be trimmed
+ time.Sleep(whenTTL)
+
+	// Now run the TTL trimmer to clean up the last entry
+ tmTTL := time.Now()
+ rrSvr.trimCompleted(tmTTL, true)
+
+ /*
+	 * DEBUG - sleep for a while so the pprof tool can be used to track down memory leaks
+ // The caller of this test will block and we can check for memory leaks with pprof
+ fmt.Printf("\n=========== SLEEP 5 minutes ===================\n")
+ time.Sleep(5 * time.Minute)
+ */
+
+ // All messages should be trimmed at this point
+ assert.Equal(0, cntNotTrimmed(rrSvr), "Still have incomplete messages")
+
+ rrSvr.Close()
+}
+
+// cntNotTrimmed returns the number of completed requests still held on the completed lists of all clients
+
+func cntNotTrimmed(server *Server) (numItems int) {
+ server.Lock()
+ for _, ci := range server.perClientInfo {
+ ci.Lock()
+ if len(ci.completedRequest) != 0 {
+ numItems += len(ci.completedRequest)
+ } else {
+ if ci.completedRequestLRU.Len() != 0 {
+ numItems += ci.completedRequestLRU.Len()
+ }
+ }
+ ci.Unlock()
+ }
+ server.Unlock()
+
+ return
+}
+
+func ping(t *testing.T, client *Client, i int, agentID uint64, assert *assert.Assertions) {
// Send a ping RPC and print the results
msg := fmt.Sprintf("Ping Me - %v", i)
pingRequest := &rpctest.PingReq{Message: msg}
@@ -71,6 +282,37 @@ func sendIt(t *testing.T, client *Client, i int, sendWg *sync.WaitGroup, agentID
	assert.Equal(expectedReply, pingReply.Message, "Received different output than expected")
}
+// pingLarge sends an RPC whose reply is a large packet
+func pingLarge(t *testing.T, client *Client, i int, agentID uint64, assert *assert.Assertions) {
+ // Send a ping RPC and print the results
+ msg := fmt.Sprintf("Ping Me - %v", i)
+ pingRequest := &rpctest.PingReq{Message: msg}
+ pingReply := &rpctest.PingReply{}
+ err := client.Send("RpcPingLarge", pingRequest, pingReply)
+ assert.Nil(err, "client.Send() returned an error")
+}
+
+func sendIt(t *testing.T, client *Client, z int, sendCnt int, sendWg *sync.WaitGroup, prevWg *sync.WaitGroup, agentID uint64, method string, i int) {
+
+ assert := assert.New(t)
+ defer sendWg.Done()
+
+	switch method {
+	case "RpcPing":
+		ping(t, client, z, agentID, assert)
+	case "RpcPingLarge":
+		pingLarge(t, client, z, agentID, assert)
+	}
+
+	// Every send except the last releases prevWg so that the last send (which
+	// waits on prevWg) only runs after all of its predecessors have completed.
+	// This is how we test the short trimmer.
+ if i <= (sendCnt - 2) {
+ prevWg.Done()
+ }
+}
+
type stressMyClient struct {
sync.Mutex
cond *sync.Cond // Signal that received Interrupt() callback
@@ -88,8 +330,8 @@ func (cb *stressMyClient) Interrupt(payload []byte) {
}
// Represents a pfsagent - separate client
-func pfsagent(t *testing.T, rrSvr *Server, ipAddr string, port int, agentID uint64, agentWg *sync.WaitGroup,
- sendCnt int, rootCAx509CertificatePEM []byte) {
+func pfsagent(t *testing.T, rrSvr *Server, ipAddr string, port int, agentID uint64, method string,
+ agentWg *sync.WaitGroup, sendCnt int, rootCAx509CertificatePEM []byte) {
defer agentWg.Done()
cb := &stressMyClient{}
@@ -104,18 +346,49 @@ func pfsagent(t *testing.T, rrSvr *Server, ipAddr string, port int, agentID uint
}
defer client.Close()
+ // WG to verify all messages sent
var sendWg sync.WaitGroup
+	// WG to verify that all but the last send() have been sent and
+	// received. This is needed to test that the consecutive sequence
+	// trimmer is working.
+ var prevWg sync.WaitGroup
+
var z, r int
var msg1 []byte = []byte("server msg back to client")
for i := 0; i < sendCnt; i++ {
+
z = (z + i) * 10
+ if i == (sendCnt - 1) {
+			// Wait for all previous sends to complete so that
+			// highestConsecutive can reach sendCnt - 1 before the last send.
+ prevWg.Wait()
+
+ // The highest consecutive number is updated in the background with
+ // a goroutine when send() returns.
+ //
+ // Therefore, we loop waiting for it to hit (sendCnt - 1)
+ for {
+ var currentHighest requestID
+ client.Lock()
+ currentHighest = client.highestConsecutive
+ client.Unlock()
+
+ if int(currentHighest) == (sendCnt - 1) {
+ break
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+ } else {
+ prevWg.Add(1)
+ }
+
sendWg.Add(1)
- go func(z int) {
- sendIt(t, client, z, &sendWg, agentID)
+ go func(z int, i int) {
+ sendIt(t, client, z, sendCnt, &sendWg, &prevWg, agentID, method, i)
rrSvr.SendCallback(clientID, msg1)
- }(z)
+ }(z, i)
// Occasionally drop the connection to the server to
// simulate retransmits
@@ -129,7 +402,7 @@ func pfsagent(t *testing.T, rrSvr *Server, ipAddr string, port int, agentID uint
// Start a bunch of "pfsagents" in parallel
func parallelAgentSenders(t *testing.T, rrSrv *Server, ipAddr string, port int, agentCnt int,
- sendCnt int, rootCAx509CertificatePEM []byte) {
+ method string, sendCnt int, rootCAx509CertificatePEM []byte) {
var agentWg sync.WaitGroup
@@ -143,7 +416,7 @@ func parallelAgentSenders(t *testing.T, rrSrv *Server, ipAddr string, port int,
agentID = clientSeed + uint64(i)
agentWg.Add(1)
- go pfsagent(t, rrSrv, ipAddr, port, agentID, &agentWg, sendCnt, rootCAx509CertificatePEM)
+ go pfsagent(t, rrSrv, ipAddr, port, agentID, method, &agentWg, sendCnt, rootCAx509CertificatePEM)
}
agentWg.Wait()
}
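
The reworked pfsagent() loop coordinates two WaitGroups: sendWg counts every send, while prevWg is released by every send except the last, so the final send only starts once its predecessors have completed (this is what makes the short, ACK-based trimmer testable). A minimal standalone sketch of that gating pattern (the real test additionally polls client.highestConsecutive after prevWg.Wait(), which the sketch omits):

    package sketch

    import "sync"

    // gatedSends launches sendCnt workers; the last worker does not start its
    // work until all earlier workers have finished, mirroring the prevWg/sendWg
    // pattern in the stress test. work stands in for the RPC send.
    func gatedSends(sendCnt int, work func(i int)) {
    	var (
    		sendWg sync.WaitGroup // all sends
    		prevWg sync.WaitGroup // all sends except the last
    	)

    	for i := 0; i < sendCnt; i++ {
    		last := i == sendCnt-1
    		if last {
    			prevWg.Wait() // hold the final send until the others complete
    		} else {
    			prevWg.Add(1)
    		}

    		sendWg.Add(1)
    		go func(i int, last bool) {
    			defer sendWg.Done()
    			work(i)
    			if !last {
    				prevWg.Done()
    			}
    		}(i, last)
    	}

    	sendWg.Wait()
    }
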
diff --git a/retryrpc/upcall_test.go b/retryrpc/upcall_test.go
index aa8181478..8304ae861 100644
--- a/retryrpc/upcall_test.go
+++ b/retryrpc/upcall_test.go
@@ -40,7 +40,7 @@ func testUpCall(t *testing.T) {
// RPCs
myJrpcfs := rpctest.NewServer()
- rrSvr, ipaddr, port := getNewServer(10 * time.Second)
+ rrSvr, ipaddr, port := getNewServer(10*time.Second, false)
assert.NotNil(rrSvr)
// Register the Server - sets up the methods supported by the
diff --git a/saio/Vagrantfile b/saio/Vagrantfile
index c661abddf..a923d6bb1 100644
--- a/saio/Vagrantfile
+++ b/saio/Vagrantfile
@@ -9,7 +9,7 @@
# 4) ProxyFS repo git clone'd to $GOPATH/src/github.com/swiftstack/
# 5) samba repo automatically staged in $GOPATH/src/github.com/swiftstack/ProxyFS/saio/
# 6) Swift repos et. al. git clone'd to $GOPATH/src/github.com/swiftstack/ProxyFS/saio/
-# 7) ../regression_test.py will be ready to be executed after `cdpfs` inside the VM
+# 7) ../Makefile will be ready to be executed after `cdpfs` inside the VM
# 8) As GOPATH is effectively shared between Host and VM, builds in the two environments
# will collide in the contents of the $GOPATH/bin (only executables, not libraries)
diff --git a/saio/home/swift/bin/resetswift b/saio/home/swift/bin/resetswift
index 4663e074a..43a4dc492 100755
--- a/saio/home/swift/bin/resetswift
+++ b/saio/home/swift/bin/resetswift
@@ -3,20 +3,22 @@
set -e
sudo swift-init all kill
-if cut -d' ' -f2 /proc/mounts | grep -q /mnt/sdb1 ; then
- sudo umount /mnt/sdb1
-fi
-sudo truncate -s 0 /srv/swift-disk
-sudo truncate -s 10GB /srv/swift-disk
-sudo mkfs.xfs -f /srv/swift-disk
-sudo mount /mnt/sdb1
-sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
-sudo chown swift:swift /mnt/sdb1/*
-sudo mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 /srv/1/node/sdb9 \
- /srv/2/node/sdb2 /srv/2/node/sdb6 /srv/2/node/sdbA \
- /srv/3/node/sdb3 /srv/3/node/sdb7 /srv/3/node/sdbB \
- /srv/4/node/sdb4 /srv/4/node/sdb8 /srv/4/node/sdbC
-for x in {1..4}; do sudo chown -R swift:swift /srv/$x/; done
+
+for x in 11 22 33 44 15 26 37 48 19 2A 3B 4C
+do
+ node=${x:0:1}
+ drive=${x:1:1}
+ if cut -d' ' -f2 /proc/mounts | grep -q /srv/$node/node/sdb$drive
+ then
+ sudo umount /srv/$node/node/sdb$drive
+ fi
+ sudo truncate -s 0 /srv/swift-disk-$drive
+ sudo truncate -s 1GB /srv/swift-disk-$drive
+ sudo mkfs.xfs -f /srv/swift-disk-$drive
+ sudo mount /srv/$node/node/sdb$drive
+ sudo chown swift:swift /srv/$node/node/sdb$drive
+done
+
sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
find /var/cache/swift* -type f -name *.recon -exec rm -f {} \;
sudo systemctl restart memcached
diff --git a/saio/usr/lib/systemd/system/pfsagentd.service b/saio/usr/lib/systemd/system/pfsagentd.service
index 7ffd54153..2b674e103 100644
--- a/saio/usr/lib/systemd/system/pfsagentd.service
+++ b/saio/usr/lib/systemd/system/pfsagentd.service
@@ -3,7 +3,7 @@ Description=PFSAgent service
After=proxyfsd.service
[Service]
-Environment=PATH=/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/bin
+Environment=PATH=/usr/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/bin:/vagrant/bin
Environment=GOTRACEBACK=crash
LimitCORE=infinity
ExecStart=/vagrant/bin/pfsagentd /vagrant/src/github.com/swiftstack/ProxyFS/pfsagentd/pfsagent.conf
diff --git a/saio/vagrant_provision.sh b/saio/vagrant_provision.sh
index 9486783b3..899474082 100644
--- a/saio/vagrant_provision.sh
+++ b/saio/vagrant_provision.sh
@@ -198,25 +198,27 @@ pip install tox==3.5.3
useradd --user-group --groups wheel swift
chmod 755 ~swift
-# Using a loopback device for storage
+# Using loopback devices for storage
mkdir -p /srv
-truncate -s 0 /srv/swift-disk
-truncate -s 10GB /srv/swift-disk
-mkfs.xfs -f /srv/swift-disk
-echo "/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
-mkdir -p /mnt/sdb1
-mount /mnt/sdb1
-sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
-chown swift:swift /mnt/sdb1/*
-for x in {1..4}; do ln -s /mnt/sdb1/$x /srv/$x; done
-mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 /srv/1/node/sdb9 \
- /srv/2/node/sdb2 /srv/2/node/sdb6 /srv/2/node/sdbA \
- /srv/3/node/sdb3 /srv/3/node/sdb7 /srv/3/node/sdbB \
- /srv/4/node/sdb4 /srv/4/node/sdb8 /srv/4/node/sdbC \
- /var/run/swift
+
+for x in 11 22 33 44 15 26 37 48 19 2A 3B 4C
+do
+ node=${x:0:1}
+ drive=${x:1:1}
+ truncate -s 0 /srv/swift-disk-$drive
+ truncate -s 1GB /srv/swift-disk-$drive
+ mkfs.xfs -f /srv/swift-disk-$drive
+ mkdir -p /srv/$node/node/sdb$drive
+ echo "/srv/swift-disk-$drive /srv/$node/node/sdb$drive xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
+ mount /srv/$node/node/sdb$drive
+ chown swift:swift /srv/$node/node/sdb$drive
+done
+
+# Create Swift temporary file dir
+
+mkdir -p /var/run/swift
chown -R swift:swift /var/run/swift
-for x in {1..4}; do chown -R swift:swift /srv/$x/; done
# [Setup Swift] Common Post-Device Setup (Add /var boot-time provisioning to /etc/rc.d/rc.local)
@@ -272,7 +274,7 @@ echo "export ST_KEY=testing" >> ~vagrant/.bash_profile
cd ~swift
git clone https://github.com/swiftstack/swift.git
cd swift
-git checkout ss-release-2.24.0.3
+git checkout ss-release-2.25.0.4
pip install wheel
python setup.py bdist_wheel
pip install --no-binary cryptography -r requirements.txt
@@ -443,6 +445,10 @@ EOF
cp /vagrant/src/github.com/swiftstack/ProxyFS/saio/usr/lib/systemd/system/proxyfsd.service /usr/lib/systemd/system/.
cp /vagrant/src/github.com/swiftstack/ProxyFS/saio/usr/lib/systemd/system/pfsagentd.service /usr/lib/systemd/system/.
+# Place symlink in root's $PATH to locate pfsagentd-swift-auth-plugin referenced without a path
+
+ln -s /vagrant/bin/pfsagentd-swift-auth-plugin /usr/bin/pfsagentd-swift-auth-plugin
+
# Enable Samba service in an SELinux environment
yum -y install policycoreutils-python
@@ -479,6 +485,10 @@ yum -y install dstat
yum -y install tree
+# Install jq... a very handy JSON parser
+
+yum -y install jq
+
# Install and configure a localhost-only one-node etcd cluster
ETCD_VERSION=3.4.7
@@ -512,11 +522,13 @@ EOF
systemctl daemon-reload
-# All done
-
-echo "SAIO for ProxyFS provisioned"
+# Add some VIPs
ip addr add dev enp0s8 172.28.128.21/24
ip addr add dev enp0s8 172.28.128.22/24
ip addr add dev enp0s8 172.28.128.23/24
ip addr add dev enp0s8 172.28.128.24/24
+
+# All done
+
+echo "SAIO for ProxyFS provisioned"
diff --git a/sait/Vagrantfile b/sait/Vagrantfile
new file mode 100644
index 000000000..fbca6e76e
--- /dev/null
+++ b/sait/Vagrantfile
@@ -0,0 +1,82 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Notes:
+#
+# 1) vboxnet0 is assumed to be a host-only network @ address 172.28.128.1 (DHCP disabled)
+# 2) Though not required, GOPATH is assumed to be the ../../../../../ directory
+# 3) The directory on the VM Host will be /vagrant on the VM and be the path in GOPATH
+# 4) ProxyFS repo git clone'd to $GOPATH/src/github.com/swiftstack/
+# 5) samba repo automatically staged in $GOPATH/src/github.com/swiftstack/ProxyFS/saio/
+# 6) Swift repos et. al. git clone'd to $GOPATH/src/github.com/swiftstack/ProxyFS/saio/
+# 7) ../Makefile will be ready to be executed after `cdpfs` inside the VM
+# 8) As GOPATH is effectively shared between Host and VM, builds in the two environments
+# will collide in the contents of the $GOPATH/bin (only executables, not libraries)
+# 9) As of now, only ProxyFS is supported (no Samba, no ramswift), so, at the most,
+#    users should only do `make version fmt pre-generate generate install`
+# 10) Only the following tools in bin/ are currently supported:
+# provision_middleware
+# start_proxyfs_and_swift
+# start_swift_only
+# unmount_and_stop_pfs
+# 11) bin/start_proxyfs_and_swift requires an argument of either 1, 2, or 3
+# 12) home/swift/bin/resetswift requires an argument of either 1, 2, or 3
+
+Vagrant.configure(2) do |config|
+ config.vm.define "sait1" do |sconfig|
+ sconfig.vm.box = "centos-74-minimal-20171228"
+ sconfig.vm.box_url = "https://o.swiftstack.org/v1/AUTH_misc/vagrant_boxes/centos-74-minimal-20171228.box"
+ sconfig.vm.define "sait1pfs" do |sait1pfs|
+ end
+ sconfig.vm.provider :virtualbox do |vb|
+ vb.name = "SAIT 1 for ProxyFS"
+ vb.cpus = Integer(ENV['VAGRANT_CPUS'] || 1)
+ vb.memory = Integer(ENV['VAGRANT_RAM'] || 2048)
+ vb.customize ["modifyvm", :id, "--audio", "none"]
+ end
+ sconfig.vm.synced_folder "../../../../../", "/vagrant", type: "virtualbox"
+ sconfig.vm.network "private_network", ip: "172.28.128.3", :name => 'vboxnet0', :adapter => 2
+ sconfig.vm.network "forwarded_port", guest: 15346, host: 15347
+ sconfig.vm.network "forwarded_port", guest: 9090, host: 9092
+ sconfig.vm.network "private_network", ip: "192.168.22.114", :name => 'vboxnet1', :adapter => 3
+ sconfig.vm.provision "shell", path: "vagrant_provision.sh", args: "1"
+ end
+
+ config.vm.define "sait2" do |sconfig|
+ sconfig.vm.box = "centos-74-minimal-20171228"
+ sconfig.vm.box_url = "https://o.swiftstack.org/v1/AUTH_misc/vagrant_boxes/centos-74-minimal-20171228.box"
+ sconfig.vm.define "sait2pfs" do |sait2pfs|
+ end
+ sconfig.vm.provider :virtualbox do |vb|
+ vb.name = "SAIT 2 for ProxyFS"
+ vb.cpus = Integer(ENV['VAGRANT_CPUS'] || 1)
+ vb.memory = Integer(ENV['VAGRANT_RAM'] || 2048)
+ vb.customize ["modifyvm", :id, "--audio", "none"]
+ end
+ sconfig.vm.synced_folder "../../../../../", "/vagrant", type: "virtualbox"
+ sconfig.vm.network "private_network", ip: "172.28.128.4", :name => 'vboxnet0', :adapter => 2
+ sconfig.vm.network "forwarded_port", guest: 15346, host: 15348
+ sconfig.vm.network "forwarded_port", guest: 9090, host: 9093
+ sconfig.vm.network "private_network", ip: "192.168.22.115", :name => 'vboxnet1', :adapter => 3
+ sconfig.vm.provision "shell", path: "vagrant_provision.sh", args: "2"
+ end
+
+ config.vm.define "sait3" do |sconfig|
+ sconfig.vm.box = "centos-74-minimal-20171228"
+ sconfig.vm.box_url = "https://o.swiftstack.org/v1/AUTH_misc/vagrant_boxes/centos-74-minimal-20171228.box"
+    sconfig.vm.define "sait3pfs" do |sait3pfs|
+ end
+ sconfig.vm.provider :virtualbox do |vb|
+ vb.name = "SAIT 3 for ProxyFS"
+ vb.cpus = Integer(ENV['VAGRANT_CPUS'] || 1)
+ vb.memory = Integer(ENV['VAGRANT_RAM'] || 2048)
+ vb.customize ["modifyvm", :id, "--audio", "none"]
+ end
+ sconfig.vm.synced_folder "../../../../../", "/vagrant", type: "virtualbox"
+ sconfig.vm.network "private_network", ip: "172.28.128.5", :name => 'vboxnet0', :adapter => 2
+ sconfig.vm.network "forwarded_port", guest: 15346, host: 15349
+ sconfig.vm.network "forwarded_port", guest: 9090, host: 9094
+ sconfig.vm.network "private_network", ip: "192.168.22.116", :name => 'vboxnet1', :adapter => 3
+ sconfig.vm.provision "shell", path: "vagrant_provision.sh", args: "3"
+ end
+end
diff --git a/sait/bin/provision_middleware b/sait/bin/provision_middleware
new file mode 100755
index 000000000..f62831a27
--- /dev/null
+++ b/sait/bin/provision_middleware
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cd $GOPATH/src/github.com/swiftstack/ProxyFS/pfs_middleware
+sudo python setup.py develop
+cd $GOPATH/src/github.com/swiftstack/ProxyFS/meta_middleware
+sudo python setup.py develop
diff --git a/sait/bin/start_proxyfs_and_swift b/sait/bin/start_proxyfs_and_swift
new file mode 100755
index 000000000..8c597e82a
--- /dev/null
+++ b/sait/bin/start_proxyfs_and_swift
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+if [ "$1" == "1" ]
+then
+ PRIVATE_IP_ADDR="192.168.22.114"
+ VOLUME_TO_SERVE="CommonVolume"
+elif [ "$1" == "2" ]
+then
+ PRIVATE_IP_ADDR="192.168.22.115"
+ VOLUME_TO_SERVE=""
+elif [ "$1" == "3" ]
+then
+ PRIVATE_IP_ADDR="192.168.22.116"
+ VOLUME_TO_SERVE=""
+else
+ echo Arg1 is unexpected: $1
+ exit 1
+fi
+
+SAIT_DIR=sait$1
+
+function await_proxyfsd_startup {
+ while true
+ do
+ /usr/bin/systemctl -q is-active proxyfsd
+ if [ $? -ne 0 ]
+ then
+ echo "ProxyFS failed to start. Exiting..."
+ exit 1
+ fi
+ curl http://$PRIVATE_IP_ADDR:15346/ 2>/dev/null >/dev/null
+ if [ $? -eq 0 ]
+ then
+ break
+ fi
+ sleep 1
+ done
+}
+
+function await_swift_startup {
+ while true
+ do
+ curl http://127.0.0.1:8090/info 2>/dev/null >/dev/null
+ if [ $? -eq 0 ]
+ then
+ break
+ fi
+ echo "Waiting for Swift to be started..."
+ sleep 1
+ done
+}
+
+function format_volume_if_necessary {
+ if [ "" != "$1" ]
+ then
+ sudo /vagrant/bin/mkproxyfs -I $1 /vagrant/src/github.com/swiftstack/ProxyFS/sait/$SAIT_DIR/proxyfs.conf SwiftClient.RetryLimit=1
+ if [ $? -ne 0 ]
+ then
+ echo "Could not pre-format $1"
+ exit 1
+ fi
+ fi
+}
+
+sudo mount -a
+
+echo "Shutting down services and mount points..."
+/vagrant/src/github.com/swiftstack/ProxyFS/sait/bin/unmount_and_stop_pfs
+echo
+echo "Bringing up services..."
+if [ -f /usr/bin/systemctl ]; then
+ # Centos
+ sudo /usr/bin/systemctl start memcached
+ sudo /usr/bin/swift-init main start
+ await_swift_startup
+ format_volume_if_necessary $VOLUME_TO_SERVE
+ sudo /usr/bin/systemctl start proxyfsd
+ await_proxyfsd_startup
+else
+ # Ubuntu (not tested!)
+ sudo /usr/sbin/service memcached start
+ sudo /usr/bin/swift-init main start
+ await_swift_startup
+ format_volume_if_necessary $VOLUME_TO_SERVE
+ sudo /usr/sbin/service proxyfsd start
+ await_proxyfsd_startup
+fi
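
The await_* helpers above simply poll an HTTP endpoint until it answers (the proxyfsd HTTP server on port 15346, Swift's /info on 8090). For Go-driven tooling, the equivalent readiness check is only a few lines; a sketch (the helper name and its parameters are illustrative, the scripts themselves just use curl):

    package sketch

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    // awaitHTTPReady polls url until it answers, sleeping interval between
    // attempts, and gives up after the given number of attempts.
    func awaitHTTPReady(url string, interval time.Duration, attempts int) error {
    	for i := 0; i < attempts; i++ {
    		resp, err := http.Get(url)
    		if err == nil {
    			resp.Body.Close()
    			return nil
    		}
    		time.Sleep(interval)
    	}
    	return fmt.Errorf("%s not ready after %d attempts", url, attempts)
    }
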
diff --git a/sait/bin/start_swift_only b/sait/bin/start_swift_only
new file mode 100755
index 000000000..e49ba4771
--- /dev/null
+++ b/sait/bin/start_swift_only
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+sudo mount -a
+
+echo "Shutting down services and mount points..."
+/vagrant/src/github.com/swiftstack/ProxyFS/sait/bin/unmount_and_stop_pfs
+echo
+echo "Bringing up services..."
+if [ -f /usr/bin/systemctl ]; then
+ # Centos
+ sudo /usr/bin/systemctl start memcached
+ sudo /usr/bin/swift-init main start
+else
+ # Ubuntu (not tested!)
+ sudo /usr/sbin/service memcached start
+ sudo /usr/bin/swift-init main start
+fi
diff --git a/sait/bin/unmount_and_stop_pfs b/sait/bin/unmount_and_stop_pfs
new file mode 100755
index 000000000..58e7bb637
--- /dev/null
+++ b/sait/bin/unmount_and_stop_pfs
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+function await_proxyfsd_shutdown {
+ while true
+ do
+ pidof proxyfsd > /dev/null
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ echo "Waiting for ProxyFS to be stopped..."
+ sleep 1
+ done
+}
+
+if [ -f /usr/bin/systemctl ]; then
+ # Centos
+ sudo /usr/bin/systemctl stop proxyfsd
+ await_proxyfsd_shutdown
+ sudo /usr/bin/swift-init main stop
+ sudo /usr/bin/systemctl stop memcached
+else
+ # Ubuntu (not tested!)
+ # Here we should stop pfsagentd, but we don't support Ubuntu
+ sudo /usr/sbin/service proxyfsd stop
+ await_proxyfsd_shutdown
+ sudo /usr/bin/swift-init main stop
+ sudo /usr/sbin/service memcached stop
+fi
diff --git a/sait/etc/swift/container-reconciler.conf b/sait/etc/swift/container-reconciler.conf
new file mode 100644
index 000000000..30f81ca5c
--- /dev/null
+++ b/sait/etc/swift/container-reconciler.conf
@@ -0,0 +1,47 @@
+[DEFAULT]
+# swift_dir = /etc/swift
+user = swift
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+
+[container-reconciler]
+# reclaim_age = 604800
+# interval = 300
+# request_tries = 3
+
+[pipeline:main]
+pipeline = catch_errors proxy-logging cache proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+# See proxy-server.conf-sample for options
+
+[filter:cache]
+use = egg:swift#memcache
+# See proxy-server.conf-sample for options
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# See proxy-server.conf-sample for options
diff --git a/sait/etc/swift/container-sync-realms.conf b/sait/etc/swift/container-sync-realms.conf
new file mode 100644
index 000000000..c84d3391d
--- /dev/null
+++ b/sait/etc/swift/container-sync-realms.conf
@@ -0,0 +1,4 @@
+[saio]
+key = changeme
+key2 = changeme
+cluster_saio_endpoint = http://127.0.0.1:8080/v1/
diff --git a/sait/etc/swift/object-expirer.conf b/sait/etc/swift/object-expirer.conf
new file mode 100644
index 000000000..ab8a4cba6
--- /dev/null
+++ b/sait/etc/swift/object-expirer.conf
@@ -0,0 +1,59 @@
+[DEFAULT]
+# swift_dir = /etc/swift
+user = swift
+# You can specify default log routing here if you want:
+log_name = object-expirer
+log_facility = LOG_LOCAL6
+log_level = INFO
+#log_address = /dev/log
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
+# adapted_logger
+# log_custom_handlers =
+#
+# If set, log_udp_host will override log_address
+# log_udp_host =
+# log_udp_port = 514
+#
+# You can enable StatsD logging here:
+# log_statsd_host =
+# log_statsd_port = 8125
+# log_statsd_default_sample_rate = 1.0
+# log_statsd_sample_rate_factor = 1.0
+# log_statsd_metric_prefix =
+
+[object-expirer]
+interval = 300
+# auto_create_account_prefix = .
+# report_interval = 300
+# concurrency is the level of concurrency to use to do the work, this value
+# must be set to at least 1
+# concurrency = 1
+# processes is how many parts to divide the work into, one part per process
+# that will be doing the work
+# processes set 0 means that a single process will be doing all the work
+# processes can also be specified on the command line and will override the
+# config value
+# processes = 0
+# process is which of the parts a particular process will work on
+# process can also be specified on the command line and will override the config
+# value
+# process is "zero based", if you want to use 3 processes, you should run
+# processes with process set to 0, 1, and 2
+# process = 0
+
+[pipeline:main]
+pipeline = catch_errors cache proxy-server
+
+[app:proxy-server]
+use = egg:swift#proxy
+# See proxy-server.conf-sample for options
+
+[filter:cache]
+use = egg:swift#memcache
+# See proxy-server.conf-sample for options
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+# See proxy-server.conf-sample for options
diff --git a/sait/etc/swift/proxy-server.conf b/sait/etc/swift/proxy-server.conf
new file mode 100644
index 000000000..652614ce9
--- /dev/null
+++ b/sait/etc/swift/proxy-server.conf
@@ -0,0 +1,93 @@
+[DEFAULT]
+bind_ip = 0.0.0.0
+bind_port = 8080
+workers = 1
+user = swift
+log_facility = LOG_LOCAL1
+eventlet_debug = true
+
+[pipeline:main]
+# Yes, proxy-logging appears twice. This is so that
+# middleware-originated requests get logged too.
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk tempurl ratelimit crossdomain container_sync s3api tempauth staticweb copy container-quotas account-quotas slo dlo pfs versioned_writes proxy-logging proxy-server
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+
+[filter:cache]
+use = egg:swift#memcache
+memcache_max_connections = 1024
+
+[filter:bulk]
+use = egg:swift#bulk
+
+[filter:tempurl]
+use = egg:swift#tempurl
+
+[filter:ratelimit]
+use = egg:swift#ratelimit
+
+[filter:crossdomain]
+use = egg:swift#crossdomain
+
+[filter:container_sync]
+use = egg:swift#container_sync
+current = //saio/saio_endpoint
+
+[filter:s3api]
+use = egg:swift#s3api
+allow_multipart_uploads = yes
+check_bucket_owner = no
+dns_compliant_bucket_names = yes
+force_swift_request_proxy_log = yes
+s3_acl = no
+
+[filter:tempauth]
+use = egg:swift#tempauth
+user_admin_admin = admin .admin .reseller_admin
+user_test_tester = testing .admin
+user_test2_tester2 = testing2 .admin
+user_test_tester3 = testing3
+
+[filter:staticweb]
+use = egg:swift#staticweb
+
+[filter:copy]
+use = egg:swift#copy
+
+[filter:container-quotas]
+use = egg:swift#container_quotas
+
+[filter:account-quotas]
+use = egg:swift#account_quotas
+
+[filter:slo]
+use = egg:swift#slo
+
+[filter:dlo]
+use = egg:swift#dlo
+
+# For bypass_mode, one of off (default), read-only, or read-write
+[filter:pfs]
+use = egg:pfs_middleware#pfs
+proxyfsd_host = 192.168.22.114
+proxyfsd_port = 12345
+bypass_mode = read-write
+
+[filter:versioned_writes]
+use = egg:swift#versioned_writes
+allow_versioned_writes = true
+
+[app:proxy-server]
+use = egg:swift#proxy
+allow_account_management = true
+account_autocreate = true
diff --git a/sait/etc/swift/proxy-server/proxy-noauth.conf.d/20_settings.conf b/sait/etc/swift/proxy-server/proxy-noauth.conf.d/20_settings.conf
new file mode 100644
index 000000000..7e7cf83e5
--- /dev/null
+++ b/sait/etc/swift/proxy-server/proxy-noauth.conf.d/20_settings.conf
@@ -0,0 +1,45 @@
+[DEFAULT]
+bind_ip = 0.0.0.0
+bind_port = 8090
+workers = 1
+user = swift
+log_facility = LOG_LOCAL1
+eventlet_debug = true
+
+[pipeline:main]
+# Yes, proxy-logging appears twice. This is so that
+# middleware-originated requests get logged too.
+pipeline = catch_errors gatekeeper healthcheck proxy-logging cache copy dlo meta versioned_writes proxy-logging proxy-server
+
+[filter:catch_errors]
+use = egg:swift#catch_errors
+
+[filter:gatekeeper]
+use = egg:swift#gatekeeper
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+
+[filter:cache]
+use = egg:swift#memcache
+
+[filter:copy]
+use = egg:swift#copy
+
+[filter:dlo]
+use = egg:swift#dlo
+
+[filter:meta]
+use = egg:meta_middleware#meta
+
+[filter:versioned_writes]
+use = egg:swift#versioned_writes
+allow_versioned_writes = true
+
+[app:proxy-server]
+use = egg:swift#proxy
+allow_account_management = true
+account_autocreate = true
diff --git a/sait/etc/swift/swift.conf b/sait/etc/swift/swift.conf
new file mode 100644
index 000000000..3022ded1e
--- /dev/null
+++ b/sait/etc/swift/swift.conf
@@ -0,0 +1,17 @@
+[swift-hash]
+# random unique strings that can never change (DO NOT LOSE)
+# Use only printable chars (python -c "import string; print(string.printable)")
+swift_hash_path_prefix = changeme
+swift_hash_path_suffix = changeme
+
+[storage-policy:0]
+name = gold
+policy_type = replication
+default = no
+deprecated = no
+
+[storage-policy:1]
+name = silver
+policy_type = replication
+default = yes
+deprecated = no
diff --git a/sait/home/swift/bin/remakerings b/sait/home/swift/bin/remakerings
new file mode 100755
index 000000000..6c16c1245
--- /dev/null
+++ b/sait/home/swift/bin/remakerings
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -e
+
+cd /etc/swift
+
+rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+swift-ring-builder object.builder create 10 1 1
+swift-ring-builder object.builder add r1z1-192.168.22.114:8010/sdb1 1
+swift-ring-builder object.builder add r1z1-192.168.22.115:8010/sdb2 1
+swift-ring-builder object.builder add r1z1-192.168.22.116:8010/sdb3 1
+swift-ring-builder object.builder rebalance
+swift-ring-builder object-1.builder create 10 1 1
+swift-ring-builder object-1.builder add r1z1-192.168.22.114:8010/sdb1 1
+swift-ring-builder object-1.builder add r1z1-192.168.22.115:8010/sdb2 1
+swift-ring-builder object-1.builder add r1z1-192.168.22.116:8010/sdb3 1
+swift-ring-builder object-1.builder rebalance
+swift-ring-builder container.builder create 10 1 1
+swift-ring-builder container.builder add r1z1-192.168.22.114:8011/sdb1 1
+swift-ring-builder container.builder add r1z1-192.168.22.115:8011/sdb2 1
+swift-ring-builder container.builder add r1z1-192.168.22.116:8011/sdb3 1
+swift-ring-builder container.builder rebalance
+swift-ring-builder account.builder create 10 1 1
+swift-ring-builder account.builder add r1z1-192.168.22.114:8012/sdb1 1
+swift-ring-builder account.builder add r1z1-192.168.22.115:8012/sdb2 1
+swift-ring-builder account.builder add r1z1-192.168.22.116:8012/sdb3 1
+swift-ring-builder account.builder rebalance
diff --git a/sait/home/swift/bin/resetswift b/sait/home/swift/bin/resetswift
new file mode 100755
index 000000000..6e1723b88
--- /dev/null
+++ b/sait/home/swift/bin/resetswift
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+if [ "$1" != "1" ] && [ "$1" != "2" ] && [ "$1" != "3" ]
+then
+ echo Arg1 is unexpected: $1
+ exit 1
+fi
+
+set -e
+
+SAIT_INSTANCE=$1
+
+sudo swift-init all kill
+
+if cut -d' ' -f2 /proc/mounts | grep -q /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+then
+ sudo umount /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+fi
+sudo truncate -s 0 /srv/swift-disk
+sudo truncate -s 1GB /srv/swift-disk
+sudo mkfs.xfs -f /srv/swift-disk
+sudo mount /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+sudo chown swift:swift /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+
+sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
+find /var/cache/swift* -type f -name *.recon -exec rm -f {} \;
+sudo systemctl restart memcached
diff --git a/sait/proxyfs.conf b/sait/proxyfs.conf
new file mode 100644
index 000000000..b24b1c990
--- /dev/null
+++ b/sait/proxyfs.conf
@@ -0,0 +1,43 @@
+# Three peer .conf file customized for SAIT for ProxyFS VMs (included by per-sait?/proxyfs.conf)
+
+[Peer:Peer1]
+PublicIPAddr: 172.28.128.3
+PrivateIPAddr: 192.168.22.114
+ReadCacheQuotaFraction: 0.20
+
+[Peer:Peer2]
+PublicIPAddr: 172.28.128.4
+PrivateIPAddr: 192.168.22.115
+ReadCacheQuotaFraction: 0.20
+
+[Peer:Peer3]
+PublicIPAddr: 172.28.128.5
+PrivateIPAddr: 192.168.22.116
+ReadCacheQuotaFraction: 0.20
+
+[Cluster]
+Peers: Peer1 Peer2 Peer3
+ServerGuid: 0bb51164-258f-4e04-a417-e16d736ca41c
+PrivateClusterUDPPort: 8123
+UDPPacketSendSize: 1400
+UDPPacketRecvSize: 1500
+UDPPacketCapPerMessage: 5
+HeartBeatDuration: 1s
+HeartBeatMissLimit: 3
+MessageQueueDepthPerPeer: 4
+MaxRequestDuration: 1s
+LivenessCheckRedundancy: 2
+LogLevel: 0
+
+.include ../proxyfsd/file_server.conf
+
+[VolumeGroup:CommonVolumeGroup]
+PrimaryPeer: Peer1
+
+.include ../proxyfsd/swift_client.conf
+.include ../proxyfsd/rpc_server.conf
+.include ../proxyfsd/saio_logging.conf
+.include ../proxyfsd/stats.conf
+.include ../proxyfsd/statslogger.conf
+.include ../proxyfsd/httpserver.conf
+.include ../proxyfsd/debug.conf
diff --git a/sait/sait1/etc/swift/account-server/1.conf b/sait/sait1/etc/swift/account-server/1.conf
new file mode 100644
index 000000000..54afdfcb0
--- /dev/null
+++ b/sait/sait1/etc/swift/account-server/1.conf
@@ -0,0 +1,27 @@
+[DEFAULT]
+devices = /srv/1/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.114
+bind_port = 8012
+workers = 1
+user = swift
+log_facility = LOG_LOCAL2
+recon_cache_path = /var/cache/swift
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon account-server
+
+[app:account-server]
+use = egg:swift#account
+
+[filter:recon]
+use = egg:swift#recon
+
+[account-replicator]
+rsync_module = {replication_ip}::account{replication_port}
+
+[account-auditor]
+
+[account-reaper]
diff --git a/sait/sait1/etc/swift/container-server/1.conf b/sait/sait1/etc/swift/container-server/1.conf
new file mode 100644
index 000000000..85a99ed1a
--- /dev/null
+++ b/sait/sait1/etc/swift/container-server/1.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/1/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.114
+bind_port = 8011
+workers = 1
+user = swift
+log_facility = LOG_LOCAL2
+recon_cache_path = /var/cache/swift
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon container-server
+
+[app:container-server]
+use = egg:swift#container
+
+[filter:recon]
+use = egg:swift#recon
+
+[container-replicator]
+rsync_module = {replication_ip}::container{replication_port}
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
diff --git a/sait/sait1/etc/swift/object-server/1.conf b/sait/sait1/etc/swift/object-server/1.conf
new file mode 100644
index 000000000..ae4f29cad
--- /dev/null
+++ b/sait/sait1/etc/swift/object-server/1.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/1/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.114
+bind_port = 8010
+workers = 1
+user = swift
+log_facility = LOG_LOCAL2
+recon_cache_path = /var/cache/swift
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon object-server
+
+[app:object-server]
+use = egg:swift#object
+
+[filter:recon]
+use = egg:swift#recon
+
+[object-replicator]
+rsync_module = {replication_ip}::object{replication_port}
+
+[object-reconstructor]
+
+[object-updater]
+
+[object-auditor]
diff --git a/sait/sait1/proxyfs.conf b/sait/sait1/proxyfs.conf
new file mode 100644
index 000000000..e941c7b86
--- /dev/null
+++ b/sait/sait1/proxyfs.conf
@@ -0,0 +1,4 @@
+[Cluster]
+WhoAmI: Peer1
+
+.include ../proxyfs.conf
diff --git a/sait/sait1/usr/lib/systemd/system/proxyfsd.service b/sait/sait1/usr/lib/systemd/system/proxyfsd.service
new file mode 100644
index 000000000..38a806146
--- /dev/null
+++ b/sait/sait1/usr/lib/systemd/system/proxyfsd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift ProxyFS service
+
+[Service]
+Environment=GOTRACEBACK=crash
+LimitCORE=infinity
+ExecStart=/vagrant/bin/proxyfsd /vagrant/src/github.com/swiftstack/ProxyFS/sait/sait1/proxyfs.conf
+ExecReload=/usr/bin/kill -HUP $MAINPID
+Restart=no
+KillMode=process
diff --git a/sait/sait2/etc/swift/account-server/2.conf b/sait/sait2/etc/swift/account-server/2.conf
new file mode 100644
index 000000000..7b40a392a
--- /dev/null
+++ b/sait/sait2/etc/swift/account-server/2.conf
@@ -0,0 +1,27 @@
+[DEFAULT]
+devices = /srv/2/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.115
+bind_port = 8012
+workers = 1
+user = swift
+log_facility = LOG_LOCAL3
+recon_cache_path = /var/cache/swift2
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon account-server
+
+[app:account-server]
+use = egg:swift#account
+
+[filter:recon]
+use = egg:swift#recon
+
+[account-replicator]
+rsync_module = {replication_ip}::account{replication_port}
+
+[account-auditor]
+
+[account-reaper]
diff --git a/sait/sait2/etc/swift/container-server/2.conf b/sait/sait2/etc/swift/container-server/2.conf
new file mode 100644
index 000000000..a19259bae
--- /dev/null
+++ b/sait/sait2/etc/swift/container-server/2.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/2/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.115
+bind_port = 8011
+workers = 1
+user = swift
+log_facility = LOG_LOCAL3
+recon_cache_path = /var/cache/swift2
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon container-server
+
+[app:container-server]
+use = egg:swift#container
+
+[filter:recon]
+use = egg:swift#recon
+
+[container-replicator]
+rsync_module = {replication_ip}::container{replication_port}
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
diff --git a/sait/sait2/etc/swift/object-server/2.conf b/sait/sait2/etc/swift/object-server/2.conf
new file mode 100644
index 000000000..2051b322f
--- /dev/null
+++ b/sait/sait2/etc/swift/object-server/2.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/2/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.115
+bind_port = 8010
+workers = 1
+user = swift
+log_facility = LOG_LOCAL3
+recon_cache_path = /var/cache/swift2
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon object-server
+
+[app:object-server]
+use = egg:swift#object
+
+[filter:recon]
+use = egg:swift#recon
+
+[object-replicator]
+rsync_module = {replication_ip}::object{replication_port}
+
+[object-reconstructor]
+
+[object-updater]
+
+[object-auditor]
diff --git a/sait/sait2/proxyfs.conf b/sait/sait2/proxyfs.conf
new file mode 100644
index 000000000..06432ae43
--- /dev/null
+++ b/sait/sait2/proxyfs.conf
@@ -0,0 +1,4 @@
+[Cluster]
+WhoAmI: Peer2
+
+.include ../proxyfs.conf
diff --git a/sait/sait2/usr/lib/systemd/system/proxyfsd.service b/sait/sait2/usr/lib/systemd/system/proxyfsd.service
new file mode 100644
index 000000000..90f779cf9
--- /dev/null
+++ b/sait/sait2/usr/lib/systemd/system/proxyfsd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift ProxyFS service
+
+[Service]
+Environment=GOTRACEBACK=crash
+LimitCORE=infinity
+ExecStart=/vagrant/bin/proxyfsd /vagrant/src/github.com/swiftstack/ProxyFS/sait/sait2/proxyfs.conf
+ExecReload=/usr/bin/kill -HUP $MAINPID
+Restart=no
+KillMode=process
diff --git a/sait/sait3/etc/swift/account-server/3.conf b/sait/sait3/etc/swift/account-server/3.conf
new file mode 100644
index 000000000..275520c32
--- /dev/null
+++ b/sait/sait3/etc/swift/account-server/3.conf
@@ -0,0 +1,27 @@
+[DEFAULT]
+devices = /srv/3/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.116
+bind_port = 8012
+workers = 1
+user = swift
+log_facility = LOG_LOCAL4
+recon_cache_path = /var/cache/swift3
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon account-server
+
+[app:account-server]
+use = egg:swift#account
+
+[filter:recon]
+use = egg:swift#recon
+
+[account-replicator]
+rsync_module = {replication_ip}::account{replication_port}
+
+[account-auditor]
+
+[account-reaper]
diff --git a/sait/sait3/etc/swift/container-server/3.conf b/sait/sait3/etc/swift/container-server/3.conf
new file mode 100644
index 000000000..b6392b8ef
--- /dev/null
+++ b/sait/sait3/etc/swift/container-server/3.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/3/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.116
+bind_port = 8011
+workers = 1
+user = swift
+log_facility = LOG_LOCAL4
+recon_cache_path = /var/cache/swift3
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon container-server
+
+[app:container-server]
+use = egg:swift#container
+
+[filter:recon]
+use = egg:swift#recon
+
+[container-replicator]
+rsync_module = {replication_ip}::container{replication_port}
+
+[container-updater]
+
+[container-auditor]
+
+[container-sync]
diff --git a/sait/sait3/etc/swift/object-server/3.conf b/sait/sait3/etc/swift/object-server/3.conf
new file mode 100644
index 000000000..6291be2ab
--- /dev/null
+++ b/sait/sait3/etc/swift/object-server/3.conf
@@ -0,0 +1,29 @@
+[DEFAULT]
+devices = /srv/3/node
+mount_check = false
+disable_fallocate = true
+bind_ip = 192.168.22.116
+bind_port = 8010
+workers = 1
+user = swift
+log_facility = LOG_LOCAL4
+recon_cache_path = /var/cache/swift3
+eventlet_debug = true
+
+[pipeline:main]
+pipeline = recon object-server
+
+[app:object-server]
+use = egg:swift#object
+
+[filter:recon]
+use = egg:swift#recon
+
+[object-replicator]
+rsync_module = {replication_ip}::object{replication_port}
+
+[object-reconstructor]
+
+[object-updater]
+
+[object-auditor]
diff --git a/sait/sait3/proxyfs.conf b/sait/sait3/proxyfs.conf
new file mode 100644
index 000000000..29e261e8d
--- /dev/null
+++ b/sait/sait3/proxyfs.conf
@@ -0,0 +1,4 @@
+[Cluster]
+WhoAmI: Peer3
+
+.include ../proxyfs.conf
diff --git a/sait/sait3/usr/lib/systemd/system/proxyfsd.service b/sait/sait3/usr/lib/systemd/system/proxyfsd.service
new file mode 100644
index 000000000..ba2d6dacf
--- /dev/null
+++ b/sait/sait3/usr/lib/systemd/system/proxyfsd.service
@@ -0,0 +1,10 @@
+[Unit]
+Description=Swift ProxyFS service
+
+[Service]
+Environment=GOTRACEBACK=crash
+LimitCORE=infinity
+ExecStart=/vagrant/bin/proxyfsd /vagrant/src/github.com/swiftstack/ProxyFS/sait/sait3/proxyfs.conf
+ExecReload=/usr/bin/kill -HUP $MAINPID
+Restart=no
+KillMode=process
diff --git a/sait/usr/local/go/src/runtime/runtime-gdb.py b/sait/usr/local/go/src/runtime/runtime-gdb.py
new file mode 100644
index 000000000..d889e3d71
--- /dev/null
+++ b/sait/usr/local/go/src/runtime/runtime-gdb.py
@@ -0,0 +1,541 @@
+# Copyright 2010 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+"""GDB Pretty printers and convenience functions for Go's runtime structures.
+
+This script is loaded by GDB when it finds a .debug_gdb_scripts
+section in the compiled binary. The [68]l linkers emit this with a
+path to this file based on the path to the runtime package.
+"""
+
+# Known issues:
+# - pretty printing only works for the 'native' strings. E.g. 'type
+# foo string' will make foo a plain struct in the eyes of gdb,
+# circumventing the pretty print triggering.
+
+
+from __future__ import print_function
+import re
+import sys
+
+print("Loading Go Runtime support.", file=sys.stderr)
+#http://python3porting.com/differences.html
+if sys.version > '3':
+ xrange = range
+# allow to manually reload while developing
+goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
+goobjfile.pretty_printers = []
+
+#
+# Value wrappers
+#
+
+class SliceValue:
+ "Wrapper for slice values."
+
+ def __init__(self, val):
+ self.val = val
+
+ @property
+ def len(self):
+ return int(self.val['len'])
+
+ @property
+ def cap(self):
+ return int(self.val['cap'])
+
+ def __getitem__(self, i):
+ if i < 0 or i >= self.len:
+ raise IndexError(i)
+ ptr = self.val["array"]
+ return (ptr + i).dereference()
+
+
+#
+# Pretty Printers
+#
+
+
+class StringTypePrinter:
+ "Pretty print Go strings."
+
+ pattern = re.compile(r'^struct string( \*)?$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'string'
+
+ def to_string(self):
+ l = int(self.val['len'])
+ return self.val['str'].string("utf-8", "ignore", l)
+
+
+class SliceTypePrinter:
+ "Pretty print slices."
+
+ pattern = re.compile(r'^struct \[\]')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'array'
+
+ def to_string(self):
+ return str(self.val.type)[6:] # skip 'struct '
+
+ def children(self):
+ sval = SliceValue(self.val)
+ if sval.len > sval.cap:
+ return
+ for idx, item in enumerate(sval):
+ yield ('[{0}]'.format(idx), item)
+
+
+class MapTypePrinter:
+ """Pretty print map[K]V types.
+
+ Map-typed go variables are really pointers. dereference them in gdb
+ to inspect their contents with this pretty printer.
+ """
+
+ pattern = re.compile(r'^map\[.*\].*$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'map'
+
+ def to_string(self):
+ return str(self.val.type)
+
+ def children(self):
+ B = self.val['B']
+ buckets = self.val['buckets']
+ oldbuckets = self.val['oldbuckets']
+ flags = self.val['flags']
+ inttype = self.val['hash0'].type
+ cnt = 0
+ for bucket in xrange(2 ** int(B)):
+ bp = buckets + bucket
+ if oldbuckets:
+ oldbucket = bucket & (2 ** (B - 1) - 1)
+ oldbp = oldbuckets + oldbucket
+ oldb = oldbp.dereference()
+ if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet
+ if bucket >= 2 ** (B - 1):
+ continue # already did old bucket
+ bp = oldbp
+ while bp:
+ b = bp.dereference()
+ for i in xrange(8):
+ if b['tophash'][i] != 0:
+ k = b['keys'][i]
+ v = b['values'][i]
+ if flags & 1:
+ k = k.dereference()
+ if flags & 2:
+ v = v.dereference()
+ yield str(cnt), k
+ yield str(cnt + 1), v
+ cnt += 2
+ bp = b['overflow']
+
+
+class ChanTypePrinter:
+ """Pretty print chan[T] types.
+
+ Chan-typed go variables are really pointers. dereference them in gdb
+ to inspect their contents with this pretty printer.
+ """
+
+ pattern = re.compile(r'^struct hchan<.*>$')
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'array'
+
+ def to_string(self):
+ return str(self.val.type)
+
+ def children(self):
+ # see chan.c chanbuf(). et is the type stolen from hchan::recvq->first->elem
+ et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
+ ptr = (self.val.address + 1).cast(et.pointer())
+ for i in range(self.val["qcount"]):
+ j = (self.val["recvx"] + i) % self.val["dataqsiz"]
+ yield ('[{0}]'.format(i), (ptr + j).dereference())
+
+
+#
+# Register all the *Printer classes above.
+#
+
+def makematcher(klass):
+ def matcher(val):
+ try:
+ if klass.pattern.match(str(val.type)):
+ return klass(val)
+ except Exception:
+ pass
+ return matcher
+
+goobjfile.pretty_printers.extend([makematcher(var) for var in vars().values() if hasattr(var, 'pattern')])
+
+#
+# For reference, this is what we're trying to do:
+# eface: p *(*(struct 'runtime.rtype'*)'main.e'->type_->data)->string
+# iface: p *(*(struct 'runtime.rtype'*)'main.s'->tab->Type->data)->string
+#
+# interface types can't be recognized by their name, instead we check
+# if they have the expected fields. Unfortunately the mapping of
+# fields to python attributes in gdb.py isn't complete: you can't test
+# for presence other than by trapping.
+
+
+def is_iface(val):
+ try:
+ return str(val['tab'].type) == "struct runtime.itab *" and str(val['data'].type) == "void *"
+ except gdb.error:
+ pass
+
+
+def is_eface(val):
+ try:
+ return str(val['_type'].type) == "struct runtime._type *" and str(val['data'].type) == "void *"
+ except gdb.error:
+ pass
+
+
+def lookup_type(name):
+ try:
+ return gdb.lookup_type(name)
+ except gdb.error:
+ pass
+ try:
+ return gdb.lookup_type('struct ' + name)
+ except gdb.error:
+ pass
+ try:
+ return gdb.lookup_type('struct ' + name[1:]).pointer()
+ except gdb.error:
+ pass
+
+
+def iface_commontype(obj):
+ if is_iface(obj):
+ go_type_ptr = obj['tab']['_type']
+ elif is_eface(obj):
+ go_type_ptr = obj['_type']
+ else:
+ return
+
+ return go_type_ptr.cast(gdb.lookup_type("struct reflect.rtype").pointer()).dereference()
+
+
+def iface_dtype(obj):
+ "Decode type of the data field of an eface or iface struct."
+ # known issue: dtype_name decoded from runtime.rtype is "nested.Foo"
+ # but the dwarf table lists it as "full/path/to/nested.Foo"
+
+ dynamic_go_type = iface_commontype(obj)
+ if dynamic_go_type is None:
+ return
+ dtype_name = dynamic_go_type['string'].dereference()['str'].string()
+
+ dynamic_gdb_type = lookup_type(dtype_name)
+ if dynamic_gdb_type is None:
+ return
+
+ type_size = int(dynamic_go_type['size'])
+ uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
+ if type_size > uintptr_size:
+ dynamic_gdb_type = dynamic_gdb_type.pointer()
+
+ return dynamic_gdb_type
+
+
+def iface_dtype_name(obj):
+ "Decode type name of the data field of an eface or iface struct."
+
+ dynamic_go_type = iface_commontype(obj)
+ if dynamic_go_type is None:
+ return
+ return dynamic_go_type['string'].dereference()['str'].string()
+
+
+class IfacePrinter:
+ """Pretty print interface values
+
+ Casts the data field to the appropriate dynamic type."""
+
+ def __init__(self, val):
+ self.val = val
+
+ def display_hint(self):
+ return 'string'
+
+ def to_string(self):
+ if self.val['data'] == 0:
+ return 0x0
+ try:
+ dtype = iface_dtype(self.val)
+ except Exception:
+ return ""
+
+ if dtype is None: # trouble looking up, print something reasonable
+ return "({0}){0}".format(iface_dtype_name(self.val), self.val['data'])
+
+ try:
+ return self.val['data'].cast(dtype).dereference()
+ except Exception:
+ pass
+ return self.val['data'].cast(dtype)
+
+
+def ifacematcher(val):
+ if is_iface(val) or is_eface(val):
+ return IfacePrinter(val)
+
+goobjfile.pretty_printers.append(ifacematcher)
+
+#
+# Convenience Functions
+#
+
+
+class GoLenFunc(gdb.Function):
+ "Length of strings, slices, maps or channels"
+
+ how = ((StringTypePrinter, 'len'), (SliceTypePrinter, 'len'), (MapTypePrinter, 'count'), (ChanTypePrinter, 'qcount'))
+
+ def __init__(self):
+ gdb.Function.__init__(self, "len")
+
+ def invoke(self, obj):
+ typename = str(obj.type)
+ for klass, fld in self.how:
+ if klass.pattern.match(typename):
+ return obj[fld]
+
+
+class GoCapFunc(gdb.Function):
+ "Capacity of slices or channels"
+
+ how = ((SliceTypePrinter, 'cap'), (ChanTypePrinter, 'dataqsiz'))
+
+ def __init__(self):
+ gdb.Function.__init__(self, "cap")
+
+ def invoke(self, obj):
+ typename = str(obj.type)
+ for klass, fld in self.how:
+ if klass.pattern.match(typename):
+ return obj[fld]
+
+
+class DTypeFunc(gdb.Function):
+ """Cast Interface values to their dynamic type.
+
+ For non-interface types this behaves as the identity operation.
+ """
+
+ def __init__(self):
+ gdb.Function.__init__(self, "dtype")
+
+ def invoke(self, obj):
+ try:
+ return obj['data'].cast(iface_dtype(obj))
+ except gdb.error:
+ pass
+ return obj
+
+#
+# Commands
+#
+
+sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
+
+
+def linked_list(ptr, linkfield):
+ while ptr:
+ yield ptr
+ ptr = ptr[linkfield]
+
+
+class GoroutinesCmd(gdb.Command):
+ "List all goroutines."
+
+ def __init__(self):
+ gdb.Command.__init__(self, "info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+
+ def invoke(self, _arg, _from_tty):
+ # args = gdb.string_to_argv(arg)
+ vp = gdb.lookup_type('void').pointer()
+ for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
+ if ptr['atomicstatus'] == 6: # 'gdead'
+ continue
+ s = ' '
+ if ptr['m']:
+ s = '*'
+ pc = ptr['sched']['pc'].cast(vp)
+ # python2 will not cast pc (type void*) to an int cleanly
+ # instead python2 and python3 work with the hex string representation
+ # of the void pointer which we can parse back into an int.
+ # int(pc) will not work.
+ try:
+ #python3 / newer versions of gdb
+ pc = int(pc)
+ except gdb.error:
+ # str(pc) can return things like
+ # "0x429d6c ", so
+ # chop at first space.
+ pc = int(str(pc).split(None, 1)[0], 16)
+ blk = gdb.block_for_pc(pc)
+ print(s, ptr['goid'], "{0:8s}".format(sts[int(ptr['atomicstatus'])]), blk.function)
+
+
+def find_goroutine(goid):
+ """
+ find_goroutine attempts to find the goroutine identified by goid
+ and returns a pointer to the goroutine info.
+
+ @param int goid
+
+ @return ptr
+ """
+ vp = gdb.lookup_type('void').pointer()
+ for ptr in SliceValue(gdb.parse_and_eval("'runtime.allgs'")):
+ if ptr['atomicstatus'] == 6: # 'gdead'
+ continue
+ if ptr['goid'] == goid:
+ return ptr
+ return None
+
+def goroutine_info(ptr):
+ '''
+ Given a pointer to goroutine info clean it up a bit
+ and return the interesting info in a dict.
+ '''
+ gorinfo = {}
+ gorinfo['goid'] = ptr['goid']
+ gorinfo['atomicstatus'] = sts[int(ptr['atomicstatus'])]
+ if gorinfo['atomicstatus'] == 'dead':
+ return gorinfo
+
+ vp = gdb.lookup_type('void').pointer()
+ gorinfo['pc_as_str'] = str(ptr['sched']['pc'].cast(vp))
+ gorinfo['sp_as_str'] = str(ptr['sched']['sp'].cast(vp))
+
+ # str(pc) can return things like
+ # "0x429d6c ", so
+ # chop at first space.
+ gorinfo['pc_as_int'] = int(gorinfo['pc_as_str'].split(None, 1)[0], 16)
+ gorinfo['sp_as_int'] = int(gorinfo['sp_as_str'], 16)
+
+ return gorinfo
+
+class GoroutineCmd(gdb.Command):
+ """Execute a gdb command in the context of goroutine .
+
+ Switch PC and SP to the ones in the goroutine's G structure,
+ execute an arbitrary gdb command, and restore PC and SP.
+
+ Usage: (gdb) goroutine <goid> <gdbcmd>
+
+ Use goid 0 to invoke the command on all goroutines.
+
+ Note that it is ill-defined to modify state in the context of a goroutine.
+ Restrict yourself to inspecting values.
+ """
+
+ def __init__(self):
+ gdb.Command.__init__(self, "goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
+
+ def invoke(self, arg, _from_tty):
+ goid, cmd = arg.split(None, 1)
+ goid = gdb.parse_and_eval(goid)
+
+ if goid == 0:
+ goptr_list = SliceValue(gdb.parse_and_eval("'runtime.allgs'"))
+ else:
+ ptr = find_goroutine(goid)
+ if ptr is None:
+ print("No such goroutine: ", goid)
+ return
+ goptr_list = [ ptr ]
+
+ for ptr in goptr_list:
+ gor = goroutine_info(ptr)
+ if gor['atomicstatus'] == 'dead':
+ continue
+
+ print("\ngoroutine %d:" % (gor['goid']))
+ if gor['sp_as_int'] == 0:
+ print("#0 %s -- stack trace unavailable (goroutine status: %s)" %
+ (gor['pc_as_str'], gor['atomicstatus']))
+ if gor['atomicstatus'] == 'running':
+ print("Try checking per thread stacks, i.e. 'thread apply all backtrace'")
+ continue
+
+ save_frame = gdb.selected_frame()
+ gdb.parse_and_eval('$save_sp = $sp')
+ gdb.parse_and_eval('$save_pc = $pc')
+ gdb.parse_and_eval('$sp = {0}'.format(str(gor['sp_as_int'])))
+ gdb.parse_and_eval('$pc = {0}'.format(str(gor['pc_as_int'])))
+ try:
+ gdb.execute(cmd)
+ finally:
+ gdb.parse_and_eval('$sp = $save_sp')
+ gdb.parse_and_eval('$pc = $save_pc')
+ save_frame.select()
+
+
+class GoIfaceCmd(gdb.Command):
+ "Print Static and dynamic interface types"
+
+ def __init__(self):
+ gdb.Command.__init__(self, "iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
+
+ def invoke(self, arg, _from_tty):
+ for obj in gdb.string_to_argv(arg):
+ try:
+ #TODO fix quoting for qualified variable names
+ obj = gdb.parse_and_eval(str(obj))
+ except Exception as e:
+ print("Can't parse ", obj, ": ", e)
+ continue
+
+ if obj['data'] == 0:
+ dtype = "nil"
+ else:
+ dtype = iface_dtype(obj)
+
+ if dtype is None:
+ print("Not an interface: ", obj.type)
+ continue
+
+ print("{0}: {1}".format(obj.type, dtype))
+
+# TODO: print interface's methods and dynamic type's func pointers thereof.
+#rsc: "to find the number of entries in the itab's Fn field look at
+# itab.inter->numMethods
+# i am sure i have the names wrong but look at the interface type
+# and its method count"
+# so Itype will start with a commontype which has kind = interface
+
+#
+# Register all convenience functions and CLI commands
+#
+GoLenFunc()
+GoCapFunc()
+DTypeFunc()
+GoroutinesCmd()
+GoroutineCmd()
+GoIfaceCmd()
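
The convenience commands and pretty printers registered above (info goroutines, goroutine <goid> <gdbcmd>, and the $len/$cap/$dtype functions) become available whenever gdb auto-loads this script for a Go binary; the provisioning script below whitelists it via add-auto-load-safe-path in ~/.gdbinit. A hedged example of a post-mortem session against a proxyfsd core (the core file name is illustrative; systemd-coredump writes xz-compressed cores under /var/lib/systemd/coredump/):

    # Decompress the captured core (file name illustrative)
    unxz /var/lib/systemd/coredump/core.proxyfsd.xz
    # List live goroutines, then take a backtrace in the context of goroutine 1
    gdb /vagrant/bin/proxyfsd /var/lib/systemd/coredump/core.proxyfsd \
        -ex 'info goroutines' -ex 'goroutine 1 bt' -ex 'quit'
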
diff --git a/sait/vagrant_provision.sh b/sait/vagrant_provision.sh
new file mode 100644
index 000000000..443c1960f
--- /dev/null
+++ b/sait/vagrant_provision.sh
@@ -0,0 +1,428 @@
+#!/bin/bash
+#
+# Note: This script assumes it is being run as root
+
+set -e
+set -x
+
+SAIT_INSTANCE=$1
+
+# Enable core dumps
+#
+# Core files will be placed in /var/lib/systemd/coredump/
+# Core files will be compressed with xz... use unxz to uncompress them
+#
+# To install the delve debugger, you will need to `go get -u github.com/go-delve/delve/cmd/dlv`
+# - Note that this will compete with the version of dlv installed for your host GOPATH
+# - As such, delve is not installed during provisioning
+# - Instead, the alias `gogetdlv` (defined below) can be run inside this VM as and when needed
+
+sed -i '/DefaultLimitCORE=/c\DefaultLimitCORE=infinity' /etc/systemd/system.conf
+
+echo "kernel.core_pattern=| /usr/lib/systemd/systemd-coredump %p %u %g %s %t %c %e" > /etc/sysctl.d/90-override.conf
+sysctl kernel.core_pattern='| /usr/lib/systemd/systemd-coredump %p %u %g %s %t %c %e'
+
+echo "GOTRACEBACK=crash" >> /etc/environment
+
+# Install yum-utils to deal with yum repos
+
+yum -y install yum-utils
+
+# Disable generic CentOS 7 repos
+
+yum-config-manager --disable CentOS-Base
+yum-config-manager --disable CentOS-CR
+yum-config-manager --disable CentOS-Debuginfo
+yum-config-manager --disable CentOS-fasttrack
+yum-config-manager --disable CentOS-Media
+yum-config-manager --disable CentOS-Sources
+yum-config-manager --disable CentOS-Vault
+
+rm -rf /etc/yum.repos.d/CentOS-Base.repo
+rm -rf /etc/yum.repos.d/CentOS-CR.repo
+rm -rf /etc/yum.repos.d/CentOS-Debuginfo.repo
+rm -rf /etc/yum.repos.d/CentOS-fasttrack.repo
+rm -rf /etc/yum.repos.d/CentOS-Media.repo
+rm -rf /etc/yum.repos.d/CentOS-Sources.repo
+rm -rf /etc/yum.repos.d/CentOS-Vault.repo
+
+# Add and enable CentOS 7.4 repos
+
+yum-config-manager --add-repo http://vault.centos.org/centos/7.4.1708/os/x86_64/
+yum-config-manager --add-repo http://vault.centos.org/centos/7.4.1708/updates/x86_64/
+yum-config-manager --add-repo http://vault.centos.org/centos/7.4.1708/extras/x86_64/
+yum-config-manager --add-repo http://vault.centos.org/centos/7.4.1708/centosplus/x86_64/
+yum-config-manager --enable vault.centos.org_centos_7.4.1708_os_x86_64_
+yum-config-manager --enable vault.centos.org_centos_7.4.1708_updates_x86_64_
+yum-config-manager --enable vault.centos.org_centos_7.4.1708_extras_x86_64_
+yum-config-manager --enable vault.centos.org_centos_7.4.1708_centosplus_x86_64_
+
+yum clean all
+
+# Install tools needed above what's in a minimal base box
+
+yum -y install wget git nfs-utils vim lsof
+
+# Install Golang
+
+yum -y --disableexcludes=all install gcc
+cd /tmp
+TARFILE_NAME=go1.13.6.linux-amd64.tar.gz
+wget -q https://dl.google.com/go/$TARFILE_NAME
+tar -C /usr/local -xf $TARFILE_NAME
+rm $TARFILE_NAME
+echo "export PATH=\$PATH:/usr/local/go/bin" >> ~vagrant/.bash_profile
+
+# Patch Golang's GDB runtime plug-in
+
+mv /usr/local/go/src/runtime/runtime-gdb.py /usr/local/go/src/runtime/runtime-gdb.py_ORIGINAL
+cp /vagrant/src/github.com/swiftstack/ProxyFS/sait/usr/local/go/src/runtime/runtime-gdb.py /usr/local/go/src/runtime/.
+
+# Install GDB and enable above Golang GDB runtime plug-in as well as other niceties
+
+yum -y install gdb
+echo "add-auto-load-safe-path /usr/local/go/src/runtime/runtime-gdb.py" > /home/vagrant/.gdbinit
+echo "set print thread-events off" >> /home/vagrant/.gdbinit
+echo "set print pretty on" >> /home/vagrant/.gdbinit
+echo "set print object on" >> /home/vagrant/.gdbinit
+echo "set pagination off" >> /home/vagrant/.gdbinit
+chown vagrant:vagrant /home/vagrant/.gdbinit
+chmod 644 /home/vagrant/.gdbinit
+cp /home/vagrant/.gdbinit /root/.
+
+# Install Python 3.6
+
+yum -y install centos-release-scl
+yum -y install rh-python36
+ln -s /opt/rh/rh-python36/root/bin/python3.6 /bin/python3.6
+ln -s /bin/python3.6 /bin/python3
+ln -s /opt/rh/rh-python36/root/usr/include /opt/rh/rh-python36/root/include
+
+# Install Python pip
+
+yum -y install epel-release
+yum -y install python-pip
+pip install --upgrade pip
+
+# Setup ProxyFS build environment
+
+pip install requests
+yum -y install json-c-devel
+yum -y install fuse
+echo "export GOPATH=/vagrant" >> ~vagrant/.bash_profile
+echo "export PATH=\$PATH:\$GOPATH/bin" >> ~vagrant/.bash_profile
+echo "alias cdpfs=\"cd \$GOPATH/src/github.com/swiftstack/ProxyFS\"" >> ~vagrant/.bash_profile
+echo "alias goclean=\"go clean;go clean --cache;go clean --testcache\"" >> ~vagrant/.bash_profile
+echo "alias gogetdlv=\"go get -u github.com/go-delve/delve/cmd/dlv\"" >> ~vagrant/.bash_profile
+echo "user_allow_other" >> /etc/fuse.conf
+
+# Install Python tox
+
+pip install tox==3.5.3
+
+# Setup Swift
+#
+# Guided by https://docs.openstack.org/swift/latest/development_saio.html
+
+# [Setup Swift] Create the swift:swift user
+
+useradd --user-group --groups wheel swift
+chmod 755 ~swift
+
+# Using loopback devices for storage
+
+mkdir -p /srv
+
+truncate -s 0 /srv/swift-disk
+truncate -s 1GB /srv/swift-disk
+mkfs.xfs -f /srv/swift-disk
+mkdir -p /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+echo "/srv/swift-disk /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
+mount /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+chown swift:swift /srv/$SAIT_INSTANCE/node/sdb$SAIT_INSTANCE
+
+# Create Swift temporary file dir
+
+mkdir -p /var/run/swift
+chown -R swift:swift /var/run/swift
+
+# [Setup Swift] Common Post-Device Setup (Add /var boot-time provisioning to /etc/rc.d/rc.local)
+
+echo "mkdir -p /var/cache/swift /var/cache/swift2 /var/cache/swift3 /var/cache/swift4" >> /etc/rc.d/rc.local
+echo "chown swift:swift /var/cache/swift*" >> /etc/rc.d/rc.local
+echo "mkdir -p /var/run/swift" >> /etc/rc.d/rc.local
+echo "chown swift:swift /var/run/swift" >> /etc/rc.d/rc.local
+chmod +x /etc/rc.d/rc.local
+
+# [Setup Swift] Do boot-time provisioning now... as if we just booted
+
+mkdir -p /var/cache/swift /var/cache/swift2 /var/cache/swift3 /var/cache/swift4
+chown swift:swift /var/cache/swift*
+mkdir -p /var/run/swift
+chown swift:swift /var/run/swift
+
+# [Setup Swift] Getting the code
+
+yum -y install \
+ memcached \
+ sqlite \
+ xfsprogs \
+ libffi-devel \
+ xinetd \
+ openssl-devel \
+ python-setuptools \
+ python-coverage \
+ python-devel \
+ python-nose \
+ pyxattr \
+ python-eventlet \
+ python-greenlet \
+ python-paste-deploy \
+ python-netifaces \
+ python-pip \
+ python-dns \
+ python-mock
+
+pip install --upgrade setuptools
+
+yum -y install https://rpmfind.net/linux/fedora/linux/releases/30/Everything/x86_64/os/Packages/l/liberasurecode-1.6.0-3.fc30.x86_64.rpm
+yum -y install https://rpmfind.net/linux/fedora/linux/releases/30/Everything/x86_64/os/Packages/l/liberasurecode-devel-1.6.0-3.fc30.x86_64.rpm
+
+cd ~swift
+git clone -b master --single-branch --depth 1 https://github.com/openstack/python-swiftclient.git
+cd python-swiftclient
+python setup.py develop
+
+echo "export ST_AUTH=http://localhost:8080/auth/v1.0" >> ~vagrant/.bash_profile
+echo "export ST_USER=test:tester" >> ~vagrant/.bash_profile
+echo "export ST_KEY=testing" >> ~vagrant/.bash_profile
+
+cd ~swift
+git clone https://github.com/swiftstack/swift.git
+cd swift
+git checkout ss-release-2.25.0.4
+pip install wheel
+python setup.py bdist_wheel
+pip install --no-binary cryptography -r requirements.txt
+python setup.py develop
+# The following avoid dependency on pip-installed pyOpenSSL being newer than required
+pip install python-openstackclient==3.12.0 python-glanceclient==2.7.0
+pip install -r test-requirements.txt
+
+# [Setup Swift] Setting up rsync
+
+cd /etc
+cp ~swift/swift/doc/saio/rsyncd.conf .
+sed -i "s//swift/" rsyncd.conf
+
+cd /etc/xinetd.d
+echo "disable = no" >> rsync
+
+systemctl restart xinetd.service
+systemctl enable rsyncd.service
+systemctl start rsyncd.service
+
+rsync rsync://pub@localhost/
+
+# [Setup Swift] Setting up memcached
+
+systemctl enable memcached.service
+systemctl start memcached.service
+
+# [Setup Swift] Configuring each node
+
+rm -rf /etc/swift
+cp -R /vagrant/src/github.com/swiftstack/ProxyFS/sait/etc/swift /etc/.
+cp -R /vagrant/src/github.com/swiftstack/ProxyFS/sait/sait$SAIT_INSTANCE/etc/swift /etc/.
+chown -R swift:swift /etc/swift
+
+# [Setup Swift] Setting up scripts for running Swift
+
+mkdir -p ~swift/bin
+
+cd ~swift/bin
+cp /vagrant/src/github.com/swiftstack/ProxyFS/sait/home/swift/bin/* .
+echo "export PATH=\$PATH:~swift/bin" >> ~vagrant/.bash_profile
+
+~swift/bin/remakerings
+
+# Install ProxyFS's pfs_middleware into the "normal" Swift Proxy pipeline
+
+cd /vagrant/src/github.com/swiftstack/ProxyFS/pfs_middleware
+python setup.py develop
+
+# Install ProxyFS's meta_middleware into the "NoAuth" Swift Proxy pipeline
+
+cd /vagrant/src/github.com/swiftstack/ProxyFS/meta_middleware
+python setup.py develop
+
+# Setup AWS access for local vagrant user
+
+pip install awscli-plugin-endpoint
+mkdir -p ~vagrant/.aws
+cat > ~vagrant/.aws/config << EOF
+[plugins]
+endpoint = awscli_plugin_endpoint
+
+[default]
+s3 =
+ endpoint_url = http://127.0.0.1:8080
+ multipart_threshold = 64MB
+ multipart_chunksize = 16MB
+s3api =
+ endpoint_url = http://127.0.0.1:8080
+ multipart_threshold = 64MB
+ multipart_chunksize = 16MB
+EOF
+cat > ~vagrant/.aws/credentials << EOF
+[default]
+aws_access_key_id = test:tester
+aws_secret_access_key = testing
+EOF
+chown -R vagrant:vagrant ~vagrant/.aws
+
+# Ensure proxyfsd logging will work
+
+rm -rf /var/log/proxyfsd
+mkdir -p /var/log/proxyfsd
+touch /var/log/proxyfsd/proxyfsd.log
+chmod 777 /var
+chmod 777 /var/log
+chmod 777 /var/log/proxyfsd
+chmod 666 /var/log/proxyfsd/proxyfsd.log
+
+# Create Mount Points for ProxyFS (FUSE, NFS, & SMB)
+
+if [ "$SAIT_INSTANCE" == "1" ]
+then
+ rm -rf /CommonMountPoint
+ mkdir /CommonMountPoint
+ chmod 777 /CommonMountPoint
+fi
+
+# Install Kerberos client for the SDOM{1|2|3|4}.LOCAL realms hosted by sdc{1|2|3|4}.sdom{1|2|3|4}.local
+
+yum -y install krb5-workstation
+
+cat >> /etc/hosts << EOF
+172.28.128.11 sdc1 sdc1.sdom1.local
+172.28.128.12 sdc2 sdc2.sdom2.local
+172.28.128.13 sdc3 sdc3.sdom3.local
+172.28.128.14 sdc4 sdc4.sdom4.local
+172.28.128.21 saio1 saio1.sdom1.local
+172.28.128.22 saio2 saio2.sdom2.local
+172.28.128.23 saio3 saio3.sdom3.local
+172.28.128.24 saio4 saio4.sdom4.local
+EOF
+
+cat > /etc/krb5.conf.d/SambaDCs << EOF
+[libdefaults]
+dns_lookup_kdc = false
+
+[realms]
+SDOM1.LOCAL = {
+ admin_server = sdc1.sdom1.local
+ kdc = sdc1.sdom1.local
+ default_domain = SDOM1
+}
+SDOM2.LOCAL = {
+ admin_server = sdc2.sdom2.local
+ kdc=sdc2.sdom2.local
+ default_domain = SDOM2
+}
+SDOM3.LOCAL = {
+ admin_server = sdc3.sdom3.local
+ kdc=sdc3.sdom3.local
+ default_domain = SDOM3
+}
+SDOM4.LOCAL = {
+ admin_server = sdc4.sdom4.local
+ kdc=sdc4.sdom4.local
+ default_domain = SDOM4
+}
+
+[domain_realm]
+.sdom1.local = SDOM1.LOCAL
+sdom1.local = SDOM1.LOCAL
+.sdom2.local = SDOM2.LOCAL
+sdom2.local = SDOM2.LOCAL
+.sdom3.local = SDOM3.LOCAL
+sdom3.local = SDOM3.LOCAL
+.sdom4.local = SDOM4.LOCAL
+sdom4.local = SDOM4.LOCAL
+EOF
+
+# Install systemd .service files for ProxyFS
+
+cp /vagrant/src/github.com/swiftstack/ProxyFS/sait/sait$SAIT_INSTANCE/usr/lib/systemd/system/proxyfsd.service /usr/lib/systemd/system/.
+
+# Enable start/stop tools
+
+echo "export PATH=\$PATH:/vagrant/src/github.com/swiftstack/ProxyFS/sait/bin" >> ~vagrant/.bash_profile
+
+# Install wireshark
+
+yum -y install wireshark-gnome \
+ xorg-x11-fonts-Type1 \
+ xorg-x11-xauth \
+ xeyes
+echo "X11Forwarding yes" >> /etc/sysconfig/sshd
+systemctl restart sshd
+usermod -aG wireshark vagrant
+
+# Install benchmark support tools
+
+yum -y install atop-2.3.0-8.el7 bc fio gawk
+
+# Install ssh helper
+
+yum -y install sshpass-1.06-2.el7
+
+# Install dstat
+
+yum -y install dstat
+
+# Install tree
+
+yum -y install tree
+
+# Install jq... a very handy JSON parser
+
+yum -y install jq
+
+# Install and configure a localhost-only one-node etcd cluster
+
+ETCD_VERSION=3.4.7
+wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+tar xzf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+rm -rf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+install -C -m 755 etcd-v${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/
+install -C -m 755 etcd-v${ETCD_VERSION}-linux-amd64/etcdctl /usr/local/bin/
+rm -rf etcd-v${ETCD_VERSION}-linux-amd64
+
+mkdir /etcd
+
+cat > /etc/systemd/system/proxyfs-etcd.service << EOF
+[Unit]
+Description=ProxyFS etcd instance
+After=network.target
+StartLimitIntervalSec=0
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=1
+User=root
+ExecStart=/usr/local/bin/etcd --name proxyfs --data-dir /etcd/proxyfs.etcd --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 --listen-client-urls http://localhost:2379 --advertise-client-urls http://localhost:2379 --initial-cluster-token etcd-cluster --initial-cluster default=http://localhost:2380 --initial-cluster-state new
+
+[Install]
+WantedBy=multi-user.target
+EOF
+
+# Inform systemd that we've updated .service files
+
+systemctl daemon-reload
+
+# All done
+
+echo "SAIT $SAIT_INSTANCE for ProxyFS provisioned"
diff --git a/stats/config.go b/stats/config.go
index 6d48a1c71..4283b9b90 100644
--- a/stats/config.go
+++ b/stats/config.go
@@ -157,6 +157,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
diff --git a/stats/strings.go b/stats/strings.go
index f41bb127a..423bfdba6 100644
--- a/stats/strings.go
+++ b/stats/strings.go
@@ -88,6 +88,11 @@ var (
DirFileBPlusTreeNodeFaults = "proxyfs.inode.payload.node.fault.operations"
+ ReconChecks = "proxyfs.inode.recon.intervals"
+ ReconCheckTriggeredNormalMode = "proxyfs.inode.recon.triggered.normal.mode"
+ ReconCheckTriggeredNoWriteMode = "proxyfs.inode.recon.triggered.no.write.mode"
+ ReconCheckTriggeredReadOnlyMode = "proxyfs.inode.recon.triggered.read.only.mode"
+
InodeTryLockBackoffOps = "proxyfs.fs.trylock.backoff.operations"
InodeTryLockDelayedBackoffOps = "proxyfs.fs.trylock.delayed.backoff.operations"
InodeTryLockSerializedBackoffOps = "proxyfs.fs.trylock.serialized.backoff.operations"
diff --git a/statslogger/config.go b/statslogger/config.go
index 3c00af576..76543ccc1 100644
--- a/statslogger/config.go
+++ b/statslogger/config.go
@@ -105,6 +105,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return nil
}
diff --git a/swiftclient/config.go b/swiftclient/config.go
index 4dd07879f..70ef0f778 100644
--- a/swiftclient/config.go
+++ b/swiftclient/config.go
@@ -356,6 +356,9 @@ func (dummy *globalsStruct) ServeVolume(confMap conf.ConfMap, volumeName string)
func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName string) (err error) {
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
drainConnections()
diff --git a/trackedlock/config.go b/trackedlock/config.go
index 9a8514fd7..99d8c535c 100644
--- a/trackedlock/config.go
+++ b/trackedlock/config.go
@@ -204,6 +204,10 @@ func (dummy *globalsStruct) UnserveVolume(confMap conf.ConfMap, volumeName strin
return nil
}
+func (dummy *globalsStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
+
// PauseAndContract does nothing (lock tracking is not changed until SignalFinish() call)
func (dummy *globalsStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return
diff --git a/transitions/api.go b/transitions/api.go
index 7c8e2e220..ebea13abc 100644
--- a/transitions/api.go
+++ b/transitions/api.go
@@ -40,6 +40,7 @@ type Callbacks interface {
VolumeDestroyed(confMap conf.ConfMap, volumeName string) (err error)
ServeVolume(confMap conf.ConfMap, volumeName string) (err error)
UnserveVolume(confMap conf.ConfMap, volumeName string) (err error)
+ VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error)
SignaledStart(confMap conf.ConfMap) (err error)
SignaledFinish(confMap conf.ConfMap) (err error)
Down(confMap conf.ConfMap) (err error)
@@ -111,6 +112,7 @@ func Up(confMap conf.ConfMap) (err error) {
// these volume sets, the following callbacks will be issued to each of the packages
// that have registered with package transitions:
//
+// VolumeToBeUnserved() - reverse registration order (for each such volume)
// SignaledStart() - reverse registration order
// VolumeGroupCreated() - registration order (for each such volume group)
// VolumeCreated() - registration order (for each such volume)
@@ -134,10 +136,11 @@ func Signaled(confMap conf.ConfMap) (err error) {
// Prior to the Down() callbacks, the following subset of the callbacks triggered
// by a call to Signaled() will be made as if the prior confMap were empty:
//
+// VolumeToBeUnserved() - reverse registration order (for each such volume)
// SignaledStart() - reverse registration order
-// UnserveVolume() - reverse registration order (for each such volume group)
+// UnserveVolume() - reverse registration order (for each such volume)
// VolumeDestroyed() - reverse registration order (for each such volume)
-// VolumeGroupDestroyed() - reverse registration order (for each such volume)
+// VolumeGroupDestroyed() - reverse registration order (for each such volume group)
//
func Down(confMap conf.ConfMap) (err error) {
return down(confMap)
diff --git a/transitions/api_internal.go b/transitions/api_internal.go
index c74fc0fec..28c590d3a 100644
--- a/transitions/api_internal.go
+++ b/transitions/api_internal.go
@@ -33,24 +33,32 @@ type volumeGroupStruct struct {
volumeList map[string]*volumeStruct // Key: volumeStruct.name
}
+type confMapDeltaStruct struct {
+ volumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+ servedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+ remoteVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+
+ createdVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+ movedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+ destroyedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
+
+ volumeList map[string]*volumeStruct // Key: volumeStruct.name
+ servedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+ remoteVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+
+ createdVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+ movedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+ destroyedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+
+ toStopServingVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+ toStartServingVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+}
+
type globalsStruct struct {
- sync.Mutex // Used only for protecting insertions into registration{List|Set} during init() phase
- registrationList *list.List
- registrationSet map[string]*registrationItemStruct // Key: registrationItemStruct.packageName
- currentVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- servedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- remoteVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- createdVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- movedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- destroyedVolumeGroupList map[string]*volumeGroupStruct // Key: volumeGroupStruct.name
- currentVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- servedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- remoteVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- createdVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- movedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- destroyedVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- toStopServingVolumeList map[string]*volumeStruct // Key: volumeStruct.name
- toStartServingVolumeList map[string]*volumeStruct // Key: volumeStruct.name
+ sync.Mutex // Used only for protecting insertions into registration{List|Set} during init() phase
+ registrationList *list.List
+ registrationSet map[string]*registrationItemStruct // Key: registrationItemStruct.packageName
+ currentConfMapDelta *confMapDeltaStruct
}
var globals globalsStruct
@@ -83,6 +91,7 @@ func register(packageName string, callbacks Callbacks) {
func up(confMap conf.ConfMap) (err error) {
var (
+ newConfMapDelta *confMapDeltaStruct
registrationItem *registrationItemStruct
registrationListElement *list.Element
registrationListPackageNameStringSlice []string
@@ -101,36 +110,51 @@ func up(confMap conf.ConfMap) (err error) {
}
}()
- globals.currentVolumeGroupList = make(map[string]*volumeGroupStruct)
- globals.servedVolumeGroupList = make(map[string]*volumeGroupStruct)
- globals.remoteVolumeGroupList = make(map[string]*volumeGroupStruct)
+ globals.currentConfMapDelta = &confMapDeltaStruct{
+ volumeGroupList: make(map[string]*volumeGroupStruct),
+ servedVolumeGroupList: make(map[string]*volumeGroupStruct),
+ remoteVolumeGroupList: make(map[string]*volumeGroupStruct),
- globals.currentVolumeList = make(map[string]*volumeStruct)
- globals.servedVolumeList = make(map[string]*volumeStruct)
- globals.remoteVolumeList = make(map[string]*volumeStruct)
+ createdVolumeGroupList: make(map[string]*volumeGroupStruct),
+ movedVolumeGroupList: make(map[string]*volumeGroupStruct),
+ destroyedVolumeGroupList: make(map[string]*volumeGroupStruct),
- err = computeConfMapDelta(confMap)
+ volumeList: make(map[string]*volumeStruct),
+ servedVolumeList: make(map[string]*volumeStruct),
+ remoteVolumeList: make(map[string]*volumeStruct),
+
+ createdVolumeList: make(map[string]*volumeStruct),
+ movedVolumeList: make(map[string]*volumeStruct),
+ destroyedVolumeList: make(map[string]*volumeStruct),
+
+ toStopServingVolumeList: make(map[string]*volumeStruct),
+ toStartServingVolumeList: make(map[string]*volumeStruct),
+ }
+
+ newConfMapDelta, err = computeConfMapDelta(confMap)
if nil != err {
return
}
- if 0 != len(globals.movedVolumeGroupList) {
+ if 0 != len(newConfMapDelta.movedVolumeGroupList) {
err = fmt.Errorf("transitions.Up() did not expect movedVolumeGroupList to be non-empty")
return
}
- if 0 != len(globals.destroyedVolumeGroupList) {
+ if 0 != len(newConfMapDelta.destroyedVolumeGroupList) {
err = fmt.Errorf("transitions.Up() did not expect destroyedVolumeGroupList to be non-empty")
return
}
- if 0 != len(globals.movedVolumeList) {
+ if 0 != len(newConfMapDelta.movedVolumeList) {
err = fmt.Errorf("transitions.Up() did not expect movedVolumeList to be non-empty")
return
}
- if 0 != len(globals.destroyedVolumeList) {
+ if 0 != len(newConfMapDelta.destroyedVolumeList) {
err = fmt.Errorf("transitions.Up() did not expect destroyedVolumeList to be non-empty")
return
}
+ globals.currentConfMapDelta = newConfMapDelta
+
// Issue Callbacks.Up() calls from Front() to Back() of globals.registrationList
registrationListElement = globals.registrationList.Front()
@@ -167,7 +191,7 @@ func up(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeGroupName, volumeGroup = range globals.createdVolumeGroupList {
+ for volumeGroupName, volumeGroup = range globals.currentConfMapDelta.createdVolumeGroupList {
logger.Tracef("transitions.Up() calling %s.VolumeGroupCreated(,%s,%s,%s)", registrationItem.packageName, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
err = registrationItem.callbacks.VolumeGroupCreated(confMap, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
if nil != err {
@@ -185,7 +209,7 @@ func up(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.createdVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.createdVolumeList {
logger.Tracef("transitions.Up() calling %s.VolumeCreated(,%s,%s)", registrationItem.packageName, volumeName, volume.volumeGroup.name)
err = registrationItem.callbacks.VolumeCreated(confMap, volumeName, volume.volumeGroup.name)
if nil != err {
@@ -203,7 +227,7 @@ func up(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.servedVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.servedVolumeList {
logger.Tracef("transitions.Up() calling %s.ServeVolume(,%s)", registrationItem.packageName, volumeName)
err = registrationItem.callbacks.ServeVolume(confMap, volumeName)
if nil != err {
@@ -236,6 +260,7 @@ func up(confMap conf.ConfMap) (err error) {
func signaled(confMap conf.ConfMap) (err error) {
var (
+ newConfMapDelta *confMapDeltaStruct
registrationItem *registrationItemStruct
registrationListElement *list.Element
volume *volumeStruct
@@ -253,11 +278,31 @@ func signaled(confMap conf.ConfMap) (err error) {
}
}()
- err = computeConfMapDelta(confMap)
+ newConfMapDelta, err = computeConfMapDelta(confMap)
if nil != err {
return
}
+ globals.currentConfMapDelta = newConfMapDelta
+
+ // Issue Callbacks.VolumeToBeUnserved() calls from Back() to Front() of globals.registrationList
+
+ registrationListElement = globals.registrationList.Back()
+
+ for nil != registrationListElement {
+ registrationItem = registrationListElement.Value.(*registrationItemStruct)
+ for volumeName = range globals.currentConfMapDelta.toStopServingVolumeList {
+ logger.Tracef("transitions.Signaled() calling %s.VolumeToBeUnserved(,%s)", registrationItem.packageName, volumeName)
+ err = registrationItem.callbacks.VolumeToBeUnserved(confMap, volumeName)
+ if nil != err {
+ logger.Errorf("transitions.Signaled() call to %s.VolumeToBeUnserved(,%s) failed: %v", registrationItem.packageName, volumeName, err)
+ err = fmt.Errorf("%s.VolumeToBeUnserved(,%s) failed: %v", registrationItem.packageName, volumeName, err)
+ return
+ }
+ }
+ registrationListElement = registrationListElement.Prev()
+ }
+
// Issue Callbacks.SignaledStart() calls from Back() to Front() of globals.registrationList
registrationListElement = globals.registrationList.Back()
@@ -280,7 +325,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName = range globals.toStopServingVolumeList {
+ for volumeName = range globals.currentConfMapDelta.toStopServingVolumeList {
logger.Tracef("transitions.Signaled() calling %s.UnserveVolume(,%s)", registrationItem.packageName, volumeName)
err = registrationItem.callbacks.UnserveVolume(confMap, volumeName)
if nil != err {
@@ -298,7 +343,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeGroupName, volumeGroup = range globals.createdVolumeGroupList {
+ for volumeGroupName, volumeGroup = range globals.currentConfMapDelta.createdVolumeGroupList {
logger.Tracef("transitions.Signaled() calling %s.VolumeGroupCreated(,%s,%s,%s)", registrationItem.packageName, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
err = registrationItem.callbacks.VolumeGroupCreated(confMap, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
if nil != err {
@@ -316,7 +361,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.createdVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.createdVolumeList {
logger.Tracef("transitions.Signaled() calling %s.VolumeCreated(,%s,%s)", registrationItem.packageName, volumeName, volume.volumeGroup.name)
err = registrationItem.callbacks.VolumeCreated(confMap, volumeName, volume.volumeGroup.name)
if nil != err {
@@ -334,7 +379,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeGroupName, volumeGroup = range globals.movedVolumeGroupList {
+ for volumeGroupName, volumeGroup = range globals.currentConfMapDelta.movedVolumeGroupList {
logger.Tracef("transitions.Signaled() calling %s.VolumeGroupMoved(,%s,%s,%s)", registrationItem.packageName, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
err = registrationItem.callbacks.VolumeGroupMoved(confMap, volumeGroupName, volumeGroup.activePeer, volumeGroup.virtualIPAddr)
if nil != err {
@@ -352,7 +397,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.movedVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.movedVolumeList {
logger.Tracef("transitions.Signaled() calling %s.VolumeMoved(,%s,%s)", registrationItem.packageName, volumeName, volume.volumeGroup.name)
err = registrationItem.callbacks.VolumeMoved(confMap, volumeName, volume.volumeGroup.name)
if nil != err {
@@ -370,7 +415,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.destroyedVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.destroyedVolumeList {
logger.Tracef("transitions.Signaled() calling %s.VolumeDestroyed(,%s)", registrationItem.packageName, volumeName)
err = registrationItem.callbacks.VolumeDestroyed(confMap, volumeName)
if nil != err {
@@ -388,7 +433,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeGroupName = range globals.destroyedVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.destroyedVolumeGroupList {
logger.Tracef("transitions.Signaled() calling %s.VolumeGroupDestroyed(,%s)", registrationItem.packageName, volumeGroupName)
err = registrationItem.callbacks.VolumeGroupDestroyed(confMap, volumeGroupName)
if nil != err {
@@ -406,7 +451,7 @@ func signaled(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName, volume = range globals.toStartServingVolumeList {
+ for volumeName, volume = range globals.currentConfMapDelta.toStartServingVolumeList {
if volume.served {
logger.Tracef("transitions.Signaled() calling %s.ServeVolume(,%s)", registrationItem.packageName, volumeGroupName)
err = registrationItem.callbacks.ServeVolume(confMap, volumeName)
@@ -441,6 +486,7 @@ func signaled(confMap conf.ConfMap) (err error) {
func down(confMap conf.ConfMap) (err error) {
var (
+ newConfMapDelta *confMapDeltaStruct
registrationItem *registrationItemStruct
registrationListElement *list.Element
volumeGroupName string
@@ -455,36 +501,56 @@ func down(confMap conf.ConfMap) (err error) {
}
}()
- err = computeConfMapDelta(confMap)
+ newConfMapDelta, err = computeConfMapDelta(confMap)
if nil != err {
return
}
- if 0 != len(globals.createdVolumeGroupList) {
+ if 0 != len(newConfMapDelta.createdVolumeGroupList) {
err = fmt.Errorf("transitions.Down() did not expect createdVolumeGroupList to be non-empty")
return
}
- if 0 != len(globals.movedVolumeGroupList) {
+ if 0 != len(newConfMapDelta.movedVolumeGroupList) {
err = fmt.Errorf("transitions.Down() did not expect movedVolumeGroupList to be non-empty")
return
}
- if 0 != len(globals.destroyedVolumeGroupList) {
+ if 0 != len(newConfMapDelta.destroyedVolumeGroupList) {
err = fmt.Errorf("transitions.Down() did not expect destroyedVolumeGroupList to be non-empty")
return
}
- if 0 != len(globals.createdVolumeList) {
+ if 0 != len(newConfMapDelta.createdVolumeList) {
err = fmt.Errorf("transitions.Down() did not expect createdVolumeList to be non-empty")
return
}
- if 0 != len(globals.movedVolumeList) {
+ if 0 != len(newConfMapDelta.movedVolumeList) {
err = fmt.Errorf("transitions.Down() did not expect movedVolumeList to be non-empty")
return
}
- if 0 != len(globals.destroyedVolumeList) {
+ if 0 != len(newConfMapDelta.destroyedVolumeList) {
err = fmt.Errorf("transitions.Down() did not expect destroyedVolumeList to be non-empty")
return
}
+ globals.currentConfMapDelta = newConfMapDelta
+
+ // Issue Callbacks.VolumeToBeUnserved() calls from Back() to Front() of globals.registrationList
+
+ registrationListElement = globals.registrationList.Back()
+
+ for nil != registrationListElement {
+ registrationItem = registrationListElement.Value.(*registrationItemStruct)
+ for volumeName = range globals.currentConfMapDelta.servedVolumeList {
+ logger.Tracef("transitions.Signaled() calling %s.VolumeToBeUnserved(,%s)", registrationItem.packageName, volumeName)
+ err = registrationItem.callbacks.VolumeToBeUnserved(confMap, volumeName)
+ if nil != err {
+ logger.Errorf("transitions.Signaled() call to %s.VolumeToBeUnserved(,%s) failed: %v", registrationItem.packageName, volumeName, err)
+ err = fmt.Errorf("%s.VolumeToBeUnserved(,%s) failed: %v", registrationItem.packageName, volumeName, err)
+ return
+ }
+ }
+ registrationListElement = registrationListElement.Prev()
+ }
+
// Issue Callbacks.SignaledStart() calls from Back() to Front() of globals.registrationList
registrationListElement = globals.registrationList.Back()
@@ -507,7 +573,7 @@ func down(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName = range globals.servedVolumeList {
+ for volumeName = range globals.currentConfMapDelta.servedVolumeList {
logger.Tracef("transitions.Down() calling %s.UnserveVolume(,%s)", registrationItem.packageName, volumeName)
err = registrationItem.callbacks.UnserveVolume(confMap, volumeName)
if nil != err {
@@ -525,7 +591,7 @@ func down(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeName = range globals.currentVolumeList {
+ for volumeName = range globals.currentConfMapDelta.volumeList {
logger.Tracef("transitions.Down() calling %s.VolumeDestroyed(,%s)", registrationItem.packageName, volumeName)
err = registrationItem.callbacks.VolumeDestroyed(confMap, volumeName)
if nil != err {
@@ -543,7 +609,7 @@ func down(confMap conf.ConfMap) (err error) {
for nil != registrationListElement {
registrationItem = registrationListElement.Value.(*registrationItemStruct)
- for volumeGroupName = range globals.currentVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.volumeGroupList {
logger.Tracef("transitions.Down() calling %s.VolumeGroupDestroyed(,%s)", registrationItem.packageName, volumeGroupName)
err = registrationItem.callbacks.VolumeGroupDestroyed(confMap, volumeGroupName)
if nil != err {
@@ -574,24 +640,18 @@ func down(confMap conf.ConfMap) (err error) {
return
}
-func computeConfMapDelta(confMap conf.ConfMap) (err error) {
+func computeConfMapDelta(confMap conf.ConfMap) (newConfMapDelta *confMapDeltaStruct, err error) {
var (
- fsGlobalsVolumeGroupList []string
- newCurrentVolumeGroupList map[string]*volumeGroupStruct
- newCurrentVolumeList map[string]*volumeStruct
- newRemoteVolumeGroupList map[string]*volumeGroupStruct
- newRemoteVolumeList map[string]*volumeStruct
- newServedVolumeGroupList map[string]*volumeGroupStruct
- newServedVolumeList map[string]*volumeStruct
- ok bool
- volume *volumeStruct
- volumeGroup *volumeGroupStruct
- volumeGroupName string
- volumeGroupVolumeList []string
- volumeGroupPreviously *volumeGroupStruct
- volumeName string
- volumePreviously *volumeStruct
- whoAmI string
+ fsGlobalsVolumeGroupList []string
+ ok bool
+ volume *volumeStruct
+ volumeGroup *volumeGroupStruct
+ volumeGroupName string
+ volumeGroupVolumeList []string
+ volumeGroupPreviously *volumeGroupStruct
+ volumeName string
+ volumePreviously *volumeStruct
+ whoAmI string
)
// TODO: Remove call to upgradeConfMapIfNeeded() once backwards compatibility is no longer required
@@ -601,26 +661,28 @@ func computeConfMapDelta(confMap conf.ConfMap) (err error) {
return
}
- // Initialize lists used in computation (those in globalsStruct are actually the func output)
+ // Initialize lists used in computation
- newCurrentVolumeGroupList = make(map[string]*volumeGroupStruct)
- newServedVolumeGroupList = make(map[string]*volumeGroupStruct)
- newRemoteVolumeGroupList = make(map[string]*volumeGroupStruct)
+ newConfMapDelta = &confMapDeltaStruct{
+ volumeGroupList: make(map[string]*volumeGroupStruct),
+ servedVolumeGroupList: make(map[string]*volumeGroupStruct),
+ remoteVolumeGroupList: make(map[string]*volumeGroupStruct),
- globals.createdVolumeGroupList = make(map[string]*volumeGroupStruct)
- globals.movedVolumeGroupList = make(map[string]*volumeGroupStruct)
- globals.destroyedVolumeGroupList = make(map[string]*volumeGroupStruct)
+ createdVolumeGroupList: make(map[string]*volumeGroupStruct),
+ movedVolumeGroupList: make(map[string]*volumeGroupStruct),
+ destroyedVolumeGroupList: make(map[string]*volumeGroupStruct),
- newCurrentVolumeList = make(map[string]*volumeStruct)
- newServedVolumeList = make(map[string]*volumeStruct)
- newRemoteVolumeList = make(map[string]*volumeStruct)
+ volumeList: make(map[string]*volumeStruct),
+ servedVolumeList: make(map[string]*volumeStruct),
+ remoteVolumeList: make(map[string]*volumeStruct),
- globals.createdVolumeList = make(map[string]*volumeStruct)
- globals.movedVolumeList = make(map[string]*volumeStruct)
- globals.destroyedVolumeList = make(map[string]*volumeStruct)
+ createdVolumeList: make(map[string]*volumeStruct),
+ movedVolumeList: make(map[string]*volumeStruct),
+ destroyedVolumeList: make(map[string]*volumeStruct),
- globals.toStopServingVolumeList = make(map[string]*volumeStruct)
- globals.toStartServingVolumeList = make(map[string]*volumeStruct)
+ toStopServingVolumeList: make(map[string]*volumeStruct),
+ toStartServingVolumeList: make(map[string]*volumeStruct),
+ }
// Ingest confMap
@@ -637,7 +699,7 @@ func computeConfMapDelta(confMap conf.ConfMap) (err error) {
for _, volumeGroupName = range fsGlobalsVolumeGroupList {
volumeGroup = &volumeGroupStruct{name: volumeGroupName, volumeList: make(map[string]*volumeStruct)}
- newCurrentVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.volumeGroupList[volumeGroupName] = volumeGroup
volumeGroup.activePeer, err = confMap.FetchOptionValueString("VolumeGroup:"+volumeGroupName, "PrimaryPeer")
if nil != err {
@@ -651,9 +713,9 @@ func computeConfMapDelta(confMap conf.ConfMap) (err error) {
volumeGroup.served = (whoAmI == volumeGroup.activePeer)
if volumeGroup.served {
- newServedVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.servedVolumeGroupList[volumeGroupName] = volumeGroup
} else {
- newRemoteVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.remoteVolumeGroupList[volumeGroupName] = volumeGroup
}
volumeGroup.virtualIPAddr, err = confMap.FetchOptionValueString("VolumeGroup:"+volumeGroupName, "VirtualIPAddr")
@@ -673,12 +735,12 @@ func computeConfMapDelta(confMap conf.ConfMap) (err error) {
for _, volumeName = range volumeGroupVolumeList {
volume = &volumeStruct{name: volumeName, served: volumeGroup.served, volumeGroup: volumeGroup}
- newCurrentVolumeList[volumeName] = volume
+ newConfMapDelta.volumeList[volumeName] = volume
if volume.served {
- newServedVolumeList[volumeName] = volume
+ newConfMapDelta.servedVolumeList[volumeName] = volume
} else {
- newRemoteVolumeList[volumeName] = volume
+ newConfMapDelta.remoteVolumeList[volumeName] = volume
}
volumeGroup.volumeList[volumeName] = volume
@@ -687,97 +749,91 @@ func computeConfMapDelta(confMap conf.ConfMap) (err error) {
// Compute changes to VolumeGroupList
- for volumeGroupName, volumeGroup = range newCurrentVolumeGroupList {
- volumeGroupPreviously, ok = globals.currentVolumeGroupList[volumeGroupName]
+ for volumeGroupName, volumeGroup = range newConfMapDelta.volumeGroupList {
+ volumeGroupPreviously, ok = globals.currentConfMapDelta.volumeGroupList[volumeGroupName]
if ok {
if volumeGroupPreviously.activePeer != volumeGroup.activePeer {
- globals.movedVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.movedVolumeGroupList[volumeGroupName] = volumeGroup
}
} else {
- globals.createdVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.createdVolumeGroupList[volumeGroupName] = volumeGroup
}
}
- for volumeGroupName, volumeGroup = range globals.currentVolumeGroupList {
- _, ok = newCurrentVolumeGroupList[volumeGroupName]
+ for volumeGroupName, volumeGroup = range globals.currentConfMapDelta.volumeGroupList {
+ _, ok = newConfMapDelta.volumeGroupList[volumeGroupName]
if !ok {
- globals.destroyedVolumeGroupList[volumeGroupName] = volumeGroup
+ newConfMapDelta.destroyedVolumeGroupList[volumeGroupName] = volumeGroup
}
}
// Compute changes to VolumeList
- for volumeName, volume = range newCurrentVolumeList {
- volumePreviously, ok = globals.currentVolumeList[volumeName]
+ for volumeName, volume = range newConfMapDelta.volumeList {
+ volumePreviously, ok = globals.currentConfMapDelta.volumeList[volumeName]
if ok {
if volumePreviously.volumeGroup.name != volume.volumeGroup.name {
- globals.movedVolumeList[volumeName] = volume
+ newConfMapDelta.movedVolumeList[volumeName] = volume
}
} else {
- globals.createdVolumeList[volumeName] = volume
+ newConfMapDelta.createdVolumeList[volumeName] = volume
}
}
- for volumeName, volume = range globals.currentVolumeList {
- _, ok = newCurrentVolumeList[volumeName]
+ for volumeName, volume = range globals.currentConfMapDelta.volumeList {
+ _, ok = newConfMapDelta.volumeList[volumeName]
if !ok {
- globals.destroyedVolumeList[volumeName] = volume
+ newConfMapDelta.destroyedVolumeList[volumeName] = volume
}
}
// Compute to{Stop|Start}ServingVolumeList
- for volumeName, volume = range globals.destroyedVolumeList {
- _, ok = globals.servedVolumeList[volumeName]
+ for volumeName, volume = range newConfMapDelta.destroyedVolumeList {
+ _, ok = globals.currentConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStopServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStopServingVolumeList[volumeName] = volume
}
}
- for volumeName, volume = range globals.movedVolumeList {
- _, ok = globals.servedVolumeList[volumeName]
+ for volumeName, volume = range newConfMapDelta.movedVolumeList {
+ _, ok = globals.currentConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStopServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStopServingVolumeList[volumeName] = volume
}
}
- for _, volumeGroup = range globals.movedVolumeGroupList {
+ for _, volumeGroup = range newConfMapDelta.movedVolumeGroupList {
for volumeName, volume = range volumeGroup.volumeList {
- _, ok = globals.servedVolumeList[volumeName]
+ _, ok = globals.currentConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStopServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStopServingVolumeList[volumeName] = volume
}
}
}
- for _, volumeGroup = range globals.movedVolumeGroupList {
+ for _, volumeGroup = range newConfMapDelta.movedVolumeGroupList {
for volumeName, volume = range volumeGroup.volumeList {
- _, ok = newServedVolumeList[volumeName]
+ _, ok = newConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStartServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStartServingVolumeList[volumeName] = volume
}
}
}
- for volumeName, volume = range globals.movedVolumeList {
- _, ok = newServedVolumeList[volumeName]
+ for volumeName, volume = range newConfMapDelta.movedVolumeList {
+ _, ok = newConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStartServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStartServingVolumeList[volumeName] = volume
}
}
- for volumeName, volume = range globals.createdVolumeList {
- _, ok = newServedVolumeList[volumeName]
+ for volumeName, volume = range newConfMapDelta.createdVolumeList {
+ _, ok = newConfMapDelta.servedVolumeList[volumeName]
if ok {
- globals.toStartServingVolumeList[volumeName] = volume
+ newConfMapDelta.toStartServingVolumeList[volumeName] = volume
}
}
- // Finally, update {current|served|remote}Volume{|Group}List fields in globalsStruct
-
- globals.currentVolumeGroupList = newCurrentVolumeGroupList
- globals.servedVolumeGroupList = newServedVolumeGroupList
- globals.remoteVolumeGroupList = newRemoteVolumeGroupList
- globals.currentVolumeList = newCurrentVolumeList
- globals.servedVolumeList = newServedVolumeList
- globals.remoteVolumeList = newRemoteVolumeList
+ // All done
+ err = nil
return
}
@@ -1037,6 +1093,10 @@ func (loggerCallbacksInterface *loggerCallbacksInterfaceStruct) UnserveVolume(co
return nil
}
+func (loggerCallbacksInterface *loggerCallbacksInterfaceStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ return nil
+}
+
func (loggerCallbacksInterface *loggerCallbacksInterfaceStruct) SignaledStart(confMap conf.ConfMap) (err error) {
return logger.SignaledStart(confMap)
}
@@ -1073,11 +1133,11 @@ func dumpGlobals(indent string) {
fmt.Println()
}
- if 0 == len(globals.currentVolumeGroupList) {
- fmt.Printf("%scurrentVolumeGroupList: \n", indent)
+ if 0 == len(globals.currentConfMapDelta.volumeGroupList) {
+ fmt.Printf("%svolumeGroupList: \n", indent)
} else {
- fmt.Printf("%scurrentVolumeGroupList:\n", indent)
- for volumeGroupName, volumeGroup = range globals.currentVolumeGroupList {
+ fmt.Printf("%svolumeGroupList:\n", indent)
+ for volumeGroupName, volumeGroup = range globals.currentConfMapDelta.volumeGroupList {
fmt.Printf("%s %+v [volumeList:", indent, volumeGroup)
for volumeName = range volumeGroup.volumeList {
fmt.Printf(" %s", volumeName)
@@ -1086,130 +1146,130 @@ func dumpGlobals(indent string) {
}
}
- if 0 == len(globals.servedVolumeGroupList) {
+ if 0 == len(globals.currentConfMapDelta.servedVolumeGroupList) {
fmt.Printf("%sservedVolumeGroupList: \n", indent)
} else {
fmt.Printf("%sservedVolumeGroupList:", indent)
- for volumeGroupName = range globals.servedVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.servedVolumeGroupList {
fmt.Printf(" %s", volumeGroupName)
}
fmt.Println()
}
- if 0 == len(globals.remoteVolumeGroupList) {
+ if 0 == len(globals.currentConfMapDelta.remoteVolumeGroupList) {
fmt.Printf("%sremoteVolumeGroupList: \n", indent)
} else {
fmt.Printf("%sremoteVolumeGroupList:", indent)
- for volumeGroupName = range globals.remoteVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.remoteVolumeGroupList {
fmt.Printf(" %s", volumeGroupName)
}
fmt.Println()
}
- if 0 == len(globals.createdVolumeGroupList) {
+ if 0 == len(globals.currentConfMapDelta.createdVolumeGroupList) {
fmt.Printf("%screatedVolumeGroupList: \n", indent)
} else {
fmt.Printf("%screatedVolumeGroupList:", indent)
- for volumeGroupName = range globals.createdVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.createdVolumeGroupList {
fmt.Printf(" %s", volumeGroupName)
}
fmt.Println()
}
- if 0 == len(globals.movedVolumeGroupList) {
+ if 0 == len(globals.currentConfMapDelta.movedVolumeGroupList) {
fmt.Printf("%smovedVolumeGroupList: \n", indent)
} else {
fmt.Printf("%smovedVolumeGroupList:", indent)
- for volumeGroupName = range globals.movedVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.movedVolumeGroupList {
fmt.Printf(" %s", volumeGroupName)
}
fmt.Println()
}
- if 0 == len(globals.destroyedVolumeGroupList) {
+ if 0 == len(globals.currentConfMapDelta.destroyedVolumeGroupList) {
fmt.Printf("%sdestroyedVolumeGroupList: \n", indent)
} else {
fmt.Printf("%sdestroyedVolumeGroupList:", indent)
- for volumeGroupName = range globals.destroyedVolumeGroupList {
+ for volumeGroupName = range globals.currentConfMapDelta.destroyedVolumeGroupList {
fmt.Printf(" %s", volumeGroupName)
}
fmt.Println()
}
- if 0 == len(globals.currentVolumeList) {
- fmt.Printf("%scurrentVolumeList: \n", indent)
+ if 0 == len(globals.currentConfMapDelta.volumeList) {
+ fmt.Printf("%svolumeList: \n", indent)
} else {
- fmt.Printf("%scurrentVolumeList:\n", indent)
- for volumeName, volume = range globals.currentVolumeList {
+ fmt.Printf("%svolumeList:\n", indent)
+ for volumeName, volume = range globals.currentConfMapDelta.volumeList {
fmt.Printf("%s %+v [volumeGroup: %s]\n", indent, volume, volume.volumeGroup.name)
}
}
- if 0 == len(globals.servedVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.servedVolumeList) {
fmt.Printf("%sservedVolumeList: \n", indent)
} else {
fmt.Printf("%sservedVolumeList:", indent)
- for volumeName = range globals.servedVolumeList {
+ for volumeName = range globals.currentConfMapDelta.servedVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.remoteVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.remoteVolumeList) {
fmt.Printf("%sremoteVolumeList: \n", indent)
} else {
fmt.Printf("%sremoteVolumeList:", indent)
- for volumeName = range globals.remoteVolumeList {
+ for volumeName = range globals.currentConfMapDelta.remoteVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.createdVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.createdVolumeList) {
fmt.Printf("%screatedVolumeList: \n", indent)
} else {
fmt.Printf("%screatedVolumeList:", indent)
- for volumeName = range globals.createdVolumeList {
+ for volumeName = range globals.currentConfMapDelta.createdVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.movedVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.movedVolumeList) {
fmt.Printf("%smovedVolumeList: \n", indent)
} else {
fmt.Printf("%smovedVolumeList:", indent)
- for volumeName = range globals.movedVolumeList {
+ for volumeName = range globals.currentConfMapDelta.movedVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.destroyedVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.destroyedVolumeList) {
fmt.Printf("%sdestroyedVolumeList: \n", indent)
} else {
fmt.Printf("%sdestroyedVolumeList:", indent)
- for volumeName = range globals.destroyedVolumeList {
+ for volumeName = range globals.currentConfMapDelta.destroyedVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.toStopServingVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.toStopServingVolumeList) {
fmt.Printf("%stoStopServingVolumeList: \n", indent)
} else {
fmt.Printf("%stoStopServingVolumeList:", indent)
- for volumeName = range globals.toStopServingVolumeList {
+ for volumeName = range globals.currentConfMapDelta.toStopServingVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
}
- if 0 == len(globals.toStartServingVolumeList) {
+ if 0 == len(globals.currentConfMapDelta.toStartServingVolumeList) {
fmt.Printf("%stoStartServingVolumeList: \n", indent)
} else {
fmt.Printf("%stoStartServingVolumeList:", indent)
- for volumeName = range globals.toStartServingVolumeList {
+ for volumeName = range globals.currentConfMapDelta.toStartServingVolumeList {
fmt.Printf(" %s", volumeName)
}
fmt.Println()
diff --git a/transitions/api_test.go b/transitions/api_test.go
index 9fa310de7..5d0b33d70 100644
--- a/transitions/api_test.go
+++ b/transitions/api_test.go
@@ -516,6 +516,10 @@ func TestAPI(t *testing.T) {
testValidateCallbackLog(t,
"Move VolumeB from Peer0 to Peer1",
[][]string{
+ []string{
+ "testCallbacksInterface2.VolumeToBeUnserved(,VolumeB) called"},
+ []string{
+ "testCallbacksInterface1.VolumeToBeUnserved(,VolumeB) called"},
[]string{
"testCallbacksInterface2.SignaledStart() called"},
[]string{
@@ -642,6 +646,10 @@ func TestAPI(t *testing.T) {
testValidateCallbackLog(t,
"Destroy VolumeD",
[][]string{
+ []string{
+ "testCallbacksInterface2.VolumeToBeUnserved(,VolumeD) called"},
+ []string{
+ "testCallbacksInterface1.VolumeToBeUnserved(,VolumeD) called"},
[]string{
"testCallbacksInterface2.SignaledStart() called"},
[]string{
@@ -811,6 +819,12 @@ func TestAPI(t *testing.T) {
testValidateCallbackLog(t,
"Perform Down() sequence",
[][]string{
+ []string{
+ "testCallbacksInterface2.VolumeToBeUnserved(,VolumeA) called",
+ "testCallbacksInterface2.VolumeToBeUnserved(,VolumeE) called"},
+ []string{
+ "testCallbacksInterface1.VolumeToBeUnserved(,VolumeA) called",
+ "testCallbacksInterface1.VolumeToBeUnserved(,VolumeE) called"},
[]string{
"testCallbacksInterface2.SignaledStart() called"},
[]string{
@@ -914,6 +928,13 @@ func (testCallbacksInterface *testCallbacksInterfaceStruct) UnserveVolume(confMa
return nil
}
+func (testCallbacksInterface *testCallbacksInterfaceStruct) VolumeToBeUnserved(confMap conf.ConfMap, volumeName string) (err error) {
+ logMessage := fmt.Sprintf("testCallbacksInterface%s.VolumeToBeUnserved(,%s) called", testCallbacksInterface.name, volumeName)
+ testCallbacksInterface.t.Logf(" %s", logMessage)
+ testCallbackLog = append(testCallbackLog, logMessage)
+ return nil
+}
+
func (testCallbacksInterface *testCallbacksInterfaceStruct) SignaledStart(confMap conf.ConfMap) (err error) {
logMessage := fmt.Sprintf("testCallbacksInterface%s.SignaledStart() called", testCallbacksInterface.name)
testCallbacksInterface.t.Logf(" %s", logMessage)
diff --git a/utils/api.go b/utils/api.go
index e88ce1fa0..43325a606 100644
--- a/utils/api.go
+++ b/utils/api.go
@@ -3,6 +3,7 @@ package utils
import (
"bytes"
+ "container/list"
"crypto/rand"
"encoding/binary"
"encoding/json"
@@ -748,6 +749,41 @@ func FetchRandomByteSlice(len int) (randByteSlice []byte) {
return
}
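+// RandomizeList pseudo-randomly reorders the elements of theList in place.
+// It makes two passes over the list; on each pass it walks the first len-1
+// elements and moves each one to either the front or the back of the list
+// based on a random byte, yielding a rough shuffle of the original ordering.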
+func RandomizeList(theList *list.List) {
+ var (
+ nextElement *list.Element
+ originalListElementIndex int
+ pass int
+ randByteSlice []byte
+ theListLen int
+ thisElement *list.Element
+ )
+
+ theListLen = theList.Len()
+
+ if theListLen < 2 {
+ return
+ }
+
+ for pass = 0; pass < 2; pass++ {
+ thisElement = theList.Front()
+
+ randByteSlice = FetchRandomByteSlice(theListLen - 1)
+
+ for originalListElementIndex = 0; originalListElementIndex < (theListLen - 1); originalListElementIndex++ {
+ nextElement = thisElement.Next()
+
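+			// Treat this element's random byte as a coin flip: low values
+			// (< 0x80) send it to the front, high values send it to the back.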
+ if randByteSlice[originalListElementIndex] < 0x80 {
+ theList.MoveToFront(thisElement)
+ } else {
+ theList.MoveToBack(thisElement)
+ }
+
+ thisElement = nextElement
+ }
+ }
+}
+
func JSONify(input interface{}, indentify bool) (output string) {
var (
err error
diff --git a/utils/api_test.go b/utils/api_test.go
index 38629f371..69627bcd9 100644
--- a/utils/api_test.go
+++ b/utils/api_test.go
@@ -1,6 +1,8 @@
package utils
import (
+ "container/list"
+ "fmt"
"sync"
"testing"
"time"
@@ -321,3 +323,47 @@ func TestStopwatch(t *testing.T) {
assert.Equal(int64(sw3.ElapsedTime), int64(0)) // Elapsed time isn't set yet
assert.True(sw3.IsRunning) // stopwatch is running
}
+
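+// TestRandomizeList runs RandomizeList over lists of length 0 through 20,
+// covering the empty and single-element edge cases; it only verifies that
+// the call completes without panicking.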
+func TestRandomizeList(t *testing.T) {
+ var (
+ l *list.List
+ n int
+ )
+
+ for n = 0; n <= 20; n++ {
+ l = testPopulateListOfInts(n)
+ RandomizeList(l)
+ // testDumpListOfInts(l)
+ }
+}
+
+func testPopulateListOfInts(n int) (l *list.List) {
+ var (
+ lev int
+ )
+
+ l = list.New()
+
+ for lev = 0; lev < n; lev++ {
+ l.PushBack(lev)
+ }
+
+ return
+}
+
+func testDumpListOfInts(l *list.List) {
+ var (
+ le *list.Element
+ lev int
+ )
+
+ le = l.Front()
+
+ for le != nil {
+ lev = le.Value.(int)
+ fmt.Printf(" %d", lev)
+ le = le.Next()
+ }
+
+ fmt.Println()
+}