From 1d9ad4530e8ee5aaedfa78a4c0b156f0fe4096a7 Mon Sep 17 00:00:00 2001 From: Ed McClanahan Date: Mon, 14 Dec 2020 15:48:40 -0800 Subject: [PATCH] Added [FSGlobals]CoalesceElementChunkSize to cap the duration of each Coalesce lock phase --- CONFIGURING.md | 1 + blunder/api_test.go | 1 + dlm/llm_test.go | 1 + evtlog/api_test.go | 1 + evtlog/benchmark_test.go | 2 + fs/api_internal.go | 160 ++++++++++++++++---------- fs/config.go | 5 + fs/setup_teardown_test.go | 1 + halter/api_test.go | 1 + headhunter/api_test.go | 1 + headhunter/stress_test.go | 1 + httpserver/setup_teardown_test.go | 1 + inode/file.go | 13 +-- inode/setup_teardown_test.go | 1 + jrpcfs/middleware_test.go | 1 + pfsagentd/setup_teardown_test.go | 1 + proxyfsd/daemon_test.go | 1 + proxyfsd/default.conf | 1 + proxyfsd/file_server.conf | 1 + proxyfsd/file_server_mac_3_peers.conf | 1 + ramswift/daemon_test.go | 1 + saio/container/proxyfs.conf | 1 + stats/api_test.go | 1 + statslogger/config_test.go | 1 + swiftclient/api_test.go | 1 + transitions/api_test.go | 1 + 26 files changed, 132 insertions(+), 70 deletions(-) diff --git a/CONFIGURING.md b/CONFIGURING.md index b66bf9d2b..8cb2df09d 100644 --- a/CONFIGURING.md +++ b/CONFIGURING.md @@ -109,6 +109,7 @@ For each of the keys supported, the following table will list whether or not its | | TryLockBackoffMax | No | 50ms | Yes | No | | | TryLockSerializationThreshhold | No | 5 | Yes | No | | | SymlinkMax | No | 32 | Yes | No | +| | CoalesceElementChunkSize | No | 16 | Yes | No | | | InodeRecCacheEvictLowLimit | Yes | | Yes | No | | | InodeRecCacheEvictHighLimit | Yes | | Yes | No | | | LogSegmentRecCacheEvictLowLimit | Yes | | Yes | No | diff --git a/blunder/api_test.go b/blunder/api_test.go index 505ba1ddb..892366a94 100644 --- a/blunder/api_test.go +++ b/blunder/api_test.go @@ -36,6 +36,7 @@ func testSetup(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", } testConfMap, err = conf.MakeConfMapFromStrings(testConfStrings) diff --git a/dlm/llm_test.go b/dlm/llm_test.go index 6db2ceb53..ea5c3992b 100644 --- a/dlm/llm_test.go +++ b/dlm/llm_test.go @@ -96,6 +96,7 @@ func testSetup() (err error) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", } testConfMap, err = conf.MakeConfMapFromStrings(testConfMapStrings) diff --git a/evtlog/api_test.go b/evtlog/api_test.go index 5f41cd154..cbfd56b83 100644 --- a/evtlog/api_test.go +++ b/evtlog/api_test.go @@ -48,6 +48,7 @@ func TestAPI(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "EventLog.Enabled=true", "EventLog.BufferKey=9876", // Don't conflict with a running instance "EventLog.BufferLength=65536", // 64KiB diff --git a/evtlog/benchmark_test.go b/evtlog/benchmark_test.go index 9fa16076e..770d71979 100644 --- a/evtlog/benchmark_test.go +++ b/evtlog/benchmark_test.go @@ -34,6 +34,7 @@ func benchmarkSetup(b *testing.B, enable bool) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "EventLog.Enabled=true", "EventLog.BufferKey=1234", "EventLog.BufferLength=65536", //64KiB @@ -57,6 +58,7 @@ func benchmarkSetup(b *testing.B, enable bool) { "FSGlobals.TryLockBackoffMax=50ms", 
"FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "EventLog.Enabled=false", } } diff --git a/fs/api_internal.go b/fs/api_internal.go index 3149a43eb..9f8c115a0 100644 --- a/fs/api_internal.go +++ b/fs/api_internal.go @@ -1381,19 +1381,20 @@ func (vS *volumeStruct) MiddlewareCoalesce(destPath string, metaData []byte, ele ino uint64, numWrites uint64, attrChangeTime uint64, modificationTime uint64, err error) { var ( - coalesceElementList []*inode.CoalesceElement - coalesceElementListIndex int - coalesceSize uint64 - ctime time.Time - mtime time.Time - destFileInodeNumber inode.InodeNumber - dirEntryBasename string - dirEntryInodeNumber inode.InodeNumber - dirInodeNumber inode.InodeNumber - elementPath string - heldLocks *heldLocksStruct - retryRequired bool - tryLockBackoffContext *tryLockBackoffContextStruct + coalesceElementList []*inode.CoalesceElement + coalesceSize uint64 + ctime time.Time + destFileInodeNumber inode.InodeNumber + dirEntryBasename string + dirEntryInodeNumber inode.InodeNumber + dirInodeNumber inode.InodeNumber + elementPathIndex int + elementPathIndexAtChunkEnd int + elementPathIndexAtChunkStart int + heldLocks *heldLocksStruct + mtime time.Time + retryRequired bool + tryLockBackoffContext *tryLockBackoffContextStruct ) startTime := time.Now() @@ -1408,54 +1409,17 @@ func (vS *volumeStruct) MiddlewareCoalesce(destPath string, metaData []byte, ele vS.jobRWMutex.RLock() defer vS.jobRWMutex.RUnlock() - // Retry until done or failure (starting with ZERO backoff) + // First create the destination file if necessary and ensure that it is empty tryLockBackoffContext = &tryLockBackoffContextStruct{} -Restart: - - // Perform backoff and update for each restart (starting with ZERO backoff of course) +RestartDestinationFileCreation: tryLockBackoffContext.backoff() - // Construct fresh heldLocks for this restart - heldLocks = newHeldLocks() - // Assemble WriteLock on each FileInode and their containing DirInode in elementPaths - - coalesceElementList = make([]*inode.CoalesceElement, len(elementPaths)) - - for coalesceElementListIndex, elementPath = range elementPaths { - dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, _, retryRequired, err = - vS.resolvePath( - inode.RootDirInodeNumber, - elementPath, - heldLocks, - resolvePathFollowDirSymlinks| - resolvePathRequireExclusiveLockOnDirEntryInode| - resolvePathRequireExclusiveLockOnDirInode) - - if nil != err { - heldLocks.free() - return - } - - if retryRequired { - heldLocks.free() - goto Restart - } - - // Record dirInode & dirEntryInode (fileInode) in elementList - - coalesceElementList[coalesceElementListIndex] = &inode.CoalesceElement{ - ContainingDirectoryInodeNumber: dirInodeNumber, - ElementInodeNumber: dirEntryInodeNumber, - ElementName: dirEntryBasename, - } - } - - _, dirEntryInodeNumber, _, _, retryRequired, err = + _, destFileInodeNumber, _, _, retryRequired, err = vS.resolvePath( inode.RootDirInodeNumber, destPath, @@ -1472,18 +1436,94 @@ Restart: if retryRequired { heldLocks.free() - goto Restart + goto RestartDestinationFileCreation } - // Invoke package inode to actually perform the Coalesce operation + vS.inodeVolumeHandle.SetSize(destFileInodeNumber, 0) - destFileInodeNumber = dirEntryInodeNumber - ctime, mtime, numWrites, coalesceSize, err = vS.inodeVolumeHandle.Coalesce( - destFileInodeNumber, MiddlewareStream, metaData, coalesceElementList) + heldLocks.free() - // We can now release all the WriteLocks we are currently holding + // Now 
set up for looping through elementPaths with fresh locks + // every globals.coalesceElementChunkSize elements holding an + // Exclusive Lock on each FileInode and their containing DirInode - heldLocks.free() + elementPathIndexAtChunkStart = 0 + + for elementPathIndexAtChunkStart < len(elementPaths) { + elementPathIndexAtChunkEnd = elementPathIndexAtChunkStart + int(globals.coalesceElementChunkSize) + if elementPathIndexAtChunkEnd > len(elementPaths) { + elementPathIndexAtChunkEnd = len(elementPaths) + } + + // Coalesce elementPaths[elementPathIndexAtChunkStart:elementPathIndexAtChunkEnd) + + tryLockBackoffContext = &tryLockBackoffContextStruct{} + + RestartCoalesceChunk: + + tryLockBackoffContext.backoff() + + heldLocks = newHeldLocks() + + coalesceElementList = make([]*inode.CoalesceElement, 0, (elementPathIndexAtChunkEnd - elementPathIndexAtChunkStart)) + + for elementPathIndex = elementPathIndexAtChunkStart; elementPathIndex < elementPathIndexAtChunkEnd; elementPathIndex++ { + dirInodeNumber, dirEntryInodeNumber, dirEntryBasename, _, retryRequired, err = + vS.resolvePath( + inode.RootDirInodeNumber, + elementPaths[elementPathIndex], + heldLocks, + resolvePathFollowDirSymlinks| + resolvePathRequireExclusiveLockOnDirEntryInode| + resolvePathRequireExclusiveLockOnDirInode) + + if nil != err { + heldLocks.free() + return + } + + if retryRequired { + heldLocks.free() + goto RestartCoalesceChunk + } + + coalesceElementList = append(coalesceElementList, &inode.CoalesceElement{ + ContainingDirectoryInodeNumber: dirInodeNumber, + ElementInodeNumber: dirEntryInodeNumber, + ElementName: dirEntryBasename, + }) + } + + _, destFileInodeNumber, _, _, retryRequired, err = + vS.resolvePath( + inode.RootDirInodeNumber, + destPath, + heldLocks, + resolvePathFollowDirEntrySymlinks| + resolvePathFollowDirSymlinks| + resolvePathRequireExclusiveLockOnDirEntryInode) + + if nil != err { + heldLocks.free() + return + } + + if retryRequired { + heldLocks.free() + goto RestartCoalesceChunk + } + + ctime, mtime, numWrites, coalesceSize, err = vS.inodeVolumeHandle.Coalesce( + destFileInodeNumber, MiddlewareStream, metaData, coalesceElementList) + + heldLocks.free() + + if nil != err { + return + } + + elementPathIndexAtChunkStart = elementPathIndexAtChunkEnd + } // Regardless of err return, fill in other return values diff --git a/fs/config.go b/fs/config.go index ebae16412..3aa8d1fe4 100644 --- a/fs/config.go +++ b/fs/config.go @@ -57,6 +57,7 @@ type globalsStruct struct { tryLockBackoffMax time.Duration tryLockSerializationThreshhold uint64 symlinkMax uint16 + coalesceElementChunkSize uint16 volumeMap map[string]*volumeStruct // key == volumeStruct.volumeName @@ -204,6 +205,10 @@ func (dummy *globalsStruct) Up(confMap conf.ConfMap) (err error) { if nil != err { globals.symlinkMax = 32 // TODO: Eventually, just return } + globals.coalesceElementChunkSize, err = confMap.FetchOptionValueUint16("FSGlobals", "CoalesceElementChunkSize") + if nil != err { + globals.coalesceElementChunkSize = 16 // TODO: Eventually, just return + } globals.volumeMap = make(map[string]*volumeStruct) diff --git a/fs/setup_teardown_test.go b/fs/setup_teardown_test.go index 156d42001..b07cb0d74 100644 --- a/fs/setup_teardown_test.go +++ b/fs/setup_teardown_test.go @@ -106,6 +106,7 @@ func testSetup(t *testing.T, starvationMode bool) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", 
"FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/halter/api_test.go b/halter/api_test.go index 22e73b61f..a879a927a 100644 --- a/halter/api_test.go +++ b/halter/api_test.go @@ -26,6 +26,7 @@ func TestAPI(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", } testConfMap, err := conf.MakeConfMapFromStrings(testConfMapStrings) diff --git a/headhunter/api_test.go b/headhunter/api_test.go index ed7c40def..724970e50 100644 --- a/headhunter/api_test.go +++ b/headhunter/api_test.go @@ -148,6 +148,7 @@ func TestHeadHunterAPI(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/headhunter/stress_test.go b/headhunter/stress_test.go index 1a48cfa6a..13b061682 100644 --- a/headhunter/stress_test.go +++ b/headhunter/stress_test.go @@ -185,6 +185,7 @@ func TestHeadHunterStress(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/httpserver/setup_teardown_test.go b/httpserver/setup_teardown_test.go index d157921ba..4e4261ac8 100644 --- a/httpserver/setup_teardown_test.go +++ b/httpserver/setup_teardown_test.go @@ -67,6 +67,7 @@ func testSetup(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/inode/file.go b/inode/file.go index 63e493cc4..8f3078ca2 100644 --- a/inode/file.go +++ b/inode/file.go @@ -1138,24 +1138,17 @@ func (vS *volumeStruct) Coalesce(destInodeNumber InodeNumber, metaDataName strin return } - // Now truncate destInode & "append" each Element's extents to destInode (creating duplicate references to LogSegments for now) + // Now "append" each Element's extents to destInode (creating duplicate references to LogSegments for now) destInodeExtentMap = destInode.payload.(sortedmap.BPlusTree) destInode.dirty = true - err = setSizeInMemory(destInode, 0) - if nil != err { - err = blunder.NewError(blunder.InvalidArgError, "Coalesce() unable to truncate destInodeNumber 0x%016X: %v", destInodeNumber, err) - return - } - - destInodeOffsetBeforeElementAppend = 0 - destInode.NumWrites = 0 + destInodeOffsetBeforeElementAppend = fileLen(destInodeExtentMap) for _, element = range elements { elementInode = inodeMap[element.ElementInodeNumber] - destInode.NumWrites += 1 + destInode.NumWrites++ elementInodeExtentMap = elementInode.payload.(sortedmap.BPlusTree) elementInodeExtentMapLen, err = elementInodeExtentMap.Len() for elementInodeExtentMapIndex = 0; elementInodeExtentMapIndex < elementInodeExtentMapLen; elementInodeExtentMapIndex++ { diff --git a/inode/setup_teardown_test.go b/inode/setup_teardown_test.go index 98a8f55db..4680f17d8 100644 --- a/inode/setup_teardown_test.go +++ b/inode/setup_teardown_test.go @@ -103,6 +103,7 @@ 
func testSetup(t *testing.T, starvationMode bool) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/jrpcfs/middleware_test.go b/jrpcfs/middleware_test.go index f0e2ca544..619c0e9a9 100644 --- a/jrpcfs/middleware_test.go +++ b/jrpcfs/middleware_test.go @@ -63,6 +63,7 @@ func testSetup() []func() { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=8", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/pfsagentd/setup_teardown_test.go b/pfsagentd/setup_teardown_test.go index 03dab0952..5b5605d3b 100644 --- a/pfsagentd/setup_teardown_test.go +++ b/pfsagentd/setup_teardown_test.go @@ -221,6 +221,7 @@ func testSetup(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/proxyfsd/daemon_test.go b/proxyfsd/daemon_test.go index 59cb7cee3..b9fa18c5f 100644 --- a/proxyfsd/daemon_test.go +++ b/proxyfsd/daemon_test.go @@ -139,6 +139,7 @@ func TestDaemon(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.InodeRecCacheEvictLowLimit=10000", "FSGlobals.InodeRecCacheEvictHighLimit=10010", "FSGlobals.LogSegmentRecCacheEvictLowLimit=10000", diff --git a/proxyfsd/default.conf b/proxyfsd/default.conf index 83faa5c06..d5437dc50 100644 --- a/proxyfsd/default.conf +++ b/proxyfsd/default.conf @@ -184,6 +184,7 @@ TryLockBackoffMin: 10ms TryLockBackoffMax: 50ms TryLockSerializationThreshhold: 5 SymlinkMax: 32 +CoalesceElementChunkSize: 16 InodeRecCacheEvictLowLimit: 10000 InodeRecCacheEvictHighLimit: 10010 LogSegmentRecCacheEvictLowLimit: 10000 diff --git a/proxyfsd/file_server.conf b/proxyfsd/file_server.conf index 31940bf63..79b7b38b2 100644 --- a/proxyfsd/file_server.conf +++ b/proxyfsd/file_server.conf @@ -119,6 +119,7 @@ TryLockBackoffMin: 10ms TryLockBackoffMax: 50ms TryLockSerializationThreshhold: 5 SymlinkMax: 32 +CoalesceElementChunkSize: 16 InodeRecCacheEvictLowLimit: 10000 InodeRecCacheEvictHighLimit: 10010 LogSegmentRecCacheEvictLowLimit: 10000 diff --git a/proxyfsd/file_server_mac_3_peers.conf b/proxyfsd/file_server_mac_3_peers.conf index f4ae6e3e9..652f59651 100644 --- a/proxyfsd/file_server_mac_3_peers.conf +++ b/proxyfsd/file_server_mac_3_peers.conf @@ -127,6 +127,7 @@ TryLockBackoffMin: 10ms TryLockBackoffMax: 50ms TryLockSerializationThreshhold: 5 SymlinkMax: 32 +CoalesceElementChunkSize: 16 InodeRecCacheEvictLowLimit: 10000 InodeRecCacheEvictHighLimit: 10010 LogSegmentRecCacheEvictLowLimit: 10000 diff --git a/ramswift/daemon_test.go b/ramswift/daemon_test.go index a7ab1b97f..132c3b290 100644 --- a/ramswift/daemon_test.go +++ b/ramswift/daemon_test.go @@ -37,6 +37,7 @@ func TestViaNoAuthClient(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + 
"FSGlobals.CoalesceElementChunkSize=16", "Peer:Peer0.ReadCacheQuotaFraction=0.20", "RamSwiftInfo.MaxAccountNameLength=256", "RamSwiftInfo.MaxContainerNameLength=256", diff --git a/saio/container/proxyfs.conf b/saio/container/proxyfs.conf index bee4cb538..6ce935bf5 100644 --- a/saio/container/proxyfs.conf +++ b/saio/container/proxyfs.conf @@ -156,6 +156,7 @@ TryLockBackoffMin: 10ms TryLockBackoffMax: 50ms TryLockSerializationThreshhold: 5 SymlinkMax: 32 +CoalesceElementChunkSize: 16 InodeRecCacheEvictLowLimit: 10000 InodeRecCacheEvictHighLimit: 10010 LogSegmentRecCacheEvictLowLimit: 10000 diff --git a/stats/api_test.go b/stats/api_test.go index 1382c8090..56b784e20 100644 --- a/stats/api_test.go +++ b/stats/api_test.go @@ -79,6 +79,7 @@ func TestStatsAPIviaUDP(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "Stats.IPAddr=localhost", "Stats.UDPPort=" + portString, "Stats.BufferLength=1000", diff --git a/statslogger/config_test.go b/statslogger/config_test.go index 68e660c04..461d7d98c 100644 --- a/statslogger/config_test.go +++ b/statslogger/config_test.go @@ -83,6 +83,7 @@ func TestAPI(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.EtcdEnabled=false", "RamSwiftInfo.MaxAccountNameLength=256", diff --git a/swiftclient/api_test.go b/swiftclient/api_test.go index 0ab4a170c..d7072718d 100644 --- a/swiftclient/api_test.go +++ b/swiftclient/api_test.go @@ -87,6 +87,7 @@ func TestAPI(t *testing.T) { "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "FSGlobals.EtcdEnabled=false", "Logging.LogFilePath=/dev/null", diff --git a/transitions/api_test.go b/transitions/api_test.go index 5d0b33d70..f493c2242 100644 --- a/transitions/api_test.go +++ b/transitions/api_test.go @@ -49,6 +49,7 @@ var testConfStrings = []string{ "FSGlobals.TryLockBackoffMax=50ms", "FSGlobals.TryLockSerializationThreshhold=5", "FSGlobals.SymlinkMax=32", + "FSGlobals.CoalesceElementChunkSize=16", "Cluster.Peers=Peer0,Peer1,Peer2,Peer3", "Cluster.WhoAmI=Peer0", "Transitions.AutoVolumeGroupPrefix=V_",