From 115f41b37dd4d97d74748b127d96970b78204dca Mon Sep 17 00:00:00 2001
From: Dominic Della Valle
Date: Mon, 28 Nov 2022 11:58:20 -0500
Subject: [PATCH] cgofuse: use fd table shrinker and change limits

Since the file table can now reclaim memory, we can raise the
hardcoded open file limit. These limits are arbitrary and should
become variable at some point.

The file table grows by some factor as needed, and now it can shrink
when descriptors are freed (down to some limit based on the growth
scaling factor).

No metrics on how this impacts performance. For now, we just want to
prevent the table from growing large and remaining large after lots
of open+close calls have been made.

This could help speed up lookups slightly, since we no longer need to
range over as many `nil` entries at the tail anymore. But it's hard
to judge without measuring whether the checks+realloc cost more than
we save in the average use cases.
---
 internal/filesystem/cgofuse/table.go | 53 ++++++++++++++--------------
 1 file changed, 27 insertions(+), 26 deletions(-)

diff --git a/internal/filesystem/cgofuse/table.go b/internal/filesystem/cgofuse/table.go
index 54b6fcb0..478da180 100644
--- a/internal/filesystem/cgofuse/table.go
+++ b/internal/filesystem/cgofuse/table.go
@@ -24,9 +24,14 @@ type (
 
 const (
 	errorHandle = math.MaxUint64
-	// TODO: handleMax needs to be configurable.
-	// This value is arbitrary.
-	handleMax = 2048
+	// TODO: handleMax needs to be configurable like `ulimit` allows.
+	// NOTE: file table sizes and bounds were chosen arbitrarily.
+	// Suggestions for better averages or ways to tune are welcome.
+	handleMax              = 4096
+	tableStartingSize      = 8
+	tableGrowthfactor      = 2
+	tableShrinkLimitFactor = tableGrowthfactor * 2
+	tableShrinkBound       = tableStartingSize * tableShrinkLimitFactor
 )
 
 var (
@@ -54,13 +59,9 @@ func (files handleSlice) extend() (handleSlice, error) {
 	if filesLen < filesCap {
 		return files[:filesEnd], nil
 	}
-	const (
-		initialSize = 8 // NOTE: Initial size is chosen arbitrarily.
-		factor      = 2 // If a better average is known, replace this.
-	)
 	var (
-		scaledCap = filesCap * factor
-		newCap    = generic.Max(scaledCap, initialSize)
+		scaledCap = filesCap * tableGrowthfactor
+		newCap    = generic.Max(scaledCap, tableStartingSize)
 	)
 	if newCap > handleMax {
 		return nil, errFull
@@ -74,7 +75,6 @@ func (files handleSlice) shrink(lowerBound int) handleSlice {
 	var (
 		emptySlots int
 		filesLen   = len(files)
-		filesCap   = cap(files)
 	)
 	for i := filesLen - 1; i != -1; i-- {
 		if files[i] != nil {
@@ -83,23 +83,23 @@ func (files handleSlice) shrink(lowerBound int) handleSlice {
 		emptySlots++
 	}
 	var (
-		newLen = filesLen - emptySlots
-		bound  = boundCheck(lowerBound, newLen)
+		newLen             = filesLen - emptySlots
+		newCap             = lowestAlignment(newLen, tableStartingSize)
+		tooSmall           = newCap < lowerBound
+		sameSize           = newCap == cap(files)
+		lessOrEqualToBound = tooSmall || sameSize
 	)
-	if newLen == bound || filesCap == bound {
+	if lessOrEqualToBound {
 		return nil
 	}
-	newTable := make(handleSlice, newLen, bound)
+	newTable := make(handleSlice, newLen, newCap)
 	copy(newTable, files)
 	return newTable
 }
 
-func boundCheck(lowerBound, oldCap int) int {
-	newCap := lowerBound
-	for newCap < oldCap {
-		newCap *= 2
-	}
-	return newCap
+func lowestAlignment(size, alignment int) int {
+	remainder := (size - 1) % alignment
+	return (size - 1) + (alignment - remainder)
 }
 
 func (ft *fileTable) add(f fs.File) (fileDescriptor, error) {
@@ -154,12 +154,13 @@ func (ft *fileTable) remove(fh fileDescriptor) error {
 	if err := ft.validLocked(fh); err != nil {
 		return err
 	}
-	ft.files[fh] = nil
-	// TODO: We could trim the slice here so that it's not wasting memory.
-	// Need metrics on this though. May not be worth the cost.
-	// And not sure what capacity we should trim to as a maximum.
-	// If it's too low we're going to constantly thrash.
-	// Too high and we'll be wasting memory.
+	files := ft.files
+	files[fh] = nil
+	if cap(files) > tableShrinkBound {
+		if newTable := files.shrink(tableShrinkBound); newTable != nil {
+			ft.files = newTable
+		}
+	}
 	return nil
 }
 