les: polish code #22625
Changes from 1 commit

@@ -63,20 +63,22 @@ type priorityPool struct {
	ns *nodestate.NodeStateMachine
	clock mclock.Clock
	lock sync.Mutex
	inactiveQueue *prque.Prque
	maxCount, maxCap uint64
	minCap uint64
	activeBias time.Duration
	capacityStepDiv, fineStepDiv uint64

	// The snapshot of priority pool for query.
	cachedCurve *capacityCurve
	ccUpdatedAt mclock.AbsTime
	ccUpdateForced bool

	tempState []*ppNodeInfo // nodes currently in temporary state
	// the following fields represent the temporary state if tempState is not empty
	// Runtime status of prioritypool, represents the
	// temporary state if tempState is not empty
	tempState []*ppNodeInfo
	activeCount, activeCap uint64
	activeQueue *prque.LazyQueue
	inactiveQueue *prque.Prque
}

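Read as a whole, the reorganized struct after this commit groups the configuration limits, the cached capacity curve, and the runtime/temporary state. A reconstruction from the hunk above, limited to the fields visible in it (alignment approximate):

type priorityPool struct {
	ns                           *nodestate.NodeStateMachine
	clock                        mclock.Clock
	lock                         sync.Mutex
	maxCount, maxCap             uint64
	minCap                       uint64
	activeBias                   time.Duration
	capacityStepDiv, fineStepDiv uint64

	// The snapshot of priority pool for query.
	cachedCurve    *capacityCurve
	ccUpdatedAt    mclock.AbsTime
	ccUpdateForced bool

	// Runtime status of prioritypool, represents the
	// temporary state if tempState is not empty
	tempState              []*ppNodeInfo
	activeCount, activeCap uint64
	activeQueue            *prque.LazyQueue
	inactiveQueue          *prque.Prque
}
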
// ppNodeInfo is the internal node descriptor of priorityPool

@@ -89,8 +91,9 @@ type ppNodeInfo struct {

	tempState bool // should only be true while the priorityPool lock is held
	tempCapacity uint64 // equals capacity when tempState is false

	// the following fields only affect the temporary state and they are set to their
	// default value when entering the temp state
	// default value when leaving the temp state
	minTarget, stepDiv uint64
	bias time.Duration
}

@@ -157,11 +160,6 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m
func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if minTarget < pp.minCap {
		minTarget = pp.minCap

@@ -175,43 +173,44 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget u
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil {
		log.Error("requestCapacity called for unknown node", "id", node.ID())
		pp.lock.Unlock()
		return 0
	}
	pp.setTempState(c)
	if maxTarget > c.capacity {
		c.bias = bias
		c.stepDiv = pp.fineStepDiv
	stepDiv := pp.fineStepDiv
	if maxTarget <= c.capacity {
		bias, stepDiv = 0, pp.capacityStepDiv
	}
	pp.setTempState(c, pp.minCap, stepDiv, bias)
	pp.setTempCapacity(c, maxTarget)
	c.minTarget = minTarget
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)
	pp.activeQueue.Push(c)
	pp.enforceLimits()
	updates = pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
	updates := pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity)
	pp.lock.Unlock()
	pp.updateFlags(updates)
	return c.capacity
}

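The same refactoring pattern recurs in requestCapacity above and in SetLimits, connectedNode, disconnectedNode and updatePriority below: the deferred closure over a mutable updates slice is replaced by explicit unlock-and-notify calls on every exit path. A minimal sketch of the resulting call-site shape, with a hypothetical method name that is not part of the diff:

// Hypothetical illustration of the shape adopted in this commit: collect the
// pending flag changes while holding pp.lock, then release the lock before
// calling updateFlags, writing both steps out explicitly at each return.
func (pp *priorityPool) exampleCallSite(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil {
		pp.lock.Unlock() // early exit: unlock explicitly, nothing to notify
		return
	}
	updates := pp.tryActivate(false) // pending capacity/flag updates
	pp.lock.Unlock()                 // release the pool lock first
	pp.updateFlags(updates)          // then notify outside the lock
}
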
// SetLimits sets the maximum number and total capacity of simultaneously active nodes
func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.ns.Operation(func() { pp.updateFlags(updates) })
	}()

	inc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)
	dec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)
	pp.maxCount, pp.maxCap = maxCount, maxCap

	var updates []capUpdate
	if dec {
		pp.enforceLimits()
		updates = pp.finalizeChanges(true)
	}
	if inc {
		updates = append(updates, pp.tryActivate(false)...)
	}
	pp.lock.Unlock()
	pp.ns.Operation(func() { pp.updateFlags(updates) })
}

// setActiveBias sets the bias applied when trying to activate inactive nodes

@@ -291,18 +290,15 @@ func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 {
func (pp *priorityPool) connectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = true
	pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	updates = pp.tryActivate(false)
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// disconnectedNode is called when a node has been removed from the pool (both inactiveFlag

@@ -311,40 +307,55 @@ func (pp *priorityPool) connectedNode(c *ppNodeInfo) {
func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	if !c.connected {
		pp.lock.Unlock()
		return
	}
	c.connected = false
	pp.activeQueue.Remove(c.activeIndex)
	pp.inactiveQueue.Remove(c.inactiveIndex)

	var updates []capUpdate
	if c.capacity != 0 {
		pp.setTempState(c)
		pp.setTempState(c, pp.minCap, pp.capacityStepDiv, 0)
		pp.setTempCapacity(c, 0)
		updates = pp.tryActivate(true)
	}
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// setTempState internally puts a node in a temporary state that can either be reverted
// or confirmed later. This temporary state allows changing the capacity of a node and
// moving it between the active and inactive queue. activeFlag/inactiveFlag and
// capacityField are not changed while the changes are still temporary.
func (pp *priorityPool) setTempState(c *ppNodeInfo) {
func (pp *priorityPool) setTempState(c *ppNodeInfo, minTarget uint64, stepDiv uint64, bias time.Duration) {
	if c.tempState {
		return
	}
	c.tempState = true
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when entering tempState")
	}
	c.minTarget = minTarget
	c.stepDiv = stepDiv
	c.bias = bias
	pp.tempState = append(pp.tempState, c)
}

// unsetTempState revokes the temp status of the node and reset all internal
// fields to the default value.
func (pp *priorityPool) unsetTempState(c *ppNodeInfo) {
	if !c.tempState {
		return
	}
	c.tempState = false
	if c.tempCapacity != c.capacity { // should never happen
		log.Error("tempCapacity != capacity when leaving tempState")
	}
	c.minTarget = pp.minCap
	c.stepDiv = pp.capacityStepDiv
	pp.tempState = append(pp.tempState, c)
	c.bias = 0
}

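Together with finalizeChanges further down, these two helpers frame every capacity mutation: setTempState now receives minTarget, stepDiv and bias explicitly when a node enters the temporary state, and unsetTempState restores the defaults when the change is committed or reverted. A rough usage sketch under those assumptions, using a hypothetical wrapper that is not part of the diff:

// Hypothetical sketch of the temp-state lifecycle as used by the pool:
// enter the temporary state with explicit parameters, adjust the capacity,
// re-apply the limits, then let finalizeChanges commit the result and call
// unsetTempState to reset minTarget, stepDiv and bias to their defaults.
func (pp *priorityPool) exampleAdjust(c *ppNodeInfo, target uint64) []capUpdate {
	pp.setTempState(c, pp.minCap, pp.capacityStepDiv, 0) // enter temp state
	pp.setTempCapacity(c, target)                        // tentative capacity
	pp.enforceLimits()                                   // may shrink it again
	return pp.finalizeChanges(true)                      // commit, then unsetTempState
}
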
// setTempCapacity changes the capacity of a node in the temporary state and adjusts

@@ -378,7 +389,7 @@ func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) {
	)
	pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {
		c = data.(*ppNodeInfo)
		pp.setTempState(c)
		pp.setTempState(c, pp.minCap, pp.capacityStepDiv, 0)
		maxActivePriority = priority
		if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount {
			pp.setTempCapacity(c, 0)

@@ -412,10 +423,8 @@ func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) {
		} else {
			pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap
		}
		c.tempState = false
		c.bias = 0
		c.stepDiv = pp.capacityStepDiv
		c.minTarget = pp.minCap
		pp.unsetTempState(c)

		if c.connected {
			if c.capacity != 0 {
				pp.activeQueue.Push(c)

@@ -461,14 +470,12 @@ func (pp *priorityPool) updateFlags(updates []capUpdate) {
func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
	for pp.inactiveQueue.Size() > 0 {
		c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
		pp.setTempState(c)
		pp.setTempState(c, pp.minCap, pp.capacityStepDiv, pp.activeBias)
		pp.setTempCapacity(c, pp.minCap)
		c.bias = pp.activeBias
		pp.activeQueue.Push(c)
		pp.enforceLimits()
		if c.tempCapacity > 0 {
			commit = true
			c.bias = 0

Review comment: This is not an identical change here and I am not sure about the potential effects. Until now, the bias was always only applied to one node that has been inserted or whose capacity was increased. It is meant to protect against quick oscillations so we always use it against the node that is trying to push out others from the pool. Once we decide to grant it the space it needs, it's no longer biased.

Reply: Gotcha. It's my mistake, will fix it.

		} else {
			break
		}

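The review thread above refers to the removed c.bias = 0: in the pre-existing code the activation bias only protects a node while it is competing for space and is dropped as soon as capacity is granted. A sketch of that original loop body inside tryActivate, reconstructed from the removed lines (not the behaviour of this commit):

// Pre-commit behaviour discussed in the review thread: the bias guards
// against quick oscillations while the node tries to push others out of
// the pool, and is cleared once the node has been granted capacity.
for pp.inactiveQueue.Size() > 0 {
	c := pp.inactiveQueue.PopItem().(*ppNodeInfo)
	pp.setTempState(c) // old, parameterless form
	pp.setTempCapacity(c, pp.minCap)
	c.bias = pp.activeBias // biased only while competing
	pp.activeQueue.Push(c)
	pp.enforceLimits()
	if c.tempCapacity > 0 {
		commit = true
		c.bias = 0 // granted: the bias is not kept afterwards
	} else {
		break
	}
}
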
@@ -483,14 +490,9 @@ func (pp *priorityPool) tryActivate(commit bool) []capUpdate {
func (pp *priorityPool) updatePriority(node *enode.Node) {
	pp.lock.Lock()
	pp.activeQueue.Refresh()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates)
	}()

	c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo)
	if c == nil || !c.connected {
		pp.lock.Unlock()
		return
	}
	pp.activeQueue.Remove(c.activeIndex)

@@ -500,7 +502,9 @@ func (pp *priorityPool) updatePriority(node *enode.Node) {
	} else {
		pp.inactiveQueue.Push(c, pp.inactivePriority(c))
	}
	updates = pp.tryActivate(false)
	updates := pp.tryActivate(false)
	pp.lock.Unlock()
	pp.updateFlags(updates)
}

// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently

Review comment: Why did you remove these updates? Do you think this metric is useless? Btw the metric itself is still there in metrics.go, so this looks wrong.

Reply: Because they are already counted in the clientpool internally: https://github.com/ethereum/go-ethereum/blob/master/les/vflux/server/clientpool.go#L154