Add WaitN support to limiter (#4675)
Adds `WaitN` support to the sliding window limiter. `WaitN` blocks the request until all N tokens are acquired. It addresses a concern raised [here](https://github.com/alcionai/corso/pull/4636#discussion_r1389686910) against an earlier `WaitN` implementation by ensuring that all N tokens are served to each request atomically.

---

#### Does this PR need a docs update or release note?

- [ ] ✅ Yes, it's included
- [x] 🕐 Yes, but in a later PR
- [ ] ⛔ No

#### Type of change

- [x] 🌻 Feature
- [ ] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

* #<issue>

#### Test Plan

- [ ] 💪 Manual
- [x] ⚡ Unit test
- [ ] 💚 E2E
parent 551bfd2b13
commit bc9d258ca0
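For context, a minimal sketch of how a caller might drive the new API. The constructor signature is taken from the tests in the diff below; the window parameters, batch size, and the `reserveBatch` helper are illustrative only, and the snippet assumes this package's imports:

```go
// Illustrative only: parameters, batch size, and function name are made up.
func reserveBatch(ctx context.Context) error {
	// 1s window sliding every 10ms, with capacity for 100 tokens per window.
	lim, err := NewSlidingWindowLimiter(time.Second, 10*time.Millisecond, 100)
	if err != nil {
		return err
	}
	defer lim.Shutdown()

	// Block until all 20 tokens are granted to this request as one atomic
	// batch, or until ctx is cancelled.
	return lim.WaitN(ctx, 20)
}
```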
```diff
@@ -4,6 +4,7 @@ import "context"
 
 type Limiter interface {
 	Wait(ctx context.Context) error
+	WaitN(ctx context.Context, n int) error
 	Shutdown()
 	Reset()
 }
```
```diff
@@ -89,20 +89,34 @@ func NewSlidingWindowLimiter(
 }
 
 // Wait blocks a request until a token is available or the context is cancelled.
-// TODO(pandeyabs): Implement WaitN.
 func (s *slidingWindow) Wait(ctx context.Context) error {
+	return s.WaitN(ctx, 1)
+}
+
+// WaitN blocks a request until n tokens are available or the context gets
+// cancelled. WaitN should be called with n <= capacity otherwise it will block
+// forever.
+//
+// TODO(pandeyabs): Enforce n <= capacity check. Not adding it right now because
+// we are relying on capacity = 0 for ctx cancellation test, which would need
+// some refactoring.
+func (s *slidingWindow) WaitN(ctx context.Context, n int) error {
+	// Acquire request mutex and slide mutex in order.
+	s.requestMu.Lock()
+	defer s.requestMu.Unlock()
+
+	for i := 0; i < n; i++ {
 		select {
 		case <-ctx.Done():
 			return clues.Stack(ctx.Err())
 		case <-s.permits:
 		}
+	}
 
 	// Mark n tokens as granted in the current interval.
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
-	s.curr.count[s.currentInterval]++
+	s.curr.count[s.currentInterval] += n
 
 	return nil
 }
```
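The request-level mutex is what makes the grant atomic: without it, two concurrent `WaitN` calls could each drain part of the permit channel and then block forever once the remaining permits are split between them. A self-contained toy reproduction of that hazard follows; the `pool`/`waitN` names are hypothetical and this is not code from the PR, just the same serialize-the-whole-batch technique in isolation:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// pool is a toy permit pool built on a buffered channel.
type pool struct {
	requestMu sync.Mutex    // serializes whole-batch acquisition
	permits   chan struct{} // one queued value per available token
}

// waitN acquires n tokens as a single atomic batch. Without requestMu,
// two concurrent waitN(2) calls against 3 available tokens could each
// grab one or two and then block forever on the remainder.
func (p *pool) waitN(ctx context.Context, n int) error {
	p.requestMu.Lock()
	defer p.requestMu.Unlock()

	for i := 0; i < n; i++ {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-p.permits:
		}
	}

	return nil
}

func main() {
	p := &pool{permits: make(chan struct{}, 3)}
	for i := 0; i < 3; i++ {
		p.permits <- struct{}{}
	}

	// The first batch gets both tokens it asked for.
	fmt.Println(p.waitN(context.Background(), 2)) // <nil>

	// Only one token is left, so the second batch cannot complete; it
	// fails on the deadline instead of blocking forever.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	fmt.Println(p.waitN(ctx, 2)) // context deadline exceeded
}
```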
```diff
@@ -81,40 +81,64 @@ func (suite *SlidingWindowUnitTestSuite) TestWaitBasic() {
 }
 
 // TestWaitSliding tests the sliding window functionality of the limiter with
-// time distributed Wait() calls.
-func (suite *SlidingWindowUnitTestSuite) TestWaitSliding() {
-	var (
-		t             = suite.T()
-		windowSize    = 1 * time.Second
-		slideInterval = 10 * time.Millisecond
-		capacity      = 100
-		// Test will run for duration of 2 windowSize.
-		numRequests = 2 * capacity
-		wg          sync.WaitGroup
-	)
+// time distributed WaitN() calls.
+func (suite *SlidingWindowUnitTestSuite) TestWaitNSliding() {
+	tests := []struct {
+		Name          string
+		windowSize    time.Duration
+		slideInterval time.Duration
+		capacity      int
+		numRequests   int
+		n             int
+	}{
+		{
+			Name:          "Request 1 token each",
+			windowSize:    100 * time.Millisecond,
+			slideInterval: 10 * time.Millisecond,
+			capacity:      100,
+			numRequests:   200,
+			n:             1,
+		},
+		{
+			Name:          "Request N tokens each",
+			windowSize:    100 * time.Millisecond,
+			slideInterval: 10 * time.Millisecond,
+			capacity:      1000,
+			numRequests:   200,
+			n:             10,
+		},
+	}
+
+	for _, test := range tests {
+		suite.Run(test.Name, func() {
+			t := suite.T()
 
 			defer goleak.VerifyNone(t)
 
 			ctx, flush := tester.NewContext(t)
 			defer flush()
 
-			s, err := NewSlidingWindowLimiter(windowSize, slideInterval, capacity)
+			s, err := NewSlidingWindowLimiter(test.windowSize, test.slideInterval, test.capacity)
 			require.NoError(t, err)
 
+			var wg sync.WaitGroup
+
 			// Make concurrent requests to the limiter
-			for i := 0; i < numRequests; i++ {
+			for i := 0; i < test.numRequests; i++ {
 				wg.Add(1)
 
 				go func() {
 					defer wg.Done()
 
-					// Sleep for a random duration to spread out requests over multiple slide
-					// intervals & windows, so that we can test the sliding window logic better.
-					// Without this, the requests will be bunched up in the very first intervals
-					// of the 2 windows. Rest of the intervals will be empty.
+					// Sleep for a random duration to spread out requests over
+					// multiple slide intervals & windows, so that we can test
+					// the sliding window logic better.
+					// Without this, the requests will be bunched up in the very
+					// first interval of the 2 windows. Rest of the intervals
+					// will be empty.
 					time.Sleep(time.Duration(rand.Intn(1500)) * time.Millisecond)
 
-					err := s.Wait(ctx)
+					err := s.WaitN(ctx, test.n)
 					require.NoError(t, err)
 				}()
 			}
```
```diff
@@ -131,7 +155,9 @@ func (suite *SlidingWindowUnitTestSuite) TestWaitSliding() {
 			sums := slidingSums(data, sw.numIntervals)
 
 			for _, sum := range sums {
-				require.True(t, sum <= capacity, "sum: %d, capacity: %d", sum, capacity)
+				require.True(t, sum <= test.capacity, "sum: %d, capacity: %d", sum, test.capacity)
 			}
+		})
+	}
 }
```
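The capacity assertion relies on a `slidingSums` helper that this diff does not touch. As a hedged sketch of the invariant being checked, assuming `data[i]` holds the number of tokens granted in slide interval `i` and `numIntervals` is the number of intervals per window: every run of `numIntervals` consecutive intervals must sum to at most the window capacity. The shape below is an assumption, not the repo's code:

```go
// slidingSums returns the sum of every numIntervals-wide run of
// consecutive slide intervals; each sum must stay <= capacity for the
// limiter to be correct. Assumed shape, not the repo's implementation.
func slidingSums(data []int, numIntervals int) []int {
	var (
		sums []int
		sum  int
	)

	for i, v := range data {
		sum += v
		if i >= numIntervals {
			// Drop the interval that just slid out of the window.
			sum -= data[i-numIntervals]
		}
		if i >= numIntervals-1 {
			sums = append(sums, sum)
		}
	}

	return sums
}
```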