Push item download requests through drive rate limiter (#4393)

<!-- PR description-->

**Changes:** 
1. Count item download requests against the drive rate limit quota. Currently these are counted under the exchange limiter.
2. Use 1 rate limit token instead of 2 for drive calls by default. 
3. Use 2 tokens instead of 1 for the initial delta query (which has no delta token). 

Sharing internal docs separately to go along with the review. 

---

#### Does this PR need a docs update or release note?

- [ ]  Yes, it's included
- [ ] 🕐 Yes, but in a later PR
- [x]  No

#### Type of change

<!--- Please check the type of change your PR introduces: --->
- [ ] 🌻 Feature
- [x] 🐛 Bugfix
- [ ] 🗺️ Documentation
- [ ] 🤖 Supportability/Tests
- [ ] 💻 CI/Deployment
- [ ] 🧹 Tech Debt/Cleanup

#### Issue(s)

<!-- Can reference multiple issues. Use one of the following "magic words" - "closes, fixes" to auto-close the Github issue. -->
* #<issue>

#### Test Plan

<!-- How will this be tested prior to merging.-->
- [x] 💪 Manual
- [ ]  Unit test
- [ ] 💚 E2E
This commit is contained in:
Abhishek Pandey 2023-10-10 22:15:37 +05:30 committed by GitHub
parent 9ee7ca3bae
commit eb0299d316
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 23 additions and 12 deletions

View File

@ -567,6 +567,14 @@ func (oc *Collection) streamDriveItem(
parentPath) parentPath)
ctx = clues.Add(ctx, "item_info", itemInfo) ctx = clues.Add(ctx, "item_info", itemInfo)
// Drive content download requests are also rate limited by graph api.
// Ensure that this request goes through the drive limiter & not the default
// limiter.
ctx = graph.BindRateLimiterConfig(
ctx,
graph.LimiterCfg{
Service: path.OneDriveService,
})
if isFile { if isFile {
dataSuffix := metadata.DataFileSuffix dataSuffix := metadata.DataFileSuffix

View File

@ -150,10 +150,11 @@ const (
// https://learn.microsoft.com/en-us/sharepoint/dev/general-development // https://learn.microsoft.com/en-us/sharepoint/dev/general-development
// /how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#application-throttling // /how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#application-throttling
defaultLC = 1 defaultLC = 1
driveDefaultLC = 2
// limit consumption rate for single-item GETs requests, // limit consumption rate for single-item GETs requests,
// or delta-based multi-item GETs. // or delta-based multi-item GETs, or item content download requests.
SingleGetOrDeltaLC = 1 SingleGetOrDeltaLC = 1
// delta queries without a delta token cost 2 units
DeltaNoTokenLC = 2
// limit consumption rate for anything permissions related // limit consumption rate for anything permissions related
PermissionsLC = 5 PermissionsLC = 5
) )
@ -185,13 +186,7 @@ func ctxLimiterConsumption(ctx context.Context, defaultConsumption int) int {
// the next token set is available. // the next token set is available.
func QueueRequest(ctx context.Context) { func QueueRequest(ctx context.Context) {
limiter := ctxLimiter(ctx) limiter := ctxLimiter(ctx)
defaultConsumed := defaultLC consume := ctxLimiterConsumption(ctx, defaultLC)
if limiter == driveLimiter {
defaultConsumed = driveDefaultLC
}
consume := ctxLimiterConsumption(ctx, defaultConsumed)
if err := limiter.WaitN(ctx, consume); err != nil { if err := limiter.WaitN(ctx, consume); err != nil {
logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter") logger.CtxErr(ctx, err).Error("graph middleware waiting on the limiter")

View File

@ -82,7 +82,7 @@ func (hw httpWrapper) Request(
body io.Reader, body io.Reader,
headers map[string]string, headers map[string]string,
) (*http.Response, error) { ) (*http.Response, error) {
req, err := http.NewRequest(method, url, body) req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "new http request") return nil, clues.Wrap(err, "new http request")
} }

View File

@ -147,11 +147,17 @@ func deltaEnumerateItems[T any](
newDeltaLink = "" newDeltaLink = ""
invalidPrevDelta = len(prevDeltaLink) == 0 invalidPrevDelta = len(prevDeltaLink) == 0
nextLink = "do-while" nextLink = "do-while"
consume = graph.SingleGetOrDeltaLC
) )
if invalidPrevDelta {
// Delta queries with no previous token cost more.
consume = graph.DeltaNoTokenLC
}
// Loop through all pages returned by Graph API. // Loop through all pages returned by Graph API.
for len(nextLink) > 0 { for len(nextLink) > 0 {
page, err := pager.GetPage(graph.ConsumeNTokens(ctx, graph.SingleGetOrDeltaLC)) page, err := pager.GetPage(graph.ConsumeNTokens(ctx, consume))
if graph.IsErrDeltaNotSupported(err) { if graph.IsErrDeltaNotSupported(err) {
logger.Ctx(ctx).Infow("delta queries not supported") logger.Ctx(ctx).Infow("delta queries not supported")
return nil, DeltaUpdate{}, clues.Stack(graph.ErrDeltaNotSupported, err) return nil, DeltaUpdate{}, clues.Stack(graph.ErrDeltaNotSupported, err)
@ -161,6 +167,8 @@ func deltaEnumerateItems[T any](
logger.Ctx(ctx).Infow("invalid previous delta", "delta_link", prevDeltaLink) logger.Ctx(ctx).Infow("invalid previous delta", "delta_link", prevDeltaLink)
invalidPrevDelta = true invalidPrevDelta = true
// Reset limiter consumption since we don't have a valid delta token.
consume = graph.DeltaNoTokenLC
result = make([]T, 0) result = make([]T, 0)
// Reset tells the pager to try again after ditching its delta history. // Reset tells the pager to try again after ditching its delta history.