Compare commits

...

2 Commits

Author SHA1 Message Date
Abhishek Pandey
3be2609bbc Add logs 2023-08-22 13:02:05 +05:30
Abhishek Pandey
7b88c9ae8d Add logs 2023-08-22 11:44:44 +05:30
4 changed files with 64 additions and 19 deletions

View File

@@ -3,6 +3,7 @@ package graph
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"os"
@@ -183,7 +184,18 @@ func IsErrTimeout(err error) bool {
errors.Is(err, context.Canceled) ||
errors.Is(err, context.DeadlineExceeded) ||
errors.Is(err, http.ErrHandlerTimeout) ||
os.IsTimeout(err)
os.IsTimeout(err) ||
IsNetworkTimeoutError(err)
}
func IsNetworkTimeoutError(err error) bool {
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
return true
}
return false
}
func IsErrConnectionReset(err error) bool {

View File

@@ -122,8 +122,18 @@ func (mw *LoggingMiddleware) Intercept(
middlewareIndex int,
req *http.Request,
) (*http.Response, error) {
// log request
logger.Ctx(req.Context()).Infow("logging middleware: graph api req",
"method", req.Method,
"url", LoggableURL(req.URL.String()))
// call the next middleware
resp, err := pipeline.Next(req, middlewareIndex)
if err != nil {
logger.CtxErr(req.Context(), err).Errorw("logging middleware: graph api error",
"network_timeout_error", IsNetworkTimeoutError(err))
}
if resp == nil {
return resp, err
}
@@ -212,6 +222,11 @@ func (mw RetryMiddleware) Intercept(
IsErrConnectionReset(err) ||
mw.isRetriableRespCode(ctx, resp)
if err != nil {
logger.CtxErr(ctx, err).Errorw("retry middleware: graph api error",
"network_timeout_error", IsNetworkTimeoutError(err))
}
if !retriable {
return resp, stackReq(ctx, req, resp, err).OrNil()
}
@@ -295,6 +310,10 @@ func (mw RetryMiddleware) retryRequest(
}
nextResp, err := pipeline.Next(req, middlewareIndex)
if err != nil {
logger.CtxErr(ctx, err).Error("retry middleware: retryRequest: graph api error")
}
if err != nil && !IsErrTimeout(err) && !IsErrConnectionReset(err) {
return nextResp, stackReq(ctx, req, nextResp, err)
}

View File

@@ -373,23 +373,36 @@ func (op *BackupOperation) do(
// the entire subtree instead of returning an additional bool. That way base
// selection is controlled completely by flags and merging is controlled
// completely by collections.
cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
ctx,
op.bp,
op.ResourceOwner,
op.Selectors,
mdColls,
lastBackupVersion,
op.Options,
op.Errors)
if err != nil {
return nil, clues.Wrap(err, "producing backup data collections")
}
ctx = clues.Add(
ctx,
"can_use_previous_backup", canUsePreviousBackup,
"collection_count", len(cs))
var cs []data.BackupCollection
maxAttempts := 30
for i := 0; i < maxAttempts; i++ {
logger.Ctx(ctx).Info("delta query begin")
cs, _, _, err := produceBackupDataCollections(
ctx,
op.bp,
op.ResourceOwner,
op.Selectors,
mdColls,
lastBackupVersion,
op.Options,
op.Errors)
if err != nil {
logger.CtxErr(ctx, err).Error("producing backup data collections")
if i == maxAttempts-1 {
return nil, clues.Wrap(err, "producing backup data collections")
}
}
logger.Ctx(ctx).Info("delta query end")
ctx = clues.Add(
ctx,
"can_use_previous_backup", false,
"collection_count", len(cs))
}
writeStats, deets, toMerge, err := consumeBackupCollections(
ctx,
@@ -398,9 +411,9 @@ func (op *BackupOperation) do(
reasons,
mans,
cs,
ssmb,
nil,
backupID,
op.incremental && canUseMetadata && canUsePreviousBackup,
op.incremental && canUseMetadata && false,
op.Errors)
if err != nil {
return nil, clues.Wrap(err, "persisting collection backups")

View File

@@ -188,6 +188,7 @@ func (p *DriveItemDeltaPageCtrl) GetPage(ctx context.Context) (DeltaPageLinker,
resp, err = p.builder.Get(ctx, p.options)
if err != nil {
logger.CtxErr(ctx, err).Error("delta GET error")
return nil, graph.Stack(ctx, err)
}