Reduce select query for URL cache delta query
commit 302be45a9a
parent 06d4f764e7
@@ -473,7 +473,7 @@ func (c *Collections) addURLCacheToDriveCollections(
         driveID,
         prevDelta,
         urlCacheRefreshInterval,
-        c.handler.NewItemPager(driveID, "", api.DriveItemSelectDefault()),
+        c.handler.NewItemPager(driveID, "", api.DriveItemSelectURLCache()),
         errs)
     if err != nil {
         return err
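Note: the pager's select list presumably ends up as the $select query option on the Graph drive delta request, so a narrower list directly shrinks each page the URL cache has to refresh. A rough sketch of that effect, using a hand-built URL and assumed field lists ("id" plus "deleted" for the cache, "id" plus "content.downloadUrl" and more for the default) rather than the repo's actual pager:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// buildDeltaURL illustrates how a select list narrows a drive delta request.
// The endpoint shape follows Graph's /drives/{id}/root/delta; the repo's own
// pager assembles its requests elsewhere.
func buildDeltaURL(driveID string, selectFields []string) string {
	u := url.URL{
		Scheme: "https",
		Host:   "graph.microsoft.com",
		Path:   "/v1.0/drives/" + driveID + "/root/delta",
	}

	q := u.Query()
	q.Set("$select", strings.Join(selectFields, ","))
	u.RawQuery = q.Encode()

	return u.String()
}

func main() {
	// Assumed field lists: the URL cache only needs item IDs and deletion
	// markers, while the default list also pulls download URLs and more.
	urlCacheSelect := []string{"id", "deleted"}
	defaultSelect := []string{"id", "content.downloadUrl" /* , ... */}

	fmt.Println(buildDeltaURL("drive-id", urlCacheSelect))
	fmt.Println(buildDeltaURL("drive-id", defaultSelect))
}
```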
@@ -369,41 +369,41 @@ func (op *BackupOperation) do(
         lastBackupVersion = mans.MinBackupVersion()
     }
 
-    // Run 3 times and exit
-    cs := []data.BackupCollection{}
-    canUsePreviousBackup := false
+    // // Run 3 times and exit
+    // cs := []data.BackupCollection{}
+    // canUsePreviousBackup := false
 
-    var maxCount int = 2
+    // var maxCount int = 2
 
-    for i := 0; i < maxCount; i++ {
+    // for i := 0; i < maxCount; i++ {
     logger.Ctx(ctx).Info("delta query iteration")
 
-    cs, _, canUsePreviousBackup, err := produceBackupDataCollections(
+    cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
         ctx,
         op.bp,
         op.ResourceOwner,
         op.Selectors,
         mdColls,
         lastBackupVersion,
         op.Options,
         op.Errors)
     if err != nil {
         return nil, clues.Wrap(err, "producing backup data collections")
-    }
-
-    ctx = clues.Add(
-        ctx,
-        "can_use_previous_backup", canUsePreviousBackup,
-        "collection_count", len(cs))
-
-    // sleep for 5 mins
-    //time.Sleep(5 * time.Minute)
-
-    if i == maxCount-1 {
-        return nil, clues.New("unable to produce backup collections").WithClues(ctx)
-    }
     }
 
+    ctx = clues.Add(
+        ctx,
+        "can_use_previous_backup", canUsePreviousBackup,
+        "collection_count", len(cs))
+
+    // sleep for 5 mins
+    //time.Sleep(5 * time.Minute)
+
+    // if i == maxCount-1 {
+    // 	return nil, clues.New("unable to produce backup collections").WithClues(ctx)
+    // }
+    // }
+
     writeStats, deets, toMerge, err := consumeBackupCollections(
         ctx,
         op.kopia,
@@ -411,7 +411,7 @@ func (op *BackupOperation) do(
         reasons,
         mans,
         cs,
-        nil,
+        ssmb,
         backupID,
         op.incremental && canUseMetadata && canUsePreviousBackup,
         op.Errors)
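Note: besides disabling the debug loop, the previous two hunks stop discarding the producer's second return value (ssmb) and hand it to consumeBackupCollections in place of nil. A generic sketch of that producer-to-consumer threading, with invented stand-in types since the real signatures are not shown in these hunks:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in types; the actual produceBackupDataCollections and
// consumeBackupCollections signatures are not visible in this diff.
type collection struct{ name string }
type sideband map[string]struct{} // plays the role of ssmb here

func produce() ([]collection, sideband, bool, error) {
	cols := []collection{{name: "drive-items"}}
	sb := sideband{"some-item-id": {}}

	return cols, sb, true, nil
}

func consume(cols []collection, sb sideband) error {
	if sb == nil {
		// The old call site passed nil here, silently dropping whatever the
		// producer had accumulated in its second result.
		return errors.New("missing sideband data")
	}

	fmt.Printf("consuming %d collections with %d sideband entries\n", len(cols), len(sb))

	return nil
}

func main() {
	cols, sb, _, err := produce() // previously: cols, _, ... with nil passed below
	if err != nil {
		panic(err)
	}

	if err := consume(cols, sb); err != nil {
		panic(err)
	}
}
```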
@@ -94,6 +94,12 @@ func idAnd(ss ...string) []string {
 // exported
 // ---------------------------------------------------------------------------
 
+func DriveItemSelectURLCache() []string {
+    return idAnd(
+        "deleted",
+    )
+}
+
 func DriveItemSelectDefault() []string {
     return idAnd(
         "content.downloadUrl",
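Note: idAnd appears in this diff only by signature, but from its usage the select helpers presumably compose as sketched below. The assumption that idAnd contributes only "id", and that the default list holds just the one extra field shown, is mine; the elided lines may add more.

```go
package main

import "fmt"

// idAnd is sketched from its usage: prefix the caller's fields with the
// always-required "id". The real helper may include other base fields.
func idAnd(ss ...string) []string {
	return append([]string{"id"}, ss...)
}

// The URL cache refresh only needs to know which items changed or were
// deleted, so its select list stays tiny.
func DriveItemSelectURLCache() []string {
	return idAnd("deleted")
}

// The default list additionally pulls download URLs (and, per the elided
// lines, likely more fields).
func DriveItemSelectDefault() []string {
	return idAnd("content.downloadUrl")
}

func main() {
	fmt.Println(DriveItemSelectURLCache()) // [id deleted]
	fmt.Println(DriveItemSelectDefault())  // [id content.downloadUrl]
}
```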