Put back existing code

parent 27383e950e
commit 993b648eff

src/corso.go: 25 additions and 25 deletions
@@ -10,19 +10,23 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/pkg/profile"
-
 	"github.com/alcionai/corso/src/cli"
 	"github.com/alcionai/corso/src/pkg/logger"
+	"github.com/pkg/profile"
 )
 
-var profileTicker = time.NewTicker(1 * time.Second)
-var perMinuteMap = make(map[time.Time]int)
-var timeSinceRefresh = time.Now()
+var (
+	profileTicker    = time.NewTicker(1 * time.Second)
+	perMinuteMap     = make(map[time.Time]int)
+	timeSinceRefresh = time.Now()
+)
 
-//var profileTicker = time.NewTicker(120 * time.Second)
-
-var printTicker = time.NewTicker(1 * time.Second)
-var profileCounter = 0
+// var profileTicker = time.NewTicker(120 * time.Second)
+
+var (
+	printTicker    = time.NewTicker(1 * time.Second)
+	profileCounter = 0
+)
 
 func main() {
 	defer profile.Start(profile.MemProfile).Stop()
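For context on the line kept at the top of main(): this is the documented one-liner from github.com/pkg/profile, where Start returns a stopper and Stop writes the profile when main exits. A minimal sketch using only that library's public API; the ProfilePath option is an illustrative extra, not taken from the diff:

package main

import "github.com/pkg/profile"

func main() {
	// MemProfile selects heap profiling; Stop() writes mem.pprof on exit.
	// ProfilePath(".") is an assumption for illustration; without it the
	// library writes to a temporary directory.
	defer profile.Start(profile.MemProfile, profile.ProfilePath(".")).Stop()

	// ... workload under measurement ...
}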
@@ -38,8 +42,8 @@ func main() {
 				// if mem > 3GB and we havent captured a profile this min, capture it
 				// or if its been 2 mins since last profile, capture it
 				t := time.Now().Truncate(time.Minute)
-				//if (m.HeapAlloc > uint64(3*1024*1024*1024) && perMinuteMap[t] == 0) || time.Since(timeSinceRefresh) > 2*time.Minute {
-				if time.Since(timeSinceRefresh) > 2*time.Minute {
+				// if (m.HeapAlloc > uint64(3*1024*1024*1024) && perMinuteMap[t] == 0) || time.Since(timeSinceRefresh) > 2*time.Minute {
+				if time.Since(timeSinceRefresh) > 3*time.Minute {
 					filename := "mem." + strconv.Itoa(profileCounter) + ".pprof"
 
 					f, _ := os.Create(filename)
@@ -53,7 +57,6 @@ func main() {
 					perMinuteMap[t] = 1
 					timeSinceRefresh = time.Now()
 				}
-
 			}
 		}
 	}()
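The hunks above adjust a hand-rolled capture loop without showing it whole. Below is a self-contained sketch of how the visible pieces (the 1-second ticker, the per-minute map, the 3-minute refresh window, the mem.N.pprof files) plausibly fit together using only the standard library. The goroutine wiring and the runtime.MemStats read are assumptions; the diff keeps the HeapAlloc threshold commented out:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"strconv"
	"time"
)

var (
	profileTicker    = time.NewTicker(1 * time.Second)
	perMinuteMap     = make(map[time.Time]int)
	timeSinceRefresh = time.Now()
	profileCounter   = 0
)

func main() {
	go func() {
		for range profileTicker.C {
			// The commented-out condition in the diff also gated on
			// m.HeapAlloc > 3GB; the read is kept here for that purpose.
			var m runtime.MemStats
			runtime.ReadMemStats(&m)

			t := time.Now().Truncate(time.Minute)
			if time.Since(timeSinceRefresh) > 3*time.Minute {
				f, err := os.Create("mem." + strconv.Itoa(profileCounter) + ".pprof")
				if err != nil {
					continue
				}

				pprof.WriteHeapProfile(f) // snapshot the live heap
				f.Close()

				profileCounter++
				perMinuteMap[t] = 1 // mark this minute as captured
				timeSinceRefresh = time.Now()
			}
		}
	}()

	// ... the real workload would run here ...
	time.Sleep(10 * time.Minute)
}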
@@ -577,7 +577,7 @@ func (oc *Collection) streamDriveItem(
 		"item_name", clues.Hide(itemName),
 		"item_size", itemSize)
 
-	// item.SetParentReference(setName(item.GetParentReference(), oc.driveName))
+	item.SetParentReference(setName(item.GetParentReference(), oc.driveName))
 
 	isFile := item.GetFile() != nil
 
@@ -273,7 +273,7 @@ func ToCorsoDriveItemable(item models.DriveItemable) CorsoDriveItemable {
 		}
 	}
 
-	if item.GetCreatedBy() != nil {
+	if item.GetCreatedBy() != nil && item.GetCreatedBy().GetUser() != nil {
 		cdi.CreatedBy = &itemIdentitySet{
 			user: &itemUser{
 				additionalData: item.GetCreatedBy().GetUser().GetAdditionalData(),
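The extra guard above matters because a Graph identitySet can carry a user, an application, or a device, so GetCreatedBy() being non-nil does not imply GetUser() is. A toy analogue with stand-in types (the real code uses msgraph-sdk-go models):

package main

import "fmt"

// Stand-ins for the SDK's identitySet/user types.
type user struct{ displayName string }

type identitySet struct{ user *user }

func (i *identitySet) GetUser() *user { return i.user }

// Both checks are required: createdBy may be set while its user is nil
// (e.g. an item created by an application rather than a person).
func createdByName(createdBy *identitySet) string {
	if createdBy != nil && createdBy.GetUser() != nil {
		return createdBy.GetUser().displayName
	}

	return "unknown"
}

func main() {
	fmt.Println(createdByName(&identitySet{}))                         // unknown
	fmt.Println(createdByName(&identitySet{user: &user{"A. Person"}})) // A. Person
}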
@@ -205,12 +205,16 @@ func driveItemWriter(
 	return iw, ptr.Val(icu.GetUploadUrl()), nil
 }
 
-func setName(orig models.ItemReferenceable, driveName string) models.ItemReferenceable {
+func setName(orig parentReferenceable, driveName string) models.ItemReferenceable {
 	if orig == nil {
 		return nil
 	}
 
-	orig.SetName(&driveName)
+	mod := models.NewItemReference()
+	mod.SetDriveId(orig.GetDriveId())
+	mod.SetId(orig.GetId())
+	mod.SetPath(orig.GetPath())
+	mod.SetName(&driveName)
 
-	return orig
+	return mod
 }
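The setName rewrite stops renaming the item's own parent reference in place and instead copies the few fields the caller needs into a fresh models.ItemReference. Presumably, given this is a memory-profiling branch, that lets the original reference, and whatever larger deserialized object graph it pins, drop out of reach; the narrower parentReferenceable parameter also keeps the function honest about what it reads. A sketch of the same copy-don't-mutate shape with stand-in types:

package main

import "fmt"

// itemRef stands in for models.ItemReferenceable; the real type hangs off
// a much larger deserialized DriveItem.
type itemRef struct {
	driveID, id, path *string
	name              *string
}

// setName copies only the identifying fields into a fresh value instead of
// renaming the original in place, so the original can be garbage collected
// and is never observed half-renamed by other holders of the pointer.
func setName(orig *itemRef, driveName string) *itemRef {
	if orig == nil {
		return nil
	}

	return &itemRef{
		driveID: orig.driveID,
		id:      orig.id,
		path:    orig.path,
		name:    &driveName,
	}
}

func main() {
	id := "item-1"
	renamed := setName(&itemRef{id: &id}, "My Drive")
	fmt.Println(*renamed.name) // My Drive
}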
@@ -16,7 +16,6 @@ import (
 	"github.com/alcionai/corso/src/internal/events"
 	"github.com/alcionai/corso/src/internal/kopia"
 	kinject "github.com/alcionai/corso/src/internal/kopia/inject"
-	"github.com/alcionai/corso/src/internal/m365/collection/drive"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/internal/observe"
 	"github.com/alcionai/corso/src/internal/operations/inject"
@@ -440,111 +439,110 @@ func (op *BackupOperation) do(
 		lastBackupVersion = mans.MinBackupVersion()
 	}
 
-	iterations := 1
-
-	for i := 0; i < iterations; i++ {
-		// TODO(ashmrtn): This should probably just return a collection that deletes
-		// the entire subtree instead of returning an additional bool. That way base
-		// selection is controlled completely by flags and merging is controlled
-		// completely by collections.
-		cs, _, _, err := produceBackupDataCollections(
-			ctx,
-			op.bp,
-			op.ResourceOwner,
-			op.Selectors,
-			mdColls,
-			lastBackupVersion,
-			op.Options,
-			op.Counter,
-			op.Errors)
-		if err != nil {
-			return nil, clues.Wrap(err, "producing backup data collections")
-		}
-
-		// Sleep for 4 mins to let the memory usage settle down so that we have a better
-		// picture. Also allows pprof to run twice during this time.
-		// Do some meaningless work after to make sure the collections dont get garbage collected
-		time.Sleep(4 * time.Minute)
-
-		// sum := 0
-		// numItems := 0
-		// mapSum := 0
-
-		for _, c := range cs {
-			v, ok := c.(*drive.Collection)
-			if !ok {
-				continue
-			}
-
-			m := v.GetDriveItemsMap()
-			for key := range m {
-				logger.Ctx(ctx).Debug(key)
-			}
-
-			// Get sizeof recursively using reflect
-			// m := v.GetDriveItemsMap()
-			// for _, val := range m {
-			// 	s := size.Of(val)
-			// 	sum += s
-			// 	numItems++
-			// }
-
-			// ms := size.Of(m)
-			// mapSum += ms
-
-			// logger.Ctx(ctx).Debugf("coll drive map size %d, num drive items %d\n", ms, len(m))
-		}
-
-		// print total sum
-		// logger.Ctx(ctx).Debugf("itemSum %d, map sum %d, total items %d, mem used per item %f mem per item in map %f \n", sum, mapSum, numItems, float64(sum)/float64(numItems), float64(mapSum)/float64(numItems))
-	}
-
-	return nil, clues.New("failed")
-
-	// ctx = clues.Add(
-	// 	ctx,
-	// 	"can_use_previous_backup", canUsePreviousBackup,
-	// 	"collection_count", len(cs))
-
-	// writeStats, deets, toMerge, err := consumeBackupCollections(
-	// 	ctx,
-	// 	op.kopia,
-	// 	op.account.ID(),
-	// 	reasons,
-	// 	mans,
-	// 	cs,
-	// 	ssmb,
-	// 	backupID,
-	// 	op.incremental && canUseMetadata && canUsePreviousBackup,
-	// 	op.Counter,
-	// 	op.Errors)
-	// if err != nil {
-	// 	return nil, clues.Wrap(err, "persisting collection backups")
-	// }
-
-	// opStats.hasNewDetailEntries = (deets != nil && !deets.Empty()) ||
-	// 	(toMerge != nil && toMerge.ItemsToMerge() > 0)
-	// opStats.k = writeStats
-
-	// err = mergeDetails(
-	// 	ctx,
-	// 	detailsStore,
-	// 	mans,
-	// 	toMerge,
-	// 	deets,
-	// 	writeStats,
-	// 	op.Selectors.PathService(),
-	// 	op.Errors)
-	// if err != nil {
-	// 	return nil, clues.Wrap(err, "merging details")
-	// }
-
-	// opStats.ctrl = op.bp.Wait()
-
-	// logger.Ctx(ctx).Debug(opStats.ctrl)
-
-	// return deets, nil
+	// iterations := 1
+
+	// for i := 0; i < iterations; i++ {
+	// TODO(ashmrtn): This should probably just return a collection that deletes
+	// the entire subtree instead of returning an additional bool. That way base
+	// selection is controlled completely by flags and merging is controlled
+	// completely by collections.
+	cs, ssmb, canUsePreviousBackup, err := produceBackupDataCollections(
+		ctx,
+		op.bp,
+		op.ResourceOwner,
+		op.Selectors,
+		mdColls,
+		lastBackupVersion,
+		op.Options,
+		op.Counter,
+		op.Errors)
+	if err != nil {
+		return nil, clues.Wrap(err, "producing backup data collections")
+	}
+
+	// Sleep for 4 mins to let the memory usage settle down so that we have a better
+	// picture. Also allows pprof to run twice during this time.
+	// Do some meaningless work after to make sure the collections dont get garbage collected
+	time.Sleep(4 * time.Minute)
+
+	// sum := 0
+	// numItems := 0
+	// mapSum := 0
+
+	// for _, c := range cs {
+	// 	v, ok := c.(*drive.Collection)
+	// 	if !ok {
+	// 		continue
+	// 	}
+
+	// 	m := v.GetDriveItemsMap()
+	// 	for key := range m {
+	// 		logger.Ctx(ctx).Debug(key)
+	// 	}
+
+	// 	// Get sizeof recursively using reflect
+	// 	// m := v.GetDriveItemsMap()
+	// 	// for _, val := range m {
+	// 	// 	s := size.Of(val)
+	// 	// 	sum += s
+	// 	// 	numItems++
+	// 	// }
+
+	// 	// ms := size.Of(m)
+	// 	// mapSum += ms
+
+	// 	// logger.Ctx(ctx).Debugf("coll drive map size %d, num drive items %d\n", ms, len(m))
+	// }
+
+	// print total sum
+	// logger.Ctx(ctx).Debugf("itemSum %d, map sum %d, total items %d, mem used per item %f mem per item in map %f \n", sum, mapSum, numItems, float64(sum)/float64(numItems), float64(mapSum)/float64(numItems))
+	//}
+
+	// return nil, clues.New("failed")
+
+	ctx = clues.Add(
+		ctx,
+		"can_use_previous_backup", canUsePreviousBackup,
+		"collection_count", len(cs))
+
+	writeStats, deets, toMerge, err := consumeBackupCollections(
+		ctx,
+		op.kopia,
+		op.account.ID(),
+		reasons,
+		mans,
+		cs,
+		ssmb,
+		backupID,
+		op.incremental && canUseMetadata && canUsePreviousBackup,
+		op.Counter,
+		op.Errors)
+	if err != nil {
+		return nil, clues.Wrap(err, "persisting collection backups")
+	}
+
+	// return deets, nil
+	opStats.hasNewDetailEntries = (deets != nil && !deets.Empty()) ||
+		(toMerge != nil && toMerge.ItemsToMerge() > 0)
+	opStats.k = writeStats
+
+	err = mergeDetails(
+		ctx,
+		detailsStore,
+		mans,
+		toMerge,
+		deets,
+		writeStats,
+		op.Selectors.PathService(),
+		op.Errors)
+	if err != nil {
+		return nil, clues.Wrap(err, "merging details")
+	}
+
+	opStats.ctrl = op.bp.Wait()
+
+	logger.Ctx(ctx).Debug(opStats.ctrl)
+
+	return deets, nil
 }
 
 func makeFallbackReasons(tenant string, sel selectors.Selector) ([]identity.Reasoner, error) {
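On the hold-and-measure pattern the restored do() keeps (produce the collections, sleep so the profiler fires while they are live, then continue): without a later use, Go's GC is free to reclaim the collections before they are sampled, which is what the "meaningless work" comment guards against. A self-contained sketch of that idea with no names taken from the repo; runtime.KeepAlive is the idiomatic replacement for busy-work:

package main

import (
	"fmt"
	"runtime"
	"time"
)

func buildCollections() [][]byte {
	out := make([][]byte, 64)
	for i := range out {
		out[i] = make([]byte, 1<<20) // 64 MiB total, a visible heap bump
	}

	return out
}

func main() {
	data := buildCollections() // stands in for produceBackupDataCollections

	// Let allocation churn settle before sampling (the diff sleeps 4 minutes
	// so the profiler can sample while the data is held live).
	time.Sleep(10 * time.Second)

	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	fmt.Printf("heap alloc after settling: %d MiB\n", m.HeapAlloc>>20)

	// KeepAlive pins data past the measurement point, so the GC cannot
	// collect it early; this replaces "do some meaningless work after".
	runtime.KeepAlive(data)
}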