add diagnostics to load_test (#983)
## Description

Adds some Go runtime diagnostics tracking to load testing, including trace regions around the major operations. Unfortunately, I couldn't find a third-party tracing library that doesn't depend on a sidecar server to sample the running application, so this starts with something basic from the standard library (a sketch of the pattern is included below).

## Type of change

- [x] 🤖 Test

## Issue(s)

* #902

## Test Plan

- [x] 💪 Manual
- [x] 💚 E2E
Parent: b7f5ed73c3
Commit: d4390ac5ea
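The instrumentation in this change uses the standard library's `runtime/trace` package: a region brackets a unit of work, and a log event annotates individual items inside it. A minimal, self-contained sketch of that pattern (names here are illustrative, not taken from the diff):

```go
package main

import (
	"context"
	"log"
	"os"
	"runtime/trace"
)

// processItems shows the region/log pattern used throughout this change:
// a region brackets a unit of work, a log event annotates each item in it.
func processItems(ctx context.Context, items []string) {
	defer trace.StartRegion(ctx, "example:processItems").End()

	for _, it := range items {
		trace.Log(ctx, "example:processItems:item", it)
	}
}

func main() {
	// Write the execution trace to a file so `go tool trace trace.out`
	// can display the regions and log events.
	f, err := os.Create("trace.out")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		log.Fatal(err)
	}
	defer trace.Stop()

	processItems(context.Background(), []string{"a", "b", "c"})
}
```

Nothing in this change starts a trace by itself; the regions and logs become visible when a trace is collected externally, for example with `go test -trace=trace.out` followed by `go tool trace trace.out`.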
.gitignore (vendored): 2 changes

@@ -23,3 +23,5 @@
 /bin
 /docker/bin
 /website/dist
+
+*/test_results/**
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"runtime/trace"
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
@@ -258,74 +259,26 @@ func RestoreExchangeDataCollections(
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		pathCounter = map[string]bool{}
+		rootFolder          string
 		attempts, successes int
 		errs                error
-		folderID, root      string
-		isCancelled         bool
 		// TODO policy to be updated from external source after completion of refactoring
 		policy = control.Copy
 	)
 
+	errUpdater := func(id string, err error) {
+		errs = support.WrapAndAppend(id, err, errs)
+	}
+
 	for _, dc := range dcs {
-		var (
-			items              = dc.Items()
-			directory          = dc.FullPath()
-			service            = directory.Service()
-			category           = directory.Category()
-			user               = directory.ResourceOwner()
-			exit               bool
-			directoryCheckFunc = generateRestoreContainerFunc(gs, user, category, dest.ContainerName)
-		)
-
-		folderID, root, errs = directoryCheckFunc(ctx, errs, directory.String(), root, pathCounter)
-		if errs != nil { // assuming FailFast
-			break
-		}
-
-		if isCancelled {
-			break
-		}
-
-		for !exit {
-			select {
-			case <-ctx.Done():
-				errs = support.WrapAndAppend("context cancelled", ctx.Err(), errs)
-				isCancelled = true
-
-			case itemData, ok := <-items:
-				if !ok {
-					exit = true
-					continue
-				}
-				attempts++
-
-				buf := &bytes.Buffer{}
-
-				_, err := buf.ReadFrom(itemData.ToReader())
-				if err != nil {
-					errs = support.WrapAndAppend(
-						itemData.UUID()+": byteReadError during RestoreDataCollection",
-						err,
-						errs,
-					)
-
-					continue
-				}
-
-				err = RestoreExchangeObject(ctx, buf.Bytes(), category, policy, gs, folderID, user)
-				if err != nil {
-					// More information to be here
-					errs = support.WrapAndAppend(
-						itemData.UUID()+": failed to upload RestoreExchangeObject: "+service.String()+"-"+category.String(),
-						err,
-						errs,
-					)
-
-					continue
-				}
-				successes++
-			}
-		}
+		a, s, root, canceled := restoreCollection(ctx, gs, dc, rootFolder, pathCounter, dest, policy, errUpdater)
+		attempts += a
+		successes += s
+		rootFolder = root
+
+		if canceled {
+			break
+		}
 	}
 
 	status := support.CreateStatus(ctx, support.Restore, attempts, successes, len(pathCounter), errs)
@@ -333,6 +286,74 @@ func RestoreExchangeDataCollections(
 	return status, errs
 }
 
+// restoreCollection handles restoration of an individual collection.
+func restoreCollection(
+	ctx context.Context,
+	gs graph.Service,
+	dc data.Collection,
+	rootFolder string,
+	pathCounter map[string]bool,
+	dest control.RestoreDestination,
+	policy control.CollisionPolicy,
+	errUpdater func(string, error),
+) (int, int, string, bool) {
+	defer trace.StartRegion(ctx, "gc:exchange:restoreCollection").End()
+	trace.Log(ctx, "gc:exchange:restoreCollection", dc.FullPath().String())
+
+	var (
+		attempts, successes int
+		folderID            string
+		err                 error
+		items               = dc.Items()
+		directory           = dc.FullPath()
+		service             = directory.Service()
+		category            = directory.Category()
+		user                = directory.ResourceOwner()
+		directoryCheckFunc  = generateRestoreContainerFunc(gs, user, category, dest.ContainerName)
+	)
+
+	folderID, root, err := directoryCheckFunc(ctx, err, directory.String(), rootFolder, pathCounter)
+	if err != nil { // assuming FailFast
+		errUpdater(directory.String(), err)
+		return 0, 0, rootFolder, false
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			errUpdater("context cancelled", ctx.Err())
+			return attempts, successes, root, true
+
+		case itemData, ok := <-items:
+			if !ok {
+				return attempts, successes, root, false
+			}
+			attempts++
+
+			trace.Log(ctx, "gc:exchange:restoreCollection:item", itemData.UUID())
+
+			buf := &bytes.Buffer{}
+
+			_, err := buf.ReadFrom(itemData.ToReader())
+			if err != nil {
+				errUpdater(itemData.UUID()+": byteReadError during RestoreDataCollection", err)
+				continue
+			}
+
+			err = RestoreExchangeObject(ctx, buf.Bytes(), category, policy, gs, folderID, user)
+			if err != nil {
+				// More information to be here
+				errUpdater(
+					itemData.UUID()+": failed to upload RestoreExchangeObject: "+service.String()+"-"+category.String(),
+					err)
+
+				continue
+			}
+			successes++
+		}
+	}
+}
+
 // generateRestoreContainerFunc utility function that holds logic for creating
 // Root Directory or necessary functions based on path.CategoryType
 func generateRestoreContainerFunc(
@@ -5,6 +5,7 @@ package connector
 import (
 	"context"
 	"fmt"
+	"runtime/trace"
 	"sync"
 
 	msgraphsdk "github.com/microsoftgraph/msgraph-sdk-go"
@@ -33,7 +34,9 @@ type GraphConnector struct {
 	credentials account.M365Config
 
 	// wg is used to track completion of GC tasks
 	wg *sync.WaitGroup
+	region *trace.Region
+
 	// mutex used to synchronize updates to `status`
 	mu     sync.Mutex
 	status support.ConnectorOperationStatus // contains the status of the last run status
@@ -120,6 +123,8 @@ func (gs *graphService) EnableFailFast() {
 // workspace. The users field is updated during this method
 // iff the return value is true
 func (gc *GraphConnector) setTenantUsers(ctx context.Context) error {
+	defer trace.StartRegion(ctx, "gc:setTenantUsers").End()
+
 	response, err := exchange.GetAllUsersForTenant(ctx, gc.graphService, "")
 	if err != nil {
 		return errors.Wrapf(
@@ -248,6 +253,8 @@ func (gc *GraphConnector) RestoreDataCollections(
 	dest control.RestoreDestination,
 	dcs []data.Collection,
 ) error {
+	gc.region = trace.StartRegion(ctx, "connector:restore")
+
 	var (
 		status *support.ConnectorOperationStatus
 		err    error
@@ -340,7 +347,13 @@ func (gc *GraphConnector) createCollections(
 
 // AwaitStatus waits for all gc tasks to complete and then returns status
 func (gc *GraphConnector) AwaitStatus() *support.ConnectorOperationStatus {
+	defer func() {
+		if gc.region != nil {
+			gc.region.End()
+		}
+	}()
 	gc.wg.Wait()
 
 	return &gc.status
 }
 
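Unlike the other regions, which are ended by a `defer` in the same function, the connector's restore region is kept on the struct: `RestoreDataCollections` starts it and `AwaitStatus` ends it after the WaitGroup drains. A stripped-down sketch of that start-now/end-later pattern (the type and method names below are illustrative, not the real GraphConnector):

```go
package main

import (
	"context"
	"runtime/trace"
	"sync"
)

// connector is a stand-in for the real connector type; only the pieces
// relevant to the long-lived region are shown.
type connector struct {
	wg     sync.WaitGroup
	region *trace.Region
}

func (c *connector) StartRestore(ctx context.Context) {
	// The region is stored on the struct because the work it covers
	// finishes after this method has already returned.
	c.region = trace.StartRegion(ctx, "connector:restore")

	c.wg.Add(1)
	go func() {
		defer c.wg.Done()
		// ... restore work happens here ...
	}()
}

func (c *connector) AwaitStatus() {
	defer func() {
		// Guard against AwaitStatus being called without a prior restore.
		if c.region != nil {
			c.region.End()
		}
	}()
	c.wg.Wait()
}

func main() {
	c := &connector{}
	c.StartRestore(context.Background())
	c.AwaitStatus()
}
```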
@@ -384,6 +397,8 @@ func IsNonRecoverableError(e error) bool {
 }
 
 func (gc *GraphConnector) DataCollections(ctx context.Context, sels selectors.Selector) ([]data.Collection, error) {
+	defer trace.StartRegion(ctx, "gc:dataCollections:"+sels.Service.String()).End()
+
 	switch sels.Service {
 	case selectors.ServiceExchange:
 		return gc.ExchangeDataCollection(ctx, sels)
@@ -3,6 +3,7 @@ package onedrive
 import (
 	"context"
 	"io"
+	"runtime/trace"
 
 	"github.com/pkg/errors"
 
@@ -51,69 +52,93 @@ func RestoreCollections(
 	dcs []data.Collection,
 ) (*support.ConnectorOperationStatus, error) {
 	var (
 		total, restored int
 		restoreErrors   error
-		copyBuffer           = make([]byte, copyBufferSize)
-		restoreContainerName = dest.ContainerName
 	)
 
+	errUpdater := func(id string, err error) {
+		restoreErrors = support.WrapAndAppend(id, err, restoreErrors)
+	}
+
 	// Iterate through the data collections and restore the contents of each
 	for _, dc := range dcs {
-		directory := dc.FullPath()
-
-		drivePath, err := toOneDrivePath(directory)
-		if err != nil {
-			restoreErrors = support.WrapAndAppend(directory.String(), err, restoreErrors)
-			continue
-		}
-
-		// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
-		// from the backup under this the restore folder instead of root)
-		// i.e. Restore into `<drive>/root:/<restoreContainerName>/<original folder path>`
-
-		restoreFolderElements := []string{restoreContainerName}
-
-		restoreFolderElements = append(restoreFolderElements, drivePath.folders...)
-
-		logger.Ctx(ctx).Debugf("Restore target for %s is %v", dc.FullPath(), restoreFolderElements)
-
-		// Create restore folders and get the folder ID of the folder the data stream will be restored in
-		restoreFolderID, err := createRestoreFolders(ctx, service, drivePath.driveID, restoreFolderElements)
-		if err != nil {
-			restoreErrors = support.WrapAndAppend(directory.String(), errors.Wrapf(err, "failed to create folders %v",
-				restoreFolderElements), restoreErrors)
-			continue
-		}
-
-		// Restore items from the collection
-		exit := false
-		items := dc.Items()
-
-		for !exit {
-			select {
-			case <-ctx.Done():
-				return nil, support.WrapAndAppend("context cancelled", ctx.Err(), restoreErrors)
-			case itemData, ok := <-items:
-				if !ok {
-					exit = true
-					break
-				}
-				total++
-
-				err := restoreItem(ctx, service, itemData, drivePath.driveID, restoreFolderID, copyBuffer)
-				if err != nil {
-					restoreErrors = support.WrapAndAppend(itemData.UUID(), err, restoreErrors)
-					continue
-				}
-
-				restored++
-			}
+		t, r, canceled := restoreCollection(ctx, service, dc, dest.ContainerName, errUpdater)
+		total += t
+		restored += r
+
+		if canceled {
+			break
 		}
 	}
 
 	return support.CreateStatus(ctx, support.Restore, total, restored, 0, restoreErrors), nil
 }
 
+// restoreCollection handles restoration of an individual collection.
+func restoreCollection(
+	ctx context.Context,
+	service graph.Service,
+	dc data.Collection,
+	restoreContainerName string,
+	errUpdater func(string, error),
+) (int, int, bool) {
+	defer trace.StartRegion(ctx, "gc:oneDrive:restoreCollection").End()
+
+	var (
+		total, restored int
+		copyBuffer      = make([]byte, copyBufferSize)
+		directory       = dc.FullPath()
+	)
+
+	drivePath, err := toOneDrivePath(directory)
+	if err != nil {
+		errUpdater(directory.String(), err)
+		return 0, 0, false
+	}
+
+	// Assemble folder hierarchy we're going to restore into (we recreate the folder hierarchy
+	// from the backup under this the restore folder instead of root)
+	// i.e. Restore into `<drive>/root:/<restoreContainerName>/<original folder path>`
+
+	restoreFolderElements := []string{restoreContainerName}
+	restoreFolderElements = append(restoreFolderElements, drivePath.folders...)
+
+	trace.Log(ctx, "gc:oneDrive:restoreCollection", directory.String())
+	logger.Ctx(ctx).Debugf("Restore target for %s is %v", dc.FullPath(), restoreFolderElements)
+
+	// Create restore folders and get the folder ID of the folder the data stream will be restored in
+	restoreFolderID, err := createRestoreFolders(ctx, service, drivePath.driveID, restoreFolderElements)
+	if err != nil {
+		errUpdater(directory.String(), errors.Wrapf(err, "failed to create folders %v", restoreFolderElements))
+		return 0, 0, false
+	}
+
+	// Restore items from the collection
+	items := dc.Items()
+
+	for {
+		select {
+		case <-ctx.Done():
+			errUpdater("context canceled", ctx.Err())
+			return total, restored, true
+
+		case itemData, ok := <-items:
+			if !ok {
+				return total, restored, false
+			}
+			total++
+
+			err := restoreItem(ctx, service, itemData, drivePath.driveID, restoreFolderID, copyBuffer)
+			if err != nil {
+				errUpdater(itemData.UUID(), err)
+				continue
+			}
+
+			restored++
+		}
+	}
+}
+
 // createRestoreFolders creates the restore folder hieararchy in the specified drive and returns the folder ID
 // of the last folder entry in the hiearchy
 func createRestoreFolders(ctx context.Context, service graph.Service, driveID string, restoreFolders []string,
@@ -163,7 +188,10 @@ func createRestoreFolders(ctx context.Context, service graph.Service, driveID st
 func restoreItem(ctx context.Context, service graph.Service, itemData data.Stream, driveID, parentFolderID string,
 	copyBuffer []byte,
 ) error {
+	defer trace.StartRegion(ctx, "gc:oneDrive:restoreItem").End()
+
 	itemName := itemData.UUID()
+	trace.Log(ctx, "gc:oneDrive:restoreItem", itemName)
 
 	// Get the stream size (needed to create the upload session)
 	ss, ok := itemData.(data.StreamSize)
@@ -2,6 +2,7 @@ package kopia
 
 import (
 	"context"
+	"runtime/trace"
 	"sync"
 	"sync/atomic"
 
@@ -192,6 +193,8 @@ func getStreamItemFunc(
 	progress *corsoProgress,
 ) func(context.Context, func(context.Context, fs.Entry) error) error {
 	return func(ctx context.Context, cb func(context.Context, fs.Entry) error) error {
+		defer trace.StartRegion(ctx, "kopia:getStreamItemFunc").End()
+
 		// Collect all errors and return them at the end so that iteration for this
 		// directory doesn't end early.
 		var errs *multierror.Error
@@ -230,6 +233,8 @@ func getStreamItemFunc(
 				continue
 			}
 
+			trace.Log(ctx, "kopia:getStreamItemFunc:item", itemPath.String())
+
 			ei, ok := e.(data.StreamInfo)
 			if !ok {
 				errs = multierror.Append(
@@ -383,6 +388,8 @@ func (w Wrapper) BackupCollections(
 		return nil, nil, errNotConnected
 	}
 
+	defer trace.StartRegion(ctx, "kopia:backupCollections").End()
+
 	progress := &corsoProgress{
 		pending: map[string]*itemDetails{},
 		deets:   &details.Details{},
@@ -556,6 +563,8 @@ func (w Wrapper) RestoreMultipleItems(
 	paths []path.Path,
 	bcounter byteCounter,
 ) ([]data.Collection, error) {
+	defer trace.StartRegion(ctx, "kopia:restore:multiple").End()
+
 	if len(paths) == 0 {
 		return nil, errors.WithStack(errNoRestorePath)
 	}
@@ -2,6 +2,7 @@ package operations
 
 import (
 	"context"
+	"runtime/trace"
 	"time"
 
 	"github.com/google/uuid"
@@ -83,6 +84,8 @@ type backupStats struct {
 
 // Run begins a synchronous backup operation.
 func (op *BackupOperation) Run(ctx context.Context) (err error) {
+	defer trace.StartRegion(ctx, "operations:backup:run").End()
+
 	var (
 		opStats       backupStats
 		backupDetails *details.Details
@@ -2,6 +2,7 @@ package operations
 
 import (
 	"context"
+	"runtime/trace"
 	"time"
 
 	"github.com/google/uuid"
@@ -93,6 +94,8 @@ type restoreStats struct {
 
 // Run begins a synchronous restore operation.
 func (op *RestoreOperation) Run(ctx context.Context) (err error) {
+	defer trace.StartRegion(ctx, "operations:restore:run").End()
+
 	startTime := time.Now()
 
 	// persist operation results to the model store on exit
@@ -94,7 +94,7 @@ func Initialize(
 		return nil, err
 	}
 
-	r := repository{
+	r := &repository{
 		ID:      uuid.New(),
 		Version: "v1",
 		Account: acct,
@@ -106,7 +106,7 @@ func Initialize(
 
 	r.Bus.Event(ctx, events.RepoInit, nil)
 
-	return &r, nil
+	return r, nil
 }
 
 // Connect will:
@@ -139,16 +139,14 @@ func Connect(
 	}
 
 	// todo: ID and CreatedAt should get retrieved from a stored kopia config.
-	r := repository{
+	return &repository{
 		Version:    "v1",
 		Account:    acct,
 		Storage:    s,
 		Bus:        events.NewBus(s, acct.ID(), opts),
 		dataLayer:  w,
 		modelStore: ms,
-	}
-
-	return &r, nil
+	}, nil
 }
 
 func (r *repository) Close(ctx context.Context) error {
@@ -2,6 +2,7 @@ package repository_test
 
 import (
 	"context"
+	"runtime/pprof"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -13,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/operations"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
+	"github.com/alcionai/corso/src/pkg/backup"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -53,7 +55,16 @@ func runBackupLoadTest(
 ) {
 	//revive:enable:context-as-argument
 	t.Run("backup_"+name, func(t *testing.T) {
-		require.NoError(t, b.Run(ctx), "running backup")
+		var (
+			err    error
+			labels = pprof.Labels("backup_load_test", name)
+		)
+
+		pprof.Do(ctx, labels, func(ctx context.Context) {
+			err = b.Run(ctx)
+		})
+
+		require.NoError(t, err, "running backup")
 		require.NotEmpty(t, b.Results, "has results after run")
 		assert.NotEmpty(t, b.Results.BackupID, "has an ID after run")
 		assert.Equal(t, b.Status, operations.Completed, "backup status")
@@ -75,7 +86,16 @@ func runBackupListLoadTest(
 ) {
 	//revive:enable:context-as-argument
 	t.Run("backup_list_"+name, func(t *testing.T) {
-		bs, err := r.Backups(ctx)
+		var (
+			err    error
+			bs     []backup.Backup
+			labels = pprof.Labels("list_load_test", name)
+		)
+
+		pprof.Do(ctx, labels, func(ctx context.Context) {
+			bs, err = r.Backups(ctx)
+		})
+
 		require.NoError(t, err, "retrieving backups")
 		require.Less(t, 0, len(bs), "at least one backup is recorded")
 
@@ -105,7 +125,17 @@ func runBackupDetailsLoadTest(
 	require.NotEmpty(t, backupID, "backup ID to retrieve deails")
 
 	t.Run("backup_details_"+name, func(t *testing.T) {
-		ds, b, err := r.BackupDetails(ctx, backupID)
+		var (
+			err    error
+			b      *backup.Backup
+			ds     *details.Details
+			labels = pprof.Labels("details_load_test", name)
+		)
+
+		pprof.Do(ctx, labels, func(ctx context.Context) {
+			ds, b, err = r.BackupDetails(ctx, backupID)
+		})
+
 		require.NoError(t, err, "retrieving details in backup "+backupID)
 		require.NotNil(t, ds, "backup details must exist")
 		require.NotNil(t, b, "backup must exist")
@@ -134,8 +164,16 @@ func runRestoreLoadTest(
 ) {
 	//revive:enable:context-as-argument
 	t.Run("restore_"+name, func(t *testing.T) {
-		t.Skip("skipping restore handling while investigating performance")
-		require.NoError(t, r.Run(ctx), "running restore")
+		var (
+			err    error
+			labels = pprof.Labels("restore_load_test", name)
+		)
+
+		pprof.Do(ctx, labels, func(ctx context.Context) {
+			err = r.Run(ctx)
+		})
+
+		require.NoError(t, err, "running restore")
 		require.NotEmpty(t, r.Results, "has results after run")
 		assert.Equal(t, r.Status, operations.Completed, "restore status")
 		assert.Less(t, 0, r.Results.ItemsRead, "items read")
@@ -244,6 +282,7 @@ func TestRepositoryLoadTestOneDriveSuite(t *testing.T) {
 
 func (suite *RepositoryLoadTestOneDriveSuite) SetupSuite() {
 	t := suite.T()
+	t.Skip("temp issue-902-live")
 	t.Parallel()
 	suite.ctx, suite.repo, suite.acct, suite.st = initM365Repo(t)
 }
@@ -268,8 +307,6 @@ func (suite *RepositoryLoadTestOneDriveSuite) TestOneDrive() {
 		service = "one_drive"
 	)
 
-	t.Skip("temp issue-902-live")
-
 	m356User := tester.M365UserID(t)
 
 	// backup
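The `pprof.Do` wrappers above attach labels to everything executed inside them, but those labels and the trace regions only show up if a profile or execution trace is actually collected while the load test runs. One way is the standard `go test` flags (`-cpuprofile`, `-trace`); another is a small helper that records a trace for a single test. The helper below is a sketch under the assumption that no such helper exists in the suite yet:

```go
package repository_test

import (
	"os"
	"runtime/trace"
	"testing"
)

// startTrace is a hypothetical helper (not part of this change): it records an
// execution trace for the duration of one test so the regions, log events, and
// pprof labels added here can be inspected with `go tool trace`. Running
// `go test -trace=trace.out -cpuprofile=cpu.out` achieves the same thing for
// the whole package.
func startTrace(t *testing.T, path string) {
	t.Helper()

	f, err := os.Create(path)
	if err != nil {
		t.Fatalf("creating trace file: %v", err)
	}

	if err := trace.Start(f); err != nil {
		t.Fatalf("starting trace: %v", err)
	}

	t.Cleanup(func() {
		trace.Stop()
		_ = f.Close()
	})
}
```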
@@ -2,6 +2,7 @@ package selectors
 
 import (
 	"context"
+	"runtime/trace"
 
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/filters"
@@ -210,6 +211,8 @@ func reduce[T scopeT, C categoryT](
 	s Selector,
 	dataCategories map[path.CategoryType]C,
 ) *details.Details {
+	defer trace.StartRegion(ctx, "selectors:reduce").End()
+
 	if deets == nil {
 		return nil
 	}