Merge branch 'main' into itemAttachment

This commit is contained in:
neha_gupta 2023-06-01 15:58:02 +05:30 committed by GitHub
commit 1ec8995f7f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
22 changed files with 342 additions and 590 deletions

9
.gitignore vendored
View File

@ -8,6 +8,8 @@
# Test binary, built with `go test -c` # Test binary, built with `go test -c`
*.test *.test
test_results/
testlog/
# Output of the go coverage tool, specifically when used with LiteIDE # Output of the go coverage tool, specifically when used with LiteIDE
*.out *.out
@ -21,12 +23,9 @@
.corso.toml .corso.toml
# Logging # Logging
.corso.log *.log
# Build directories # Build directories
/bin /bin
/docker/bin /docker/bin
/website/dist /website/dist
*/test_results/**
*/testlog/**

View File

@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added ### Added
- Added ProtectedResourceName to the backup list json output. ProtectedResourceName holds either a UPN or a WebURL, depending on the resource type. - Added ProtectedResourceName to the backup list json output. ProtectedResourceName holds either a UPN or a WebURL, depending on the resource type.
- Rework base selection logic for incremental backups so it's more likely to find a valid base.
### Fixed ### Fixed
- Fix Exchange folder cache population error when parent folder isn't found. - Fix Exchange folder cache population error when parent folder isn't found.

24
src/.gitignore vendored
View File

@ -1,2 +1,26 @@
dist/ dist/
corso corso
# Test binary, built with `go test -c`
*.test
test_results/
testlog/
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# IDE
.vscode
*.swp
# Standard configuration file names
.corso_test.toml
.corso.toml
# Logging
*.log
# Build directories
/bin
/docker/bin
/website/dist

View File

@ -1,325 +0,0 @@
package main
import (
"context"
"os"
"time"
"github.com/alcionai/clues"
"github.com/pkg/errors"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/connector"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
// purgeCmd is the root CLI command. With no subcommand it purges every
// supported m365 folder type (see handleAllFolderPurge).
var purgeCmd = &cobra.Command{
Use: "purge",
Short: "Purge all types of m365 folders",
RunE: handleAllFolderPurge,
}

// oneDriveCmd restricts purging to OneDrive folders only.
var oneDriveCmd = &cobra.Command{
Use: "onedrive",
Short: "Purges OneDrive folders",
RunE: handleOneDriveFolderPurge,
}

// Package-level flag storage, populated by the persistent flags registered
// in main().
var (
before string
user string
tenant string
prefix string
)

// ErrPurging is the generic, user-facing failure returned when one or more
// folders could not be purged; the underlying cause is logged separately.
var ErrPurging = clues.New("not all items were successfully purged")
// ------------------------------------------------------------------------------------------
// CLI command handlers
// ------------------------------------------------------------------------------------------
// main wires up the purge CLI: it seeds a debug-level text logger into the
// context, registers the persistent flags (user and prefix are mandatory),
// attaches the onedrive subcommand, and executes the root command.
func main() {
	logSettings := logger.Settings{
		Level:  logger.LLDebug,
		Format: logger.LFText,
	}

	ctx, _ := logger.CtxOrSeed(context.Background(), logSettings)
	ctx = SetRootCmd(ctx, purgeCmd)

	defer logger.Flush(ctx)

	flags := purgeCmd.PersistentFlags()
	flags.StringVar(&before, "before", "", "folders older than this date are deleted. (default: now in UTC)")
	flags.StringVar(&user, "user", "", "m365 user id whose folders will be deleted")
	cobra.CheckErr(purgeCmd.MarkPersistentFlagRequired("user"))
	flags.StringVar(&tenant, "tenant", "", "m365 tenant containing the user")
	flags.StringVar(&prefix, "prefix", "", "filters mail folders by displayName prefix")
	cobra.CheckErr(purgeCmd.MarkPersistentFlagRequired("prefix"))

	purgeCmd.AddCommand(oneDriveCmd)

	if err := purgeCmd.ExecuteContext(ctx); err != nil {
		// flush explicitly: os.Exit skips deferred calls.
		logger.Flush(ctx)
		os.Exit(1)
	}
}
// handleAllFolderPurge purges every supported folder type for the targeted
// user(s). Currently only OneDrive folders are covered.
//
// Returns nil when flags were absent and help was shown, a setup error from
// account/connector construction, or ErrPurging when any purge step failed.
func handleAllFolderPurge(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()

	if utils.HasNoFlagsAndShownHelp(cmd) {
		return nil
	}

	acct, gc, t, err := getGCAndBoundaryTime(ctx)
	if err != nil {
		return err
	}

	err = runPurgeForEachUser(
		ctx,
		acct,
		gc,
		t,
		purgeOneDriveFolders,
	)
	if err != nil {
		// Log the underlying cause before masking it with the generic
		// ErrPurging; previously it was silently dropped here, unlike in
		// handleOneDriveFolderPurge which logs it. This makes the two
		// handlers consistent and keeps the root cause diagnosable.
		logger.Ctx(ctx).Error(err)
		return Only(ctx, ErrPurging)
	}

	return nil
}
// handleOneDriveFolderPurge purges only OneDrive folders for the targeted
// user(s). Setup errors are returned directly; purge failures are logged
// and reported as a wrapped ErrPurging.
func handleOneDriveFolderPurge(cmd *cobra.Command, args []string) error {
	ctx := cmd.Context()

	if utils.HasNoFlagsAndShownHelp(cmd) {
		return nil
	}

	acct, gc, boundary, err := getGCAndBoundaryTime(ctx)
	if err != nil {
		return err
	}

	err = runPurgeForEachUser(ctx, acct, gc, boundary, purgeOneDriveFolders)
	if err != nil {
		logger.Ctx(ctx).Error(err)
		return Only(ctx, clues.Wrap(ErrPurging, "OneDrive folders"))
	}

	return nil
}
// ------------------------------------------------------------------------------------------
// Purge Controllers
// ------------------------------------------------------------------------------------------
// purgable is the minimal container surface needed for purging: a display
// name (checked for an embedded timestamp against the deletion boundary)
// and an ID (passed to the deleter).
type purgable interface {
GetDisplayName() *string
GetId() *string
}

// purger deletes the matching folders owned by a single user (the final
// string param) that fall before the boundary time.
type purger func(context.Context, *connector.GraphConnector, time.Time, string) error
// runPurgeForEachUser resolves the set of users to target (driven by the
// package-level --user flag) and runs every provided purger against each
// of them, stopping at the first failure.
func runPurgeForEachUser(
	ctx context.Context,
	acct account.Account,
	gc *connector.GraphConnector,
	boundary time.Time,
	ps ...purger,
) error {
	users, err := m365.Users(ctx, acct, fault.New(true))
	if err != nil {
		return clues.Wrap(err, "getting users")
	}

	for _, target := range userOrUsers(user, users) {
		Infof(ctx, "\nUser: %s - %s", target.PrincipalName, target.ID)

		for _, purge := range ps {
			err := purge(ctx, gc, boundary, target.PrincipalName)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// ----- OneDrive
// purgeOneDriveFolders deletes uid's OneDrive folders that match the
// package-level prefix filter and are older than boundary. It adapts
// OneDrive-specific listing and deletion into the generic purgeFolders
// controller via getter/deleter closures.
func purgeOneDriveFolders(
ctx context.Context,
gc *connector.GraphConnector,
boundary time.Time,
uid string,
) error {
// getter pages through all of uid's drive folders matching prefix and
// adapts each one to the purgable interface.
getter := func(gs graph.Servicer, uid, prefix string) ([]purgable, error) {
pager, err := onedrive.PagerForSource(onedrive.OneDriveSource, gs, uid, nil)
if err != nil {
return nil, err
}
cfs, err := onedrive.GetAllFolders(ctx, gs, pager, prefix, fault.New(true))
if err != nil {
return nil, err
}
purgables := make([]purgable, len(cfs))
for i, v := range cfs {
purgables[i] = v
}
return purgables, nil
}
// deleter removes a single folder by drive ID + item ID. It rejects any
// purgable that isn't an onedrive.Displayable, since the parent drive ID
// is needed for the delete call.
deleter := func(gs graph.Servicer, uid string, f purgable) error {
driveFolder, ok := f.(*onedrive.Displayable)
if !ok {
return clues.New("non-OneDrive item")
}
return api.DeleteDriveItem(
ctx,
gs,
*driveFolder.GetParentReference().GetDriveId(),
*f.GetId())
}
return purgeFolders(ctx, gc, boundary, "OneDrive Folders", uid, getter, deleter)
}
// ----- controller
// purgeFolders fetches all prefix-matched containers for a user via getter,
// then deletes (via deleter) each container whose display name embeds a
// timestamp strictly older than boundary. Containers without a parsable
// embedded time are skipped; a non-ErrNoTimeString parse failure aborts the
// run. Deletion failures are logged per-folder and the run continues; the
// most recent failure is returned so callers can surface ErrPurging.
func purgeFolders(
	ctx context.Context,
	gc *connector.GraphConnector,
	boundary time.Time,
	data, uid string,
	getter func(graph.Servicer, string, string) ([]purgable, error),
	deleter func(graph.Servicer, string, purgable) error,
) error {
	Infof(ctx, "Container: %s", data)

	// get them folders
	fs, err := getter(gc.Service, uid, prefix)
	if err != nil {
		return Only(ctx, clues.Wrap(err, "retrieving folders: "+data))
	}

	if len(fs) == 0 {
		Info(ctx, "None Matched")
		return nil
	}

	var errs error

	// delete any containers that don't pass the boundary
	for _, fld := range fs {
		// compare the folder time to the deletion boundary time first
		displayName := *fld.GetDisplayName()

		dnTime, err := dttm.ExtractTime(displayName)
		if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
			err = clues.Wrap(err, "!! Error: parsing container: "+displayName)
			Info(ctx, err)

			return err
		}

		// skip folders that have no embedded time (zero value) or that are
		// not strictly older than the boundary.
		if !dnTime.Before(boundary) || dnTime == (time.Time{}) {
			continue
		}

		Infof(ctx, "∙ Deleting [%s]", displayName)

		if err := deleter(gc.Service, uid, fld); err != nil {
			err = clues.Wrap(err, "!! Error")
			Info(ctx, err)
			// BUGFIX: errs was previously declared but never assigned, so
			// this function always returned nil even when deletions failed
			// and callers never reported ErrPurging. Record the failure
			// (last one wins) while still attempting the remaining folders.
			errs = err
		}
	}

	return errs
}
// ------------------------------------------------------------------------------------------
// Helpers
// ------------------------------------------------------------------------------------------
// getGC builds an m365 account from environment credentials (with the
// --tenant flag taking precedence over the AzureTenantID env var) and opens
// a users-scoped graph connector for it.
func getGC(ctx context.Context) (account.Account, *connector.GraphConnector, error) {
	// assemble account details from credentials plus the tenant override.
	cfg := account.M365Config{
		M365:          credentials.GetM365(),
		AzureTenantID: str.First(tenant, os.Getenv(account.AzureTenantID)),
	}

	acct, err := account.NewAccount(account.ProviderM365, cfg)
	if err != nil {
		return account.Account{}, nil, Only(ctx, clues.Wrap(err, "finding m365 account details"))
	}

	gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
	if err != nil {
		return account.Account{}, nil, Only(ctx, clues.Wrap(err, "connecting to graph api"))
	}

	return acct, gc, nil
}
// getBoundaryTime parses the --before flag into the deletion boundary time.
// When the flag is unset it defaults to the current time in UTC.
func getBoundaryTime(ctx context.Context) (time.Time, error) {
	if len(before) == 0 {
		return time.Now().UTC(), nil
	}

	boundary, err := dttm.ParseTime(before)
	if err != nil {
		return time.Time{}, Only(ctx, clues.Wrap(err, "parsing before flag to time"))
	}

	return boundary, nil
}
// getGCAndBoundaryTime is a convenience wrapper bundling getGC and
// getBoundaryTime into a single setup call for the command handlers.
func getGCAndBoundaryTime(
	ctx context.Context,
) (account.Account, *connector.GraphConnector, time.Time, error) {
	acct, gc, err := getGC(ctx)
	if err != nil {
		return account.Account{}, nil, time.Time{}, err
	}

	boundary, err := getBoundaryTime(ctx)
	if err != nil {
		return account.Account{}, nil, time.Time{}, err
	}

	return acct, gc, boundary, nil
}
// userOrUsers translates the --user flag value into the set of users to
// purge: no users for an empty flag, every known user for "*", otherwise a
// single entry naming just the requested principal.
func userOrUsers(u string, us []*m365.User) []*m365.User {
	switch {
	case len(u) == 0:
		return nil
	case u == "*":
		return us
	default:
		return []*m365.User{{PrincipalName: u}}
	}
}

View File

@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go v1.44.271 github.com/aws/aws-sdk-go v1.44.273
github.com/aws/aws-xray-sdk-go v1.8.1 github.com/aws/aws-xray-sdk-go v1.8.1
github.com/cenkalti/backoff/v4 v4.2.1 github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.0 github.com/google/uuid v1.3.0
@ -26,8 +26,8 @@ require (
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1
github.com/spf13/cobra v1.7.0 github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0 github.com/spf13/viper v1.16.0
github.com/stretchr/testify v1.8.3 github.com/stretchr/testify v1.8.4
github.com/tidwall/pretty v1.2.1 github.com/tidwall/pretty v1.2.1
github.com/tomlazar/table v0.1.2 github.com/tomlazar/table v0.1.2
github.com/vbauerster/mpb/v8 v8.1.6 github.com/vbauerster/mpb/v8 v8.1.6
@ -49,9 +49,9 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect github.com/magiconair/properties v1.8.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/spf13/afero v1.9.3 // indirect github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect
@ -115,14 +115,14 @@ require (
go.opentelemetry.io/otel/trace v1.15.1 // indirect go.opentelemetry.io/otel/trace v1.15.1 // indirect
go.uber.org/atomic v1.10.0 // indirect go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.8.0 // indirect golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.10.0 golang.org/x/net v0.10.0
golang.org/x/sync v0.2.0 // indirect golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect golang.org/x/text v0.9.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.54.0 // indirect google.golang.org/grpc v1.55.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect

View File

@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.44.271 h1:aa+Nu2JcnFmW1TLIz/67SS7KPq1I1Adl4RmExSMjGVo= github.com/aws/aws-sdk-go v1.44.273 h1:CX8O0gK+cGrgUyv7bgJ6QQP9mQg7u5mweHdNzULH47c=
github.com/aws/aws-sdk-go v1.44.271/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.273/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo= github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A= github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -113,7 +113,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@ -312,8 +312,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
@ -349,7 +349,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY= github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY=
@ -363,18 +363,18 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw= github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo= github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@ -386,9 +386,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU= github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU=
@ -450,10 +450,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -758,8 +758,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -151,10 +151,6 @@ func (suite *RestoreIntgSuite) TestRestoreEvent() {
} }
} }
type containerDeleter interface {
DeleteContainer(context.Context, string, string) error
}
// TestRestoreExchangeObject verifies path.Category usage for restored objects // TestRestoreExchangeObject verifies path.Category usage for restored objects
func (suite *RestoreIntgSuite) TestRestoreExchangeObject() { func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
t := suite.T() t := suite.T()
@ -165,12 +161,6 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
service, err := createService(m365) service, err := createService(m365)
require.NoError(t, err, clues.ToCore(err)) require.NoError(t, err, clues.ToCore(err))
deleters := map[path.CategoryType]containerDeleter{
path.EmailCategory: suite.ac.Mail(),
path.ContactsCategory: suite.ac.Contacts(),
path.EventsCategory: suite.ac.Events(),
}
userID := tester.M365UserID(suite.T()) userID := tester.M365UserID(suite.T())
tests := []struct { tests := []struct {
@ -381,10 +371,6 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
fault.New(true)) fault.New(true))
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
assert.NotNil(t, info, "item info was not populated") assert.NotNil(t, info, "item info was not populated")
assert.NotNil(t, deleters)
err = deleters[test.category].DeleteContainer(ctx, userID, destination)
assert.NoError(t, err, clues.ToCore(err))
}) })
} }
} }

View File

@ -140,29 +140,21 @@ func (mw *LoggingMiddleware) Intercept(
var ( var (
log = logger.Ctx(ctx) log = logger.Ctx(ctx)
respClass = resp.StatusCode / 100 respClass = resp.StatusCode / 100
logExtra = logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != ""
// special cases where we always dump the response body, since the response
// details might be critical to understanding the response when debugging.
// * 400-bad-request
// * 403-forbidden
logBody = logger.DebugAPIFV ||
os.Getenv(logGraphRequestsEnvKey) != "" ||
resp.StatusCode == http.StatusBadRequest ||
resp.StatusCode == http.StatusForbidden
) )
// special case: always info log 429 responses // special case: always info-level status 429 logs
if resp.StatusCode == http.StatusTooManyRequests { if resp.StatusCode == http.StatusTooManyRequests {
if logExtra { log.With("response", getRespDump(ctx, resp, logBody)).
log = log.With("response", getRespDump(ctx, resp, true)) Info("graph api throttling")
}
log.Infow(
"graph api throttling",
"limit", resp.Header.Get(rateLimitHeader),
"remaining", resp.Header.Get(rateRemainingHeader),
"reset", resp.Header.Get(rateResetHeader),
"retry-after", resp.Header.Get(retryAfterHeader))
return resp, err
}
// special case: always dump status-400-bad-request
if resp.StatusCode == http.StatusBadRequest {
log.With("response", getRespDump(ctx, resp, true)).
Error("graph api error: " + resp.Status)
return resp, err return resp, err
} }
@ -170,25 +162,18 @@ func (mw *LoggingMiddleware) Intercept(
// Log api calls according to api debugging configurations. // Log api calls according to api debugging configurations.
switch respClass { switch respClass {
case 2: case 2:
if logExtra { if logBody {
// only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log. // only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log.
dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit) dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit)
log.Infow("2xx graph api resp", "response", dump) log.Infow("2xx graph api resp", "response", dump)
} }
case 3: case 3:
log = log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))) log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))).
With("response", getRespDump(ctx, resp, false)).
if logExtra { Info("graph api redirect: " + resp.Status)
log = log.With("response", getRespDump(ctx, resp, false))
}
log.Info("graph api redirect: " + resp.Status)
default: default:
if logExtra { log.With("response", getRespDump(ctx, resp, logBody)).
log = log.With("response", getRespDump(ctx, resp, true)) Error("graph api error: " + resp.Status)
}
log.Error("graph api error: " + resp.Status)
} }
return resp, err return resp, err

View File

@ -15,10 +15,10 @@ import (
"github.com/alcionai/corso/src/pkg/logger" "github.com/alcionai/corso/src/pkg/logger"
) )
type BackupBases struct { type backupBases struct {
Backups []BackupEntry backups []BackupEntry
MergeBases []ManifestEntry mergeBases []ManifestEntry
AssistBases []ManifestEntry assistBases []ManifestEntry
} }
type BackupEntry struct { type BackupEntry struct {
@ -31,7 +31,7 @@ type baseFinder struct {
bg inject.GetBackuper bg inject.GetBackuper
} }
func NewBaseFinder( func newBaseFinder(
sm snapshotManager, sm snapshotManager,
bg inject.GetBackuper, bg inject.GetBackuper,
) (*baseFinder, error) { ) (*baseFinder, error) {
@ -183,11 +183,11 @@ func (b *baseFinder) getBase(
return b.findBasesInSet(ctx, reason, metas) return b.findBasesInSet(ctx, reason, metas)
} }
func (b *baseFinder) FindBases( func (b *baseFinder) findBases(
ctx context.Context, ctx context.Context,
reasons []Reason, reasons []Reason,
tags map[string]string, tags map[string]string,
) (BackupBases, error) { ) (backupBases, error) {
var ( var (
// All maps go from ID -> entry. We need to track by ID so we can coalesce // All maps go from ID -> entry. We need to track by ID so we can coalesce
// the reason for selecting something. Kopia assisted snapshots also use // the reason for selecting something. Kopia assisted snapshots also use
@ -251,9 +251,24 @@ func (b *baseFinder) FindBases(
} }
} }
return BackupBases{ return backupBases{
Backups: maps.Values(baseBups), backups: maps.Values(baseBups),
MergeBases: maps.Values(baseSnaps), mergeBases: maps.Values(baseSnaps),
AssistBases: maps.Values(kopiaAssistSnaps), assistBases: maps.Values(kopiaAssistSnaps),
}, nil }, nil
} }
func (b *baseFinder) FindBases(
ctx context.Context,
reasons []Reason,
tags map[string]string,
) ([]ManifestEntry, error) {
bb, err := b.findBases(ctx, reasons, tags)
if err != nil {
return nil, clues.Stack(err)
}
// assistBases contains all snapshots so we can return it while maintaining
// almost all compatibility.
return bb.assistBases, nil
}

View File

@ -342,10 +342,10 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
}, },
} }
bb, err := bf.FindBases(ctx, reasons, nil) bb, err := bf.findBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err)) assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases) assert.Empty(t, bb.mergeBases)
assert.Empty(t, bb.AssistBases) assert.Empty(t, bb.assistBases)
} }
func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() { func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
@ -366,10 +366,10 @@ func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
}, },
} }
bb, err := bf.FindBases(ctx, reasons, nil) bb, err := bf.findBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err)) assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases) assert.Empty(t, bb.mergeBases)
assert.Empty(t, bb.AssistBases) assert.Empty(t, bb.assistBases)
} }
func (suite *BaseFinderUnitSuite) TestGetBases() { func (suite *BaseFinderUnitSuite) TestGetBases() {
@ -825,7 +825,7 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
bg: &mockModelGetter{data: test.backupData}, bg: &mockModelGetter{data: test.backupData},
} }
bb, err := bf.FindBases( bb, err := bf.findBases(
ctx, ctx,
test.input, test.input,
nil) nil)
@ -833,17 +833,17 @@ func (suite *BaseFinderUnitSuite) TestGetBases() {
checkBackupEntriesMatch( checkBackupEntriesMatch(
t, t,
bb.Backups, bb.backups,
test.backupData, test.backupData,
test.expectedBaseReasons) test.expectedBaseReasons)
checkManifestEntriesMatch( checkManifestEntriesMatch(
t, t,
bb.MergeBases, bb.mergeBases,
test.manifestData, test.manifestData,
test.expectedBaseReasons) test.expectedBaseReasons)
checkManifestEntriesMatch( checkManifestEntriesMatch(
t, t,
bb.AssistBases, bb.assistBases,
test.manifestData, test.manifestData,
test.expectedAssistManifestReasons) test.expectedAssistManifestReasons)
}) })
@ -920,7 +920,7 @@ func (suite *BaseFinderUnitSuite) TestFetchPrevSnapshots_CustomTags() {
bg: &mockModelGetter{data: backupData}, bg: &mockModelGetter{data: backupData},
} }
bb, err := bf.FindBases( bb, err := bf.findBases(
ctx, ctx,
testAllUsersAllCats, testAllUsersAllCats,
test.tags) test.tags)
@ -928,7 +928,7 @@ func (suite *BaseFinderUnitSuite) TestFetchPrevSnapshots_CustomTags() {
checkManifestEntriesMatch( checkManifestEntriesMatch(
t, t,
bb.MergeBases, bb.mergeBases,
manifestData, manifestData,
test.expectedIdxs) test.expectedIdxs)
}) })

View File

@ -39,6 +39,6 @@ type (
ctx context.Context, ctx context.Context,
reasons []kopia.Reason, reasons []kopia.Reason,
tags map[string]string, tags map[string]string,
) (kopia.BackupBases, error) ) ([]kopia.ManifestEntry, error)
} }
) )

View File

@ -19,6 +19,7 @@ import (
"github.com/alcionai/corso/src/internal/data" "github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics" "github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/observe" "github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/internal/stats" "github.com/alcionai/corso/src/internal/stats"
"github.com/alcionai/corso/src/pkg/backup/details" "github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control/repository" "github.com/alcionai/corso/src/pkg/control/repository"
@ -50,14 +51,16 @@ var (
type BackupStats struct { type BackupStats struct {
SnapshotID string SnapshotID string
TotalHashedBytes int64 TotalHashedBytes int64
TotalUploadedBytes int64 TotalUploadedBytes int64
TotalNonMetaUploadedBytes int64
TotalFileCount int TotalFileCount int
CachedFileCount int TotalNonMetaFileCount int
UncachedFileCount int CachedFileCount int
TotalDirectoryCount int UncachedFileCount int
ErrorCount int TotalDirectoryCount int
ErrorCount int
IgnoredErrorCount int IgnoredErrorCount int
ExpectedIgnoredErrorCount int ExpectedIgnoredErrorCount int
@ -614,6 +617,10 @@ func (w Wrapper) FetchPrevSnapshotManifests(
return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil
} }
func (w Wrapper) NewBaseFinder(bg inject.GetBackuper) (*baseFinder, error) {
return newBaseFinder(w.c, bg)
}
func isErrEntryNotFound(err error) bool { func isErrEntryNotFound(err error) bool {
// Calling Child on a directory may return this. // Calling Child on a directory may return this.
if errors.Is(err, fs.ErrEntryNotFound) { if errors.Is(err, fs.ErrEntryNotFound) {

View File

@ -177,7 +177,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"tenant_id", clues.Hide(op.account.ID()), "tenant_id", clues.Hide(op.account.ID()),
"resource_owner", clues.Hide(op.ResourceOwner.Name()), "resource_owner_id", op.ResourceOwner.ID(),
"resource_owner_name", clues.Hide(op.ResourceOwner.Name()),
"backup_id", op.Results.BackupID, "backup_id", op.Results.BackupID,
"service", op.Selectors.Service, "service", op.Selectors.Service,
"incremental", op.incremental) "incremental", op.incremental)
@ -290,9 +291,24 @@ func (op *BackupOperation) do(
// should always be 1, since backups are 1:1 with resourceOwners. // should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1 opStats.resourceCount = 1
kbf, err := op.kopia.NewBaseFinder(op.store)
if err != nil {
return nil, clues.Stack(err)
}
type baseFinder struct {
kinject.BaseFinder
kinject.RestoreProducer
}
bf := baseFinder{
BaseFinder: kbf,
RestoreProducer: op.kopia,
}
mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata( mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(
ctx, ctx,
op.kopia, bf,
op.store, op.store,
reasons, fallbackReasons, reasons, fallbackReasons,
op.account.ID(), op.account.ID(),
@ -347,6 +363,8 @@ func (op *BackupOperation) do(
mans, mans,
toMerge, toMerge,
deets, deets,
writeStats,
op.Selectors.PathService(),
op.Errors) op.Errors)
if err != nil { if err != nil {
return nil, clues.Wrap(err, "merging details") return nil, clues.Wrap(err, "merging details")
@ -465,7 +483,7 @@ func consumeBackupCollections(
bc kinject.BackupConsumer, bc kinject.BackupConsumer,
tenantID string, tenantID string,
reasons []kopia.Reason, reasons []kopia.Reason,
mans []*kopia.ManifestEntry, mans []kopia.ManifestEntry,
cs []data.BackupCollection, cs []data.BackupCollection,
pmr prefixmatcher.StringSetReader, pmr prefixmatcher.StringSetReader,
backupID model.StableID, backupID model.StableID,
@ -650,7 +668,7 @@ func getNewPathRefs(
func lastCompleteBackups( func lastCompleteBackups(
ctx context.Context, ctx context.Context,
ms *store.Wrapper, ms *store.Wrapper,
mans []*kopia.ManifestEntry, mans []kopia.ManifestEntry,
) (map[string]*backup.Backup, int, error) { ) (map[string]*backup.Backup, int, error) {
var ( var (
oldestVersion = version.NoBackup oldestVersion = version.NoBackup
@ -701,11 +719,20 @@ func mergeDetails(
ctx context.Context, ctx context.Context,
ms *store.Wrapper, ms *store.Wrapper,
detailsStore streamstore.Streamer, detailsStore streamstore.Streamer,
mans []*kopia.ManifestEntry, mans []kopia.ManifestEntry,
dataFromBackup kopia.DetailsMergeInfoer, dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder, deets *details.Builder,
writeStats *kopia.BackupStats,
serviceType path.ServiceType,
errs *fault.Bus, errs *fault.Bus,
) error { ) error {
detailsModel := deets.Details().DetailsModel
// getting the values in writeStats before anything else so that we don't get a return from
// conditions like no backup data.
writeStats.TotalNonMetaFileCount = len(detailsModel.FilterMetaFiles().Items())
writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
// Don't bother loading any of the base details if there's nothing we need to merge. // Don't bother loading any of the base details if there's nothing we need to merge.
if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 { if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
return nil return nil
@ -841,6 +868,8 @@ func (op *BackupOperation) persistResults(
op.Results.BytesRead = opStats.k.TotalHashedBytes op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsWritten = opStats.k.TotalFileCount op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.NonMetaBytesUploaded = opStats.k.TotalNonMetaUploadedBytes
op.Results.NonMetaItemsWritten = opStats.k.TotalNonMetaFileCount
op.Results.ResourceOwners = opStats.resourceCount op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil { if opStats.gc == nil {

View File

@ -961,6 +961,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten int deltaItemsWritten int
nonDeltaItemsRead int nonDeltaItemsRead int
nonDeltaItemsWritten int nonDeltaItemsWritten int
nonMetaItemsWritten int
}{ }{
{ {
name: "clean, no changes", name: "clean, no changes",
@ -969,6 +970,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, deltaItemsWritten: 0,
nonDeltaItemsRead: 8, nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0, // unchanged items are not counted towards write nonDeltaItemsWritten: 0, // unchanged items are not counted towards write
nonMetaItemsWritten: 4,
}, },
{ {
name: "move an email folder to a subfolder", name: "move an email folder to a subfolder",
@ -992,6 +994,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2, deltaItemsWritten: 2,
nonDeltaItemsRead: 8, nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 2, nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
}, },
{ {
name: "delete a folder", name: "delete a folder",
@ -1018,6 +1021,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletions are not counted as "writes" deltaItemsWritten: 0, // deletions are not counted as "writes"
nonDeltaItemsRead: 4, nonDeltaItemsRead: 4,
nonDeltaItemsWritten: 0, nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
}, },
{ {
name: "add a new folder", name: "add a new folder",
@ -1070,6 +1074,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 4, deltaItemsWritten: 4,
nonDeltaItemsRead: 8, nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 4, nonDeltaItemsWritten: 4,
nonMetaItemsWritten: 8,
}, },
{ {
name: "rename a folder", name: "rename a folder",
@ -1125,6 +1130,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // two items per category deltaItemsWritten: 0, // two items per category
nonDeltaItemsRead: 8, nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0, nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
}, },
{ {
name: "add a new item", name: "add a new item",
@ -1178,6 +1184,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2, deltaItemsWritten: 2,
nonDeltaItemsRead: 10, nonDeltaItemsRead: 10,
nonDeltaItemsWritten: 2, nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
}, },
{ {
name: "delete an existing item", name: "delete an existing item",
@ -1231,6 +1238,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletes are not counted as "writes" deltaItemsWritten: 0, // deletes are not counted as "writes"
nonDeltaItemsRead: 8, nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0, nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
}, },
} }
@ -1263,7 +1271,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read") assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written") assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
} }
assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure())) assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors") assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events") assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@ -1527,9 +1535,10 @@ func runDriveIncrementalTest(
table := []struct { table := []struct {
name string name string
// performs the incremental update required for the test. // performs the incremental update required for the test.
updateFiles func(t *testing.T) updateFiles func(t *testing.T)
itemsRead int itemsRead int
itemsWritten int itemsWritten int
nonMetaItemsWritten int
}{ }{
{ {
name: "clean incremental, no changes", name: "clean incremental, no changes",
@ -1556,8 +1565,9 @@ func runDriveIncrementalTest(
expectDeets.AddItem(driveID, makeLocRef(container1), newFileID) expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
}, },
{ {
name: "add permission to new file", name: "add permission to new file",
@ -1578,8 +1588,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err)) require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated) itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // the file for which permission was updated
}, },
{ {
name: "remove permission from new file", name: "remove permission from new file",
@ -1599,8 +1610,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err)) require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated) itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, //.data file for newitem
}, },
{ {
name: "add permission to container", name: "add permission to container",
@ -1621,8 +1633,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err)) require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated as update on container
}, },
{ {
name: "remove permission from container", name: "remove permission from container",
@ -1643,8 +1656,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err)) require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked // no expectedDeets: metadata isn't tracked
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated
}, },
{ {
name: "update contents of a file", name: "update contents of a file",
@ -1658,8 +1672,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err)) require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err))
// no expectedDeets: neither file id nor location changed // no expectedDeets: neither file id nor location changed
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
}, },
{ {
name: "rename a file", name: "rename a file",
@ -1681,8 +1696,9 @@ func runDriveIncrementalTest(
driveItem) driveItem)
require.NoError(t, err, "renaming file %v", clues.ToCore(err)) require.NoError(t, err, "renaming file %v", clues.ToCore(err))
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
// no expectedDeets: neither file id nor location changed // no expectedDeets: neither file id nor location changed
}, },
{ {
@ -1710,8 +1726,9 @@ func runDriveIncrementalTest(
makeLocRef(container2), makeLocRef(container2),
ptr.Val(newFile.GetId())) ptr.Val(newFile.GetId()))
}, },
itemsRead: 1, // .data file for newitem itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
}, },
{ {
name: "delete file", name: "delete file",
@ -1725,8 +1742,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId())) expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 0, itemsWritten: 0,
nonMetaItemsWritten: 0,
}, },
{ {
name: "move a folder to a subfolder", name: "move a folder to a subfolder",
@ -1753,8 +1771,9 @@ func runDriveIncrementalTest(
makeLocRef(container2), makeLocRef(container2),
makeLocRef(container1)) makeLocRef(container1))
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target) itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
}, },
{ {
name: "rename a folder", name: "rename a folder",
@ -1783,8 +1802,9 @@ func runDriveIncrementalTest(
makeLocRef(container1, container2), makeLocRef(container1, container2),
makeLocRef(container1, containerRename)) makeLocRef(container1, containerRename))
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target) itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
}, },
{ {
name: "delete a folder", name: "delete a folder",
@ -1799,8 +1819,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename)) expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
}, },
itemsRead: 0, itemsRead: 0,
itemsWritten: 0, itemsWritten: 0,
nonMetaItemsWritten: 0,
}, },
{ {
name: "add a new folder", name: "add a new folder",
@ -1831,8 +1852,9 @@ func runDriveIncrementalTest(
expectDeets.AddLocation(driveID, container3) expectDeets.AddLocation(driveID, container3)
}, },
itemsRead: 2, // 2 .data for 2 files itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta itemsWritten: 6, // read items + 2 directory meta
nonMetaItemsWritten: 2, // 2 .data for 2 files
}, },
} }
for _, test := range table { for _, test := range table {
@ -1862,9 +1884,10 @@ func runDriveIncrementalTest(
// do some additional checks to ensure the incremental dealt with fewer items. // do some additional checks to ensure the incremental dealt with fewer items.
// +2 on read/writes to account for metadata: 1 delta and 1 path. // +2 on read/writes to account for metadata: 1 delta and 1 path.
var ( var (
expectWrites = test.itemsWritten + 2 expectWrites = test.itemsWritten + 2
expectReads = test.itemsRead + 2 expectNonMetaWrites = test.nonMetaItemsWritten
assertReadWrite = assert.Equal expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
) )
// Sharepoint can produce a superset of permissions by nature of // Sharepoint can produce a superset of permissions by nature of
@ -1876,6 +1899,7 @@ func runDriveIncrementalTest(
} }
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written") assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read") assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure())) assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
@ -1976,6 +2000,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
// 2 on read/writes to account for metadata: 1 delta and 1 path. // 2 on read/writes to account for metadata: 1 delta and 1 path.
assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written") assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written")
assert.LessOrEqual(t, 1, incBO.Results.NonMetaItemsWritten, "non meta items written")
assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read") assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure())) assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors") assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")

View File

@ -533,12 +533,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
table := []struct { table := []struct {
name string name string
inputMan []*kopia.ManifestEntry inputMan []kopia.ManifestEntry
expected []kopia.IncrementalBase expected []kopia.IncrementalBase
}{ }{
{ {
name: "SingleManifestSingleReason", name: "SingleManifestSingleReason",
inputMan: []*kopia.ManifestEntry{ inputMan: []kopia.ManifestEntry{
{ {
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -557,7 +557,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
}, },
{ {
name: "SingleManifestMultipleReasons", name: "SingleManifestMultipleReasons",
inputMan: []*kopia.ManifestEntry{ inputMan: []kopia.ManifestEntry{
{ {
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -578,7 +578,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
}, },
{ {
name: "MultipleManifestsMultipleReasons", name: "MultipleManifestsMultipleReasons",
inputMan: []*kopia.ManifestEntry{ inputMan: []kopia.ManifestEntry{
{ {
Manifest: manifest1, Manifest: manifest1,
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -731,7 +731,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
name string name string
populatedModels map[model.StableID]backup.Backup populatedModels map[model.StableID]backup.Backup
populatedDetails map[string]*details.Details populatedDetails map[string]*details.Details
inputMans []*kopia.ManifestEntry inputMans []kopia.ManifestEntry
mdm *mockDetailsMergeInfoer mdm *mockDetailsMergeInfoer
errCheck assert.ErrorAssertionFunc errCheck assert.ErrorAssertionFunc
@ -758,7 +758,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), "foo", ""), Manifest: makeManifest(suite.T(), "foo", ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -776,7 +776,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -803,7 +803,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -833,7 +833,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -869,7 +869,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -931,7 +931,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -961,7 +961,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -994,7 +994,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1027,7 +1027,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1061,7 +1061,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1095,7 +1095,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1146,7 +1146,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res return res
}(), }(),
inputMans: []*kopia.ManifestEntry{ inputMans: []kopia.ManifestEntry{
{ {
Manifest: makeManifest(suite.T(), backup1.ID, ""), Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1198,6 +1198,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
mds := ssmock.Streamer{Deets: test.populatedDetails} mds := ssmock.Streamer{Deets: test.populatedDetails}
w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}} w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
deets := details.Builder{} deets := details.Builder{}
writeStats := kopia.BackupStats{}
err := mergeDetails( err := mergeDetails(
ctx, ctx,
@ -1206,6 +1207,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
test.inputMans, test.inputMans,
test.mdm, test.mdm,
&deets, &deets,
&writeStats,
path.OneDriveService,
fault.New(true)) fault.New(true))
test.errCheck(t, err, clues.ToCore(err)) test.errCheck(t, err, clues.ToCore(err))
@ -1255,7 +1258,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
Category: itemPath1.Category(), Category: itemPath1.Category(),
} }
inputMans = []*kopia.ManifestEntry{ inputMans = []kopia.ManifestEntry{
{ {
Manifest: makeManifest(t, backup1.ID, ""), Manifest: makeManifest(t, backup1.ID, ""),
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -1307,9 +1310,10 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
defer flush() defer flush()
var ( var (
mds = ssmock.Streamer{Deets: populatedDetails} mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}} w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{} deets = details.Builder{}
writeStats = kopia.BackupStats{}
) )
err := mergeDetails( err := mergeDetails(
@ -1319,6 +1323,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
inputMans, inputMans,
mdm, mdm,
&deets, &deets,
&writeStats,
path.ExchangeService,
fault.New(true)) fault.New(true))
assert.NoError(t, err, clues.ToCore(err)) assert.NoError(t, err, clues.ToCore(err))
compareDeetEntries(t, expectedEntries, deets.Details().Entries) compareDeetEntries(t, expectedEntries, deets.Details().Entries)

View File

@ -19,16 +19,8 @@ import (
"github.com/alcionai/corso/src/pkg/path" "github.com/alcionai/corso/src/pkg/path"
) )
type manifestFetcher interface {
FetchPrevSnapshotManifests(
ctx context.Context,
reasons []kopia.Reason,
tags map[string]string,
) ([]*kopia.ManifestEntry, error)
}
type manifestRestorer interface { type manifestRestorer interface {
manifestFetcher inject.BaseFinder
inject.RestoreProducer inject.RestoreProducer
} }
@ -47,14 +39,14 @@ func produceManifestsAndMetadata(
reasons, fallbackReasons []kopia.Reason, reasons, fallbackReasons []kopia.Reason,
tenantID string, tenantID string,
getMetadata bool, getMetadata bool,
) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) { ) ([]kopia.ManifestEntry, []data.RestoreCollection, bool, error) {
var ( var (
tags = map[string]string{kopia.TagBackupCategory: ""} tags = map[string]string{kopia.TagBackupCategory: ""}
metadataFiles = graph.AllMetadataFileNames() metadataFiles = graph.AllMetadataFileNames()
collections []data.RestoreCollection collections []data.RestoreCollection
) )
ms, err := mr.FetchPrevSnapshotManifests(ctx, reasons, tags) ms, err := mr.FindBases(ctx, reasons, tags)
if err != nil { if err != nil {
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots") return nil, nil, false, clues.Wrap(err, "looking up prior snapshots")
} }
@ -70,7 +62,7 @@ func produceManifestsAndMetadata(
return ms, nil, false, nil return ms, nil, false, nil
} }
fbms, err := mr.FetchPrevSnapshotManifests(ctx, fallbackReasons, tags) fbms, err := mr.FindBases(ctx, fallbackReasons, tags)
if err != nil { if err != nil {
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots under alternate id") return nil, nil, false, clues.Wrap(err, "looking up prior snapshots under alternate id")
} }
@ -177,9 +169,9 @@ func produceManifestsAndMetadata(
// 3. If mans has no entry for a reason, look for both complete and incomplete fallbacks. // 3. If mans has no entry for a reason, look for both complete and incomplete fallbacks.
func unionManifests( func unionManifests(
reasons []kopia.Reason, reasons []kopia.Reason,
mans []*kopia.ManifestEntry, mans []kopia.ManifestEntry,
fallback []*kopia.ManifestEntry, fallback []kopia.ManifestEntry,
) []*kopia.ManifestEntry { ) []kopia.ManifestEntry {
if len(fallback) == 0 { if len(fallback) == 0 {
return mans return mans
} }
@ -203,7 +195,9 @@ func unionManifests(
} }
// track the manifests that were collected with the current lookup // track the manifests that were collected with the current lookup
for _, m := range mans { for i := range mans {
m := &mans[i]
for _, r := range m.Reasons { for _, r := range m.Reasons {
k := r.Service.String() + r.Category.String() k := r.Service.String() + r.Category.String()
t := tups[k] t := tups[k]
@ -219,7 +213,8 @@ func unionManifests(
} }
// backfill from the fallback where necessary // backfill from the fallback where necessary
for _, m := range fallback { for i := range fallback {
m := &fallback[i]
useReasons := []kopia.Reason{} useReasons := []kopia.Reason{}
for _, r := range m.Reasons { for _, r := range m.Reasons {
@ -250,15 +245,15 @@ func unionManifests(
} }
// collect the results into a single slice of manifests // collect the results into a single slice of manifests
ms := map[string]*kopia.ManifestEntry{} ms := map[string]kopia.ManifestEntry{}
for _, m := range tups { for _, m := range tups {
if m.complete != nil { if m.complete != nil {
ms[string(m.complete.ID)] = m.complete ms[string(m.complete.ID)] = *m.complete
} }
if m.incomplete != nil { if m.incomplete != nil {
ms[string(m.incomplete.ID)] = m.incomplete ms[string(m.incomplete.ID)] = *m.incomplete
} }
} }
@ -269,7 +264,7 @@ func unionManifests(
// of manifests, that each manifest's Reason (owner, service, category) is only // of manifests, that each manifest's Reason (owner, service, category) is only
// included once. If a reason is duplicated by any two manifests, an error is // included once. If a reason is duplicated by any two manifests, an error is
// returned. // returned.
func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry) error { func verifyDistinctBases(ctx context.Context, mans []kopia.ManifestEntry) error {
reasons := map[string]manifest.ID{} reasons := map[string]manifest.ID{}
for _, man := range mans { for _, man := range mans {
@ -303,7 +298,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry) error
func collectMetadata( func collectMetadata(
ctx context.Context, ctx context.Context,
r inject.RestoreProducer, r inject.RestoreProducer,
man *kopia.ManifestEntry, man kopia.ManifestEntry,
fileNames []string, fileNames []string,
tenantID string, tenantID string,
errs *fault.Bus, errs *fault.Bus,

View File

@ -27,16 +27,16 @@ import (
type mockManifestRestorer struct { type mockManifestRestorer struct {
mockRestoreProducer mockRestoreProducer
mans []*kopia.ManifestEntry mans []kopia.ManifestEntry
mrErr error // err varname already claimed by mockRestoreProducer mrErr error // err varname already claimed by mockRestoreProducer
} }
func (mmr mockManifestRestorer) FetchPrevSnapshotManifests( func (mmr mockManifestRestorer) FindBases(
ctx context.Context, ctx context.Context,
reasons []kopia.Reason, reasons []kopia.Reason,
tags map[string]string, tags map[string]string,
) ([]*kopia.ManifestEntry, error) { ) ([]kopia.ManifestEntry, error) {
mans := map[string]*kopia.ManifestEntry{} mans := map[string]kopia.ManifestEntry{}
for _, r := range reasons { for _, r := range reasons {
for _, m := range mmr.mans { for _, m := range mmr.mans {
@ -49,10 +49,6 @@ func (mmr mockManifestRestorer) FetchPrevSnapshotManifests(
} }
} }
if len(mans) == 0 && len(reasons) == 0 {
return mmr.mans, mmr.mrErr
}
return maps.Values(mans), mmr.mrErr return maps.Values(mans), mmr.mrErr
} }
@ -247,7 +243,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
mr := mockRestoreProducer{err: test.expectErr} mr := mockRestoreProducer{err: test.expectErr}
mr.buildRestoreFunc(t, test.manID, paths) mr.buildRestoreFunc(t, test.manID, paths)
man := &kopia.ManifestEntry{ man := kopia.ManifestEntry{
Manifest: &snapshot.Manifest{ID: manifest.ID(test.manID)}, Manifest: &snapshot.Manifest{ID: manifest.ID(test.manID)},
Reasons: test.reasons, Reasons: test.reasons,
} }
@ -263,12 +259,12 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
table := []struct { table := []struct {
name string name string
mans []*kopia.ManifestEntry mans []kopia.ManifestEntry
expect assert.ErrorAssertionFunc expect assert.ErrorAssertionFunc
}{ }{
{ {
name: "one manifest, one reason", name: "one manifest, one reason",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -284,7 +280,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "one incomplete manifest", name: "one incomplete manifest",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{IncompleteReason: "ir"}, Manifest: &snapshot.Manifest{IncompleteReason: "ir"},
}, },
@ -293,7 +289,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "one manifest, multiple reasons", name: "one manifest, multiple reasons",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -314,7 +310,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "one manifest, duplicate reasons", name: "one manifest, duplicate reasons",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -335,7 +331,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "two manifests, non-overlapping reasons", name: "two manifests, non-overlapping reasons",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -361,7 +357,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "two manifests, overlapping reasons", name: "two manifests, overlapping reasons",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -387,7 +383,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
}, },
{ {
name: "two manifests, overlapping reasons, one snapshot incomplete", name: "two manifests, overlapping reasons, one snapshot incomplete",
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
@ -430,13 +426,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
did = "detailsid" did = "detailsid"
) )
makeMan := func(pct path.CategoryType, id, incmpl, bid string) *kopia.ManifestEntry { makeMan := func(pct path.CategoryType, id, incmpl, bid string) kopia.ManifestEntry {
tags := map[string]string{} tags := map[string]string{}
if len(bid) > 0 { if len(bid) > 0 {
tags = map[string]string{"tag:" + kopia.TagBackupID: bid} tags = map[string]string{"tag:" + kopia.TagBackupID: bid}
} }
return &kopia.ManifestEntry{ return kopia.ManifestEntry{
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: manifest.ID(id), ID: manifest.ID(id),
IncompleteReason: incmpl, IncompleteReason: incmpl,
@ -456,7 +452,6 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name string name string
mr mockManifestRestorer mr mockManifestRestorer
gb mockGetBackuper gb mockGetBackuper
reasons []kopia.Reason
getMeta bool getMeta bool
assertErr assert.ErrorAssertionFunc assertErr assert.ErrorAssertionFunc
assertB assert.BoolAssertionFunc assertB assert.BoolAssertionFunc
@ -467,10 +462,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata, no mans", name: "don't get metadata, no mans",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{}, mans: []kopia.ManifestEntry{},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false, getMeta: false,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.False, assertB: assert.False,
@ -480,10 +474,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata", name: "don't get metadata",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "")},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false, getMeta: false,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.False, assertB: assert.False,
@ -493,10 +486,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata, incomplete manifest", name: "don't get metadata, incomplete manifest",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "ir", "")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "ir", "")},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false, getMeta: false,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.False, assertB: assert.False,
@ -509,7 +501,6 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mrErr: assert.AnError, mrErr: assert.AnError,
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.Error, assertErr: assert.Error,
assertB: assert.False, assertB: assert.False,
@ -519,13 +510,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "verify distinct bases fails", name: "verify distinct bases fails",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id1", "", ""), makeMan(path.EmailCategory, "id1", "", ""),
makeMan(path.EmailCategory, "id2", "", ""), makeMan(path.EmailCategory, "id2", "", ""),
}, },
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, // No error, even though verify failed. assertErr: assert.NoError, // No error, even though verify failed.
assertB: assert.False, assertB: assert.False,
@ -535,10 +525,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "no manifests", name: "no manifests",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{}, mans: []kopia.ManifestEntry{},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.True, assertB: assert.True,
@ -548,13 +537,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "only incomplete manifests", name: "only incomplete manifests",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id1", "ir", ""), makeMan(path.EmailCategory, "id1", "ir", ""),
makeMan(path.ContactsCategory, "id2", "ir", ""), makeMan(path.ContactsCategory, "id2", "ir", ""),
}, },
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.True, assertB: assert.True,
@ -568,10 +556,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}}, "id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
}, },
}, },
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.Error, assertErr: assert.Error,
assertB: assert.False, assertB: assert.False,
@ -581,10 +568,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "backup missing details id", name: "backup missing details id",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{}, mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
}, },
gb: mockGetBackuper{}, gb: mockGetBackuper{},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.False, assertB: assert.False,
@ -598,13 +584,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"incmpl_id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}}, "incmpl_id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}},
}, },
}, },
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id", "", "bid"), makeMan(path.EmailCategory, "id", "", "bid"),
makeMan(path.EmailCategory, "incmpl_id", "ir", ""), makeMan(path.EmailCategory, "incmpl_id", "ir", ""),
}, },
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.True, assertB: assert.True,
@ -618,10 +603,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}}, "id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
}, },
}, },
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.True, assertB: assert.True,
@ -636,13 +620,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"contact": {data.NotFoundRestoreCollection{Collection: mockColl{id: "contact_coll"}}}, "contact": {data.NotFoundRestoreCollection{Collection: mockColl{id: "contact_coll"}}},
}, },
}, },
mans: []*kopia.ManifestEntry{ mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "mail", "", "bid"), makeMan(path.EmailCategory, "mail", "", "bid"),
makeMan(path.ContactsCategory, "contact", "", "bid"), makeMan(path.ContactsCategory, "contact", "", "bid"),
}, },
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.NoError, assertErr: assert.NoError,
assertB: assert.True, assertB: assert.True,
@ -655,10 +638,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "error collecting metadata", name: "error collecting metadata",
mr: mockManifestRestorer{ mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{err: assert.AnError}, mockRestoreProducer: mockRestoreProducer{err: assert.AnError},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")}, mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
}, },
gb: mockGetBackuper{detailsID: did}, gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true, getMeta: true,
assertErr: assert.Error, assertErr: assert.Error,
assertB: assert.False, assertB: assert.False,
@ -677,7 +659,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
ctx, ctx,
&test.mr, &test.mr,
&test.gb, &test.gb,
test.reasons, nil, []kopia.Reason{{ResourceOwner: ro}}, nil,
tid, tid,
test.getMeta) test.getMeta)
test.assertErr(t, err, clues.ToCore(err)) test.assertErr(t, err, clues.ToCore(err))
@ -739,8 +721,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
fbIncomplete = "fb_incmpl" fbIncomplete = "fb_incmpl"
) )
makeMan := func(id, incmpl string, reasons []kopia.Reason) *kopia.ManifestEntry { makeMan := func(id, incmpl string, reasons []kopia.Reason) kopia.ManifestEntry {
return &kopia.ManifestEntry{ return kopia.ManifestEntry{
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: manifest.ID(id), ID: manifest.ID(id),
IncompleteReason: incmpl, IncompleteReason: incmpl,
@ -1005,7 +987,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
}) })
} }
mans := []*kopia.ManifestEntry{} mans := []kopia.ManifestEntry{}
for _, m := range test.man { for _, m := range test.man {
incomplete := "" incomplete := ""
@ -1027,7 +1009,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
mr := mockManifestRestorer{mans: mans} mr := mockManifestRestorer{mans: mans}
mans, _, b, err := produceManifestsAndMetadata( gotMans, _, b, err := produceManifestsAndMetadata(
ctx, ctx,
&mr, &mr,
nil, nil,
@ -1040,7 +1022,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
manIDs := []string{} manIDs := []string{}
for _, m := range mans { for _, m := range gotMans {
manIDs = append(manIDs, string(m.ID)) manIDs = append(manIDs, string(m.ID))
reasons := test.expectReasons[string(m.ID)] reasons := test.expectReasons[string(m.ID)]
@ -1075,12 +1057,12 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
table := []struct { table := []struct {
name string name string
input []*kopia.ManifestEntry input []kopia.ManifestEntry
errCheck assert.ErrorAssertionFunc errCheck assert.ErrorAssertionFunc
}{ }{
{ {
name: "SingleManifestMultipleReasons", name: "SingleManifestMultipleReasons",
input: []*kopia.ManifestEntry{ input: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: "id1", ID: "id1",
@ -1103,7 +1085,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
}, },
{ {
name: "MultipleManifestsDistinctReason", name: "MultipleManifestsDistinctReason",
input: []*kopia.ManifestEntry{ input: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: "id1", ID: "id1",
@ -1133,7 +1115,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
}, },
{ {
name: "MultipleManifestsSameReason", name: "MultipleManifestsSameReason",
input: []*kopia.ManifestEntry{ input: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: "id1", ID: "id1",
@ -1163,7 +1145,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
}, },
{ {
name: "MultipleManifestsSameReasonOneIncomplete", name: "MultipleManifestsSameReasonOneIncomplete",
input: []*kopia.ManifestEntry{ input: []kopia.ManifestEntry{
{ {
Manifest: &snapshot.Manifest{ Manifest: &snapshot.Manifest{
ID: "id1", ID: "id1",
@ -1250,13 +1232,13 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
table := []struct { table := []struct {
name string name string
inputMan *kopia.ManifestEntry inputMan kopia.ManifestEntry
inputFiles []string inputFiles []string
expected []path.Path expected []path.Path
}{ }{
{ {
name: "SingleReasonSingleFile", name: "SingleReasonSingleFile",
inputMan: &kopia.ManifestEntry{ inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
{ {
@ -1271,7 +1253,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
}, },
{ {
name: "SingleReasonMultipleFiles", name: "SingleReasonMultipleFiles",
inputMan: &kopia.ManifestEntry{ inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
{ {
@ -1286,7 +1268,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
}, },
{ {
name: "MultipleReasonsMultipleFiles", name: "MultipleReasonsMultipleFiles",
inputMan: &kopia.ManifestEntry{ inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{}, Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{ Reasons: []kopia.Reason{
{ {

View File

@ -194,6 +194,10 @@ func (op *RestoreOperation) do(
detailsStore streamstore.Reader, detailsStore streamstore.Reader,
start time.Time, start time.Time,
) (*details.Details, error) { ) (*details.Details, error) {
logger.Ctx(ctx).
With("control_options", op.Options, "selectors", op.Selectors).
Info("restoring selection")
bup, deets, err := getBackupAndDetailsFromID( bup, deets, err := getBackupAndDetailsFromID(
ctx, ctx,
op.BackupID, op.BackupID,
@ -213,7 +217,8 @@ func (op *RestoreOperation) do(
ctx = clues.Add( ctx = clues.Add(
ctx, ctx,
"resource_owner", bup.Selector.DiscreteOwner, "resource_owner_id", bup.Selector.ID(),
"resource_owner_name", clues.Hide(bup.Selector.Name()),
"details_entries", len(deets.Entries), "details_entries", len(deets.Entries),
"details_paths", len(paths), "details_paths", len(paths),
"backup_snapshot_id", bup.SnapshotID, "backup_snapshot_id", bup.SnapshotID,
@ -230,7 +235,6 @@ func (op *RestoreOperation) do(
}) })
observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID)) observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
logger.Ctx(ctx).With("control_options", op.Options, "selectors", op.Selectors).Info("restoring selection")
kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository") kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
defer closer() defer closer()

View File

@ -8,11 +8,13 @@ import (
// ReadWrites tracks the total count of reads and writes. ItemsRead // ReadWrites tracks the total count of reads and writes. ItemsRead
// and ItemsWritten counts are assumed to be successful reads. // and ItemsWritten counts are assumed to be successful reads.
type ReadWrites struct { type ReadWrites struct {
BytesRead int64 `json:"bytesRead,omitempty"` BytesRead int64 `json:"bytesRead,omitempty"`
BytesUploaded int64 `json:"bytesUploaded,omitempty"` BytesUploaded int64 `json:"bytesUploaded,omitempty"`
ItemsRead int `json:"itemsRead,omitempty"` ItemsRead int `json:"itemsRead,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"` NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"` NonMetaItemsWritten int `json:"nonMetaItemsWritten,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"`
} }
// StartAndEndTime tracks a paired starting time and ending time. // StartAndEndTime tracks a paired starting time and ending time.

View File

@ -284,12 +284,12 @@ func (b Backup) toStats() backupStats {
return backupStats{ return backupStats{
ID: string(b.ID), ID: string(b.ID),
BytesRead: b.BytesRead, BytesRead: b.BytesRead,
BytesUploaded: b.BytesUploaded, BytesUploaded: b.NonMetaBytesUploaded,
EndedAt: b.CompletedAt, EndedAt: b.CompletedAt,
ErrorCount: b.ErrorCount, ErrorCount: b.ErrorCount,
ItemsRead: b.ItemsRead, ItemsRead: b.ItemsRead,
ItemsSkipped: b.TotalSkippedItems, ItemsSkipped: b.TotalSkippedItems,
ItemsWritten: b.ItemsWritten, ItemsWritten: b.NonMetaItemsWritten,
StartedAt: b.StartedAt, StartedAt: b.StartedAt,
} }
} }

View File

@ -49,10 +49,12 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup {
ErrorCount: 2, ErrorCount: 2,
Failure: "read, write", Failure: "read, write",
ReadWrites: stats.ReadWrites{ ReadWrites: stats.ReadWrites{
BytesRead: 301, BytesRead: 301,
BytesUploaded: 301, BytesUploaded: 301,
ItemsRead: 1, NonMetaBytesUploaded: 301,
ItemsWritten: 1, ItemsRead: 1,
NonMetaItemsWritten: 1,
ItemsWritten: 1,
}, },
StartAndEndTime: stats.StartAndEndTime{ StartAndEndTime: stats.StartAndEndTime{
StartedAt: t, StartedAt: t,
@ -248,7 +250,7 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() {
assert.Equal(t, now, result.Stats.StartedAt, "started at") assert.Equal(t, now, result.Stats.StartedAt, "started at")
assert.Equal(t, b.Status, result.Status, "status") assert.Equal(t, b.Status, result.Status, "status")
assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size") assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size")
assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size") assert.Equal(t, b.NonMetaBytesUploaded, result.Stats.BytesUploaded, "stored size")
assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner") assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner")
} }

View File

@ -240,12 +240,27 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
return d2 return d2
} }
// SumNonMetaFileSizes returns the total size of items excluding all the
// .meta files from the items.
func (dm DetailsModel) SumNonMetaFileSizes() int64 {
var size int64
// Items will provide only files and filter out folders
for _, ent := range dm.FilterMetaFiles().Items() {
size += ent.size()
}
return size
}
// Check if a file is a metadata file. These are used to store // Check if a file is a metadata file. These are used to store
// additional data like permissions (in case of Drive items) and are // additional data like permissions (in case of Drive items) and are
// not to be treated as regular files. // not to be treated as regular files.
func (de Entry) isMetaFile() bool { func (de Entry) isMetaFile() bool {
// sharepoint types not needed, since sharepoint permissions were // sharepoint types not needed, since sharepoint permissions were
// added after IsMeta was deprecated. // added after IsMeta was deprecated.
// Earlier onedrive backups used to store both metafiles and files in details.
// So filter out just the onedrive items and check for metafiles
return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
} }