Merge branch 'main' into itemAttachment

neha_gupta authored 2023-06-01 15:58:02 +05:30; committed by GitHub
commit 1ec8995f7f
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
22 changed files with 342 additions and 590 deletions

.gitignore (vendored, 9 changes)

@@ -8,6 +8,8 @@
# Test binary, built with `go test -c`
*.test
test_results/
testlog/
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
@@ -21,12 +23,9 @@
.corso.toml
# Logging
.corso.log
*.log
# Build directories
/bin
/docker/bin
/website/dist
*/test_results/**
*/testlog/**
/website/dist

@@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Added ProtectedResourceName to the backup list json output. ProtectedResourceName holds either a UPN or a WebURL, depending on the resource type.
- Rework base selection logic for incremental backups so it's more likely to find a valid base.
### Fixed
- Fix Exchange folder cache population error when parent folder isn't found.

src/.gitignore (vendored, 24 changes)

@@ -1,2 +1,26 @@
dist/
corso
# Test binary, built with `go test -c`
*.test
test_results/
testlog/
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# IDE
.vscode
*.swp
# Standard configuration file names
.corso_test.toml
.corso.toml
# Logging
*.log
# Build directories
/bin
/docker/bin
/website/dist

@@ -1,325 +0,0 @@
package main
import (
"context"
"os"
"time"
"github.com/alcionai/clues"
"github.com/pkg/errors"
"github.com/spf13/cobra"
. "github.com/alcionai/corso/src/cli/print"
"github.com/alcionai/corso/src/cli/utils"
"github.com/alcionai/corso/src/internal/common/dttm"
"github.com/alcionai/corso/src/internal/common/str"
"github.com/alcionai/corso/src/internal/connector"
"github.com/alcionai/corso/src/internal/connector/graph"
"github.com/alcionai/corso/src/internal/connector/onedrive"
"github.com/alcionai/corso/src/pkg/account"
"github.com/alcionai/corso/src/pkg/credentials"
"github.com/alcionai/corso/src/pkg/fault"
"github.com/alcionai/corso/src/pkg/logger"
"github.com/alcionai/corso/src/pkg/services/m365"
"github.com/alcionai/corso/src/pkg/services/m365/api"
)
var purgeCmd = &cobra.Command{
Use: "purge",
Short: "Purge all types of m365 folders",
RunE: handleAllFolderPurge,
}
var oneDriveCmd = &cobra.Command{
Use: "onedrive",
Short: "Purges OneDrive folders",
RunE: handleOneDriveFolderPurge,
}
var (
before string
user string
tenant string
prefix string
)
var ErrPurging = clues.New("not all items were successfully purged")
// ------------------------------------------------------------------------------------------
// CLI command handlers
// ------------------------------------------------------------------------------------------
func main() {
ls := logger.Settings{
Level: logger.LLDebug,
Format: logger.LFText,
}
ctx, _ := logger.CtxOrSeed(context.Background(), ls)
ctx = SetRootCmd(ctx, purgeCmd)
defer logger.Flush(ctx)
fs := purgeCmd.PersistentFlags()
fs.StringVar(&before, "before", "", "folders older than this date are deleted. (default: now in UTC)")
fs.StringVar(&user, "user", "", "m365 user id whose folders will be deleted")
cobra.CheckErr(purgeCmd.MarkPersistentFlagRequired("user"))
fs.StringVar(&tenant, "tenant", "", "m365 tenant containing the user")
fs.StringVar(&prefix, "prefix", "", "filters mail folders by displayName prefix")
cobra.CheckErr(purgeCmd.MarkPersistentFlagRequired("prefix"))
purgeCmd.AddCommand(oneDriveCmd)
if err := purgeCmd.ExecuteContext(ctx); err != nil {
logger.Flush(ctx)
os.Exit(1)
}
}
func handleAllFolderPurge(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
acct, gc, t, err := getGCAndBoundaryTime(ctx)
if err != nil {
return err
}
err = runPurgeForEachUser(
ctx,
acct,
gc,
t,
purgeOneDriveFolders,
)
if err != nil {
return Only(ctx, ErrPurging)
}
return nil
}
func handleOneDriveFolderPurge(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
if utils.HasNoFlagsAndShownHelp(cmd) {
return nil
}
acct, gc, t, err := getGCAndBoundaryTime(ctx)
if err != nil {
return err
}
if err := runPurgeForEachUser(ctx, acct, gc, t, purgeOneDriveFolders); err != nil {
logger.Ctx(ctx).Error(err)
return Only(ctx, clues.Wrap(ErrPurging, "OneDrive folders"))
}
return nil
}
// ------------------------------------------------------------------------------------------
// Purge Controllers
// ------------------------------------------------------------------------------------------
type purgable interface {
GetDisplayName() *string
GetId() *string
}
type purger func(context.Context, *connector.GraphConnector, time.Time, string) error
func runPurgeForEachUser(
ctx context.Context,
acct account.Account,
gc *connector.GraphConnector,
boundary time.Time,
ps ...purger,
) error {
users, err := m365.Users(ctx, acct, fault.New(true))
if err != nil {
return clues.Wrap(err, "getting users")
}
for _, u := range userOrUsers(user, users) {
Infof(ctx, "\nUser: %s - %s", u.PrincipalName, u.ID)
for _, p := range ps {
if err := p(ctx, gc, boundary, u.PrincipalName); err != nil {
return err
}
}
}
return nil
}
// ----- OneDrive
func purgeOneDriveFolders(
ctx context.Context,
gc *connector.GraphConnector,
boundary time.Time,
uid string,
) error {
getter := func(gs graph.Servicer, uid, prefix string) ([]purgable, error) {
pager, err := onedrive.PagerForSource(onedrive.OneDriveSource, gs, uid, nil)
if err != nil {
return nil, err
}
cfs, err := onedrive.GetAllFolders(ctx, gs, pager, prefix, fault.New(true))
if err != nil {
return nil, err
}
purgables := make([]purgable, len(cfs))
for i, v := range cfs {
purgables[i] = v
}
return purgables, nil
}
deleter := func(gs graph.Servicer, uid string, f purgable) error {
driveFolder, ok := f.(*onedrive.Displayable)
if !ok {
return clues.New("non-OneDrive item")
}
return api.DeleteDriveItem(
ctx,
gs,
*driveFolder.GetParentReference().GetDriveId(),
*f.GetId())
}
return purgeFolders(ctx, gc, boundary, "OneDrive Folders", uid, getter, deleter)
}
// ----- controller
func purgeFolders(
ctx context.Context,
gc *connector.GraphConnector,
boundary time.Time,
data, uid string,
getter func(graph.Servicer, string, string) ([]purgable, error),
deleter func(graph.Servicer, string, purgable) error,
) error {
Infof(ctx, "Container: %s", data)
// get them folders
fs, err := getter(gc.Service, uid, prefix)
if err != nil {
return Only(ctx, clues.Wrap(err, "retrieving folders: "+data))
}
if len(fs) == 0 {
Info(ctx, "None Matched")
return nil
}
var errs error
// delete any containers that don't pass the boundary
for _, fld := range fs {
// compare the folder time to the deletion boundary time first
displayName := *fld.GetDisplayName()
dnTime, err := dttm.ExtractTime(displayName)
if err != nil && !errors.Is(err, dttm.ErrNoTimeString) {
err = clues.Wrap(err, "!! Error: parsing container: "+displayName)
Info(ctx, err)
return err
}
if !dnTime.Before(boundary) || dnTime == (time.Time{}) {
continue
}
Infof(ctx, "∙ Deleting [%s]", displayName)
err = deleter(gc.Service, uid, fld)
if err != nil {
err = clues.Wrap(err, "!! Error")
Info(ctx, err)
}
}
return errs
}
// ------------------------------------------------------------------------------------------
// Helpers
// ------------------------------------------------------------------------------------------
func getGC(ctx context.Context) (account.Account, *connector.GraphConnector, error) {
// get account info
m365Cfg := account.M365Config{
M365: credentials.GetM365(),
AzureTenantID: str.First(tenant, os.Getenv(account.AzureTenantID)),
}
acct, err := account.NewAccount(account.ProviderM365, m365Cfg)
if err != nil {
return account.Account{}, nil, Only(ctx, clues.Wrap(err, "finding m365 account details"))
}
gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
if err != nil {
return account.Account{}, nil, Only(ctx, clues.Wrap(err, "connecting to graph api"))
}
return acct, gc, nil
}
func getBoundaryTime(ctx context.Context) (time.Time, error) {
// format the time input
var (
err error
boundaryTime = time.Now().UTC()
)
if len(before) > 0 {
boundaryTime, err = dttm.ParseTime(before)
if err != nil {
return time.Time{}, Only(ctx, clues.Wrap(err, "parsing before flag to time"))
}
}
return boundaryTime, nil
}
func getGCAndBoundaryTime(
ctx context.Context,
) (account.Account, *connector.GraphConnector, time.Time, error) {
acct, gc, err := getGC(ctx)
if err != nil {
return account.Account{}, nil, time.Time{}, err
}
t, err := getBoundaryTime(ctx)
if err != nil {
return account.Account{}, nil, time.Time{}, err
}
return acct, gc, t, nil
}
func userOrUsers(u string, us []*m365.User) []*m365.User {
if len(u) == 0 {
return nil
}
if u == "*" {
return us
}
return []*m365.User{{PrincipalName: u}}
}
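
The deleted purge tool's core filter, in purgeFolders above, reduces to two checks: a timestamp must be recoverable from the folder's display name, and that timestamp must fall strictly before the boundary. A minimal standalone sketch of that predicate, using stdlib time.Parse with an assumed name layout in place of corso's internal dttm.ExtractTime:

package main

import (
	"fmt"
	"time"
)

// shouldPurge mirrors the filter in purgeFolders: delete only when a
// timestamp can be recovered from the display name and it falls strictly
// before the boundary. The layout here is an assumption for illustration;
// dttm.ExtractTime encapsulates the real parsing rules.
func shouldPurge(displayName string, boundary time.Time) bool {
	dnTime, err := time.Parse("2006-01-02T15-04-05", displayName)
	if err != nil {
		// treat unparseable names like dttm.ErrNoTimeString: skip, don't fail
		return false
	}
	return !dnTime.IsZero() && dnTime.Before(boundary)
}

func main() {
	boundary := time.Now().UTC()
	fmt.Println(shouldPurge("2021-01-02T03-04-05", boundary)) // true: older than the boundary
	fmt.Println(shouldPurge("no-timestamp-here", boundary))   // false: no parseable time
}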

@@ -8,7 +8,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
github.com/alcionai/clues v0.0.0-20230406223931-f48777f4773c
github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go v1.44.271
github.com/aws/aws-sdk-go v1.44.273
github.com/aws/aws-xray-sdk-go v1.8.1
github.com/cenkalti/backoff/v4 v4.2.1
github.com/google/uuid v1.3.0
@@ -26,8 +26,8 @@ require (
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.3
github.com/spf13/viper v1.16.0
github.com/stretchr/testify v1.8.4
github.com/tidwall/pretty v1.2.1
github.com/tomlazar/table v0.1.2
github.com/vbauerster/mpb/v8 v8.1.6
@@ -49,9 +49,9 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
@@ -115,14 +115,14 @@ require (
go.opentelemetry.io/otel/trace v1.15.1 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.10.0
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

@@ -66,8 +66,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.44.271 h1:aa+Nu2JcnFmW1TLIz/67SS7KPq1I1Adl4RmExSMjGVo=
github.com/aws/aws-sdk-go v1.44.271/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.273 h1:CX8O0gK+cGrgUyv7bgJ6QQP9mQg7u5mweHdNzULH47c=
github.com/aws/aws-sdk-go v1.44.273/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-xray-sdk-go v1.8.1 h1:O4pXV+hnCskaamGsZnFpzHyAmgPGusBMN6i7nnsy0Fo=
github.com/aws/aws-xray-sdk-go v1.8.1/go.mod h1:wMmVYzej3sykAttNBkXQHK/+clAPWTOrPiajEk7Cp3A=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -113,7 +113,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -312,8 +312,8 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
@@ -349,7 +349,7 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rudderlabs/analytics-go v3.3.3+incompatible h1:OG0XlKoXfr539e2t1dXtTB+Gr89uFW+OUNQBVhHIIBY=
@@ -363,18 +363,18 @@ github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1 h1:lQ3JvmcVO1/AMFbabvUSJ4YtJRpEAX9Qza73p5j03sw=
github.com/spatialcurrent/go-lazy v0.0.0-20211115014721-47315cc003d1/go.mod h1:4aKqcbhASNqjbrG0h9BmkzcWvPJGxbef4B+j0XfFrZo=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc=
github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -386,9 +386,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/tg123/go-htpasswd v1.2.1 h1:i4wfsX1KvvkyoMiHZzjS0VzbAPWfxzI8INcZAKtutoU=
@@ -450,10 +450,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -758,8 +758,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -151,10 +151,6 @@ func (suite *RestoreIntgSuite) TestRestoreEvent() {
}
}
type containerDeleter interface {
DeleteContainer(context.Context, string, string) error
}
// TestRestoreExchangeObject verifies path.Category usage for restored objects
func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
t := suite.T()
@@ -165,12 +161,6 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
service, err := createService(m365)
require.NoError(t, err, clues.ToCore(err))
deleters := map[path.CategoryType]containerDeleter{
path.EmailCategory: suite.ac.Mail(),
path.ContactsCategory: suite.ac.Contacts(),
path.EventsCategory: suite.ac.Events(),
}
userID := tester.M365UserID(suite.T())
tests := []struct {
@@ -381,10 +371,6 @@ func (suite *RestoreIntgSuite) TestRestoreExchangeObject() {
fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
assert.NotNil(t, info, "item info was not populated")
assert.NotNil(t, deleters)
err = deleters[test.category].DeleteContainer(ctx, userID, destination)
assert.NoError(t, err, clues.ToCore(err))
})
}
}

@@ -140,29 +140,21 @@ func (mw *LoggingMiddleware) Intercept(
var (
log = logger.Ctx(ctx)
respClass = resp.StatusCode / 100
logExtra = logger.DebugAPIFV || os.Getenv(logGraphRequestsEnvKey) != ""
// special cases where we always dump the response body, since the response
// details might be critical to understanding the response when debugging.
// * 400-bad-request
// * 403-forbidden
logBody = logger.DebugAPIFV ||
os.Getenv(logGraphRequestsEnvKey) != "" ||
resp.StatusCode == http.StatusBadRequest ||
resp.StatusCode == http.StatusForbidden
)
// special case: always info log 429 responses
// special case: always info-level status 429 logs
if resp.StatusCode == http.StatusTooManyRequests {
if logExtra {
log = log.With("response", getRespDump(ctx, resp, true))
}
log.Infow(
"graph api throttling",
"limit", resp.Header.Get(rateLimitHeader),
"remaining", resp.Header.Get(rateRemainingHeader),
"reset", resp.Header.Get(rateResetHeader),
"retry-after", resp.Header.Get(retryAfterHeader))
return resp, err
}
// special case: always dump status-400-bad-request
if resp.StatusCode == http.StatusBadRequest {
log.With("response", getRespDump(ctx, resp, true)).
Error("graph api error: " + resp.Status)
log.With("response", getRespDump(ctx, resp, logBody)).
Info("graph api throttling")
return resp, err
}
@@ -170,25 +162,18 @@
// Log api calls according to api debugging configurations.
switch respClass {
case 2:
if logExtra {
if logBody {
// only dump the body if it's under a size limit. We don't want to copy gigs into memory for a log.
dump := getRespDump(ctx, resp, os.Getenv(log2xxGraphResponseEnvKey) != "" && resp.ContentLength < logMBLimit)
log.Infow("2xx graph api resp", "response", dump)
}
case 3:
log = log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader)))
if logExtra {
log = log.With("response", getRespDump(ctx, resp, false))
}
log.Info("graph api redirect: " + resp.Status)
log.With("redirect_location", LoggableURL(resp.Header.Get(locationHeader))).
With("response", getRespDump(ctx, resp, false)).
Info("graph api redirect: " + resp.Status)
default:
if logExtra {
log = log.With("response", getRespDump(ctx, resp, true))
}
log.Error("graph api error: " + resp.Status)
log.With("response", getRespDump(ctx, resp, logBody)).
Error("graph api error: " + resp.Status)
}
return resp, err
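
The refactor above folds the middleware's per-status special cases into a single logBody decision computed up front. A minimal standalone sketch of that predicate, with debugAPI standing in for logger.DebugAPIFV and a hypothetical env var name in place of the logGraphRequestsEnvKey constant:

package main

import (
	"fmt"
	"net/http"
	"os"
)

// shouldDumpBody mirrors logBody above: dump the response body when API
// debugging is enabled, when the env toggle is set, or when the status is one
// of the always-dump cases (400, 403) whose bodies carry the diagnostic detail.
func shouldDumpBody(debugAPI bool, envKey string, status int) bool {
	return debugAPI ||
		os.Getenv(envKey) != "" ||
		status == http.StatusBadRequest ||
		status == http.StatusForbidden
}

func main() {
	fmt.Println(shouldDumpBody(false, "LOG_GRAPH_REQUESTS", http.StatusForbidden)) // true
	fmt.Println(shouldDumpBody(false, "LOG_GRAPH_REQUESTS", http.StatusOK))        // false
}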

@@ -15,10 +15,10 @@ import (
"github.com/alcionai/corso/src/pkg/logger"
)
type BackupBases struct {
Backups []BackupEntry
MergeBases []ManifestEntry
AssistBases []ManifestEntry
type backupBases struct {
backups []BackupEntry
mergeBases []ManifestEntry
assistBases []ManifestEntry
}
type BackupEntry struct {
@@ -31,7 +31,7 @@ type baseFinder struct {
bg inject.GetBackuper
}
func NewBaseFinder(
func newBaseFinder(
sm snapshotManager,
bg inject.GetBackuper,
) (*baseFinder, error) {
@@ -183,11 +183,11 @@ func (b *baseFinder) getBase(
return b.findBasesInSet(ctx, reason, metas)
}
func (b *baseFinder) FindBases(
func (b *baseFinder) findBases(
ctx context.Context,
reasons []Reason,
tags map[string]string,
) (BackupBases, error) {
) (backupBases, error) {
var (
// All maps go from ID -> entry. We need to track by ID so we can coalesce
// the reason for selecting something. Kopia assisted snapshots also use
@@ -251,9 +251,24 @@ func (b *baseFinder) FindBases(
}
}
return BackupBases{
Backups: maps.Values(baseBups),
MergeBases: maps.Values(baseSnaps),
AssistBases: maps.Values(kopiaAssistSnaps),
return backupBases{
backups: maps.Values(baseBups),
mergeBases: maps.Values(baseSnaps),
assistBases: maps.Values(kopiaAssistSnaps),
}, nil
}
func (b *baseFinder) FindBases(
ctx context.Context,
reasons []Reason,
tags map[string]string,
) ([]ManifestEntry, error) {
bb, err := b.findBases(ctx, reasons, tags)
if err != nil {
return nil, clues.Stack(err)
}
// assistBases contains all snapshots so we can return it while maintaining
// almost all compatibility.
return bb.assistBases, nil
}
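
The shape of this change: the rich result type and its lookup become package-private (backupBases, findBases), while the exported FindBases shrinks to an adapter returning only assistBases, which contains every candidate snapshot, so existing callers keep compiling. A minimal sketch of the pattern, with placeholder types standing in for the kopia package's:

package main

import "fmt"

// manifestEntry stands in for kopia.ManifestEntry.
type manifestEntry struct{ id string }

// backupBases mirrors the unexported struct: the full result of base
// discovery, kept internal so callers cannot depend on its layout.
type backupBases struct {
	mergeBases  []manifestEntry
	assistBases []manifestEntry
}

type baseFinder struct{}

// findBases is the rich, package-private lookup.
func (b *baseFinder) findBases() (backupBases, error) {
	return backupBases{
		mergeBases:  []manifestEntry{{id: "complete-snap"}},
		assistBases: []manifestEntry{{id: "complete-snap"}, {id: "incomplete-snap"}},
	}, nil
}

// FindBases is the exported adapter: it surfaces only assistBases, the
// superset of snapshots, preserving the old slice-of-entries contract.
func (b *baseFinder) FindBases() ([]manifestEntry, error) {
	bb, err := b.findBases()
	if err != nil {
		return nil, err
	}
	return bb.assistBases, nil
}

func main() {
	ms, _ := new(baseFinder).FindBases()
	fmt.Println(len(ms)) // 2
}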

@@ -342,10 +342,10 @@ func (suite *BaseFinderUnitSuite) TestNoResult_NoBackupsOrSnapshots() {
},
}
bb, err := bf.FindBases(ctx, reasons, nil)
bb, err := bf.findBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases)
assert.Empty(t, bb.AssistBases)
assert.Empty(t, bb.mergeBases)
assert.Empty(t, bb.assistBases)
}
func (suite *BaseFinderUnitSuite) TestNoResult_ErrorListingSnapshots() {
@@ -366,10 +366,10 @@
},
}
bb, err := bf.FindBases(ctx, reasons, nil)
bb, err := bf.findBases(ctx, reasons, nil)
assert.NoError(t, err, "getting bases: %v", clues.ToCore(err))
assert.Empty(t, bb.MergeBases)
assert.Empty(t, bb.AssistBases)
assert.Empty(t, bb.mergeBases)
assert.Empty(t, bb.assistBases)
}
func (suite *BaseFinderUnitSuite) TestGetBases() {
@@ -825,7 +825,7 @@
bg: &mockModelGetter{data: test.backupData},
}
bb, err := bf.FindBases(
bb, err := bf.findBases(
ctx,
test.input,
nil)
@@ -833,17 +833,17 @@
checkBackupEntriesMatch(
t,
bb.Backups,
bb.backups,
test.backupData,
test.expectedBaseReasons)
checkManifestEntriesMatch(
t,
bb.MergeBases,
bb.mergeBases,
test.manifestData,
test.expectedBaseReasons)
checkManifestEntriesMatch(
t,
bb.AssistBases,
bb.assistBases,
test.manifestData,
test.expectedAssistManifestReasons)
})
@@ -920,7 +920,7 @@ func (suite *BaseFinderUnitSuite) TestFetchPrevSnapshots_CustomTags() {
bg: &mockModelGetter{data: backupData},
}
bb, err := bf.FindBases(
bb, err := bf.findBases(
ctx,
testAllUsersAllCats,
test.tags)
@@ -928,7 +928,7 @@
checkManifestEntriesMatch(
t,
bb.MergeBases,
bb.mergeBases,
manifestData,
test.expectedIdxs)
})

@@ -39,6 +39,6 @@ type (
ctx context.Context,
reasons []kopia.Reason,
tags map[string]string,
) (kopia.BackupBases, error)
) ([]kopia.ManifestEntry, error)
}
)
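
This BaseFinder signature has to stay in lockstep with kopia's implementation (see Wrapper.NewBaseFinder in the diff that follows). A compile-time assertion, sketched below with placeholder types since the commit itself doesn't add one, is the usual way to turn such drift into a build error at the definition rather than at a distant call site:

package main

import "fmt"

// baseFinderContract stands in for inject.BaseFinder; the method and its
// signature are illustrative, not corso's actual API.
type baseFinderContract interface {
	FindBases(tags map[string]string) ([]string, error)
}

// finder stands in for kopia's *baseFinder.
type finder struct{}

func (finder) FindBases(tags map[string]string) ([]string, error) {
	return []string{"snap-id"}, nil
}

// Compile-time check: if finder's method set drifts from the contract (as
// signatures did in this commit), the build breaks on this line.
var _ baseFinderContract = finder{}

func main() {
	res, _ := finder{}.FindBases(nil)
	fmt.Println(res)
}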

@@ -19,6 +19,7 @@ import (
"github.com/alcionai/corso/src/internal/data"
"github.com/alcionai/corso/src/internal/diagnostics"
"github.com/alcionai/corso/src/internal/observe"
"github.com/alcionai/corso/src/internal/operations/inject"
"github.com/alcionai/corso/src/internal/stats"
"github.com/alcionai/corso/src/pkg/backup/details"
"github.com/alcionai/corso/src/pkg/control/repository"
@@ -50,14 +51,16 @@
type BackupStats struct {
SnapshotID string
TotalHashedBytes int64
TotalUploadedBytes int64
TotalHashedBytes int64
TotalUploadedBytes int64
TotalNonMetaUploadedBytes int64
TotalFileCount int
CachedFileCount int
UncachedFileCount int
TotalDirectoryCount int
ErrorCount int
TotalFileCount int
TotalNonMetaFileCount int
CachedFileCount int
UncachedFileCount int
TotalDirectoryCount int
ErrorCount int
IgnoredErrorCount int
ExpectedIgnoredErrorCount int
@@ -614,6 +617,10 @@ func (w Wrapper) FetchPrevSnapshotManifests(
return fetchPrevSnapshotManifests(ctx, w.c, reasons, tags), nil
}
func (w Wrapper) NewBaseFinder(bg inject.GetBackuper) (*baseFinder, error) {
return newBaseFinder(w.c, bg)
}
func isErrEntryNotFound(err error) bool {
// Calling Child on a directory may return this.
if errors.Is(err, fs.ErrEntryNotFound) {

@@ -177,7 +177,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
ctx = clues.Add(
ctx,
"tenant_id", clues.Hide(op.account.ID()),
"resource_owner", clues.Hide(op.ResourceOwner.Name()),
"resource_owner_id", op.ResourceOwner.ID(),
"resource_owner_name", clues.Hide(op.ResourceOwner.Name()),
"backup_id", op.Results.BackupID,
"service", op.Selectors.Service,
"incremental", op.incremental)
@@ -290,9 +291,24 @@ func (op *BackupOperation) do(
// should always be 1, since backups are 1:1 with resourceOwners.
opStats.resourceCount = 1
kbf, err := op.kopia.NewBaseFinder(op.store)
if err != nil {
return nil, clues.Stack(err)
}
type baseFinder struct {
kinject.BaseFinder
kinject.RestoreProducer
}
bf := baseFinder{
BaseFinder: kbf,
RestoreProducer: op.kopia,
}
mans, mdColls, canUseMetaData, err := produceManifestsAndMetadata(
ctx,
op.kopia,
bf,
op.store,
reasons, fallbackReasons,
op.account.ID(),
@@ -347,6 +363,8 @@
mans,
toMerge,
deets,
writeStats,
op.Selectors.PathService(),
op.Errors)
if err != nil {
return nil, clues.Wrap(err, "merging details")
@@ -465,7 +483,7 @@ func consumeBackupCollections(
bc kinject.BackupConsumer,
tenantID string,
reasons []kopia.Reason,
mans []*kopia.ManifestEntry,
mans []kopia.ManifestEntry,
cs []data.BackupCollection,
pmr prefixmatcher.StringSetReader,
backupID model.StableID,
@@ -650,7 +668,7 @@ func getNewPathRefs(
func lastCompleteBackups(
ctx context.Context,
ms *store.Wrapper,
mans []*kopia.ManifestEntry,
mans []kopia.ManifestEntry,
) (map[string]*backup.Backup, int, error) {
var (
oldestVersion = version.NoBackup
@@ -701,11 +719,20 @@ func mergeDetails(
ctx context.Context,
ms *store.Wrapper,
detailsStore streamstore.Streamer,
mans []*kopia.ManifestEntry,
mans []kopia.ManifestEntry,
dataFromBackup kopia.DetailsMergeInfoer,
deets *details.Builder,
writeStats *kopia.BackupStats,
serviceType path.ServiceType,
errs *fault.Bus,
) error {
detailsModel := deets.Details().DetailsModel
// Populate writeStats before anything else, so that the early returns below
// (e.g. when there is no backup data to merge) don't skip these counts.
writeStats.TotalNonMetaFileCount = len(detailsModel.FilterMetaFiles().Items())
writeStats.TotalNonMetaUploadedBytes = detailsModel.SumNonMetaFileSizes()
// Don't bother loading any of the base details if there's nothing we need to merge.
if dataFromBackup == nil || dataFromBackup.ItemsToMerge() == 0 {
return nil
@@ -841,6 +868,8 @@ func (op *BackupOperation) persistResults(
op.Results.BytesRead = opStats.k.TotalHashedBytes
op.Results.BytesUploaded = opStats.k.TotalUploadedBytes
op.Results.ItemsWritten = opStats.k.TotalFileCount
op.Results.NonMetaBytesUploaded = opStats.k.TotalNonMetaUploadedBytes
op.Results.NonMetaItemsWritten = opStats.k.TotalNonMetaFileCount
op.Results.ResourceOwners = opStats.resourceCount
if opStats.gc == nil {
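
Earlier in this diff, do() satisfies produceManifestsAndMetadata's narrowed dependency by composing two capabilities into one value via interface embedding in a locally declared struct. A minimal sketch of that composition with stand-in interfaces:

package main

import "fmt"

// Narrow capabilities standing in for kinject.BaseFinder and
// kinject.RestoreProducer.
type bases interface{ FindBases() string }
type restorer interface{ ProduceRestoreCollections() string }

type kopiaFinder struct{}

func (kopiaFinder) FindBases() string { return "manifest entries" }

type kopiaWrapper struct{}

func (kopiaWrapper) ProduceRestoreCollections() string { return "restore collections" }

// composite embeds both interfaces, so a single value carries both method
// sets, mirroring the local baseFinder struct in BackupOperation.do.
type composite struct {
	bases
	restorer
}

func main() {
	c := composite{bases: kopiaFinder{}, restorer: kopiaWrapper{}}
	fmt.Println(c.FindBases(), "/", c.ProduceRestoreCollections())
}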

@@ -961,6 +961,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten int
nonDeltaItemsRead int
nonDeltaItemsWritten int
nonMetaItemsWritten int
}{
{
name: "clean, no changes",
@@ -969,6 +970,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0, // unchanged items are not counted towards write
nonMetaItemsWritten: 4,
},
{
name: "move an email folder to a subfolder",
@@ -992,6 +994,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
},
{
name: "delete a folder",
@@ -1018,6 +1021,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletions are not counted as "writes"
nonDeltaItemsRead: 4,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
{
name: "add a new folder",
@@ -1070,6 +1074,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 4,
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 4,
nonMetaItemsWritten: 8,
},
{
name: "rename a folder",
@@ -1125,6 +1130,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // two items per category
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
{
name: "add a new item",
@@ -1178,6 +1184,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 2,
nonDeltaItemsRead: 10,
nonDeltaItemsWritten: 2,
nonMetaItemsWritten: 6,
},
{
name: "delete an existing item",
@@ -1231,6 +1238,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
deltaItemsWritten: 0, // deletes are not counted as "writes"
nonDeltaItemsRead: 8,
nonDeltaItemsWritten: 0,
nonMetaItemsWritten: 4,
},
}
@@ -1263,7 +1271,7 @@ func testExchangeContinuousBackups(suite *BackupOpIntegrationSuite, toggles cont
assert.Equal(t, test.nonDeltaItemsRead+4, incBO.Results.ItemsRead, "non delta items read")
assert.Equal(t, test.nonDeltaItemsWritten+4, incBO.Results.ItemsWritten, "non delta items written")
}
assert.Equal(t, test.nonMetaItemsWritten, incBO.Results.ItemsWritten, "non meta incremental items write")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "incremental recoverable/iteration errors")
assert.Equal(t, 1, incMB.TimesCalled[events.BackupStart], "incremental backup-start events")
@@ -1527,9 +1535,10 @@ func runDriveIncrementalTest(
table := []struct {
name string
// performs the incremental update required for the test.
updateFiles func(t *testing.T)
itemsRead int
itemsWritten int
updateFiles func(t *testing.T)
itemsRead int
itemsWritten int
nonMetaItemsWritten int
}{
{
name: "clean incremental, no changes",
@@ -1556,8 +1565,9 @@ func runDriveIncrementalTest(
expectDeets.AddItem(driveID, makeLocRef(container1), newFileID)
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "add permission to new file",
@@ -1578,8 +1588,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // the file for which permission was updated
},
{
name: "remove permission from new file",
@@ -1599,8 +1610,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from file %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
itemsRead: 1, // .data file for newitem
itemsWritten: 2, // .meta for newitem, .dirmeta for parent (.data is not written as it is not updated)
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "add permission to container",
@@ -1621,8 +1633,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "adding permission to container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated; the change applies to the container
},
{
name: "remove permission from container",
@ -1643,8 +1656,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "removing permission from container %v", clues.ToCore(err))
// no expectedDeets: metadata isn't tracked
},
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
itemsRead: 0,
itemsWritten: 1, // .dirmeta for collection
nonMetaItemsWritten: 0, // no files updated
},
{
name: "update contents of a file",
@@ -1658,8 +1672,9 @@ func runDriveIncrementalTest(
require.NoErrorf(t, err, "updating file contents: %v", clues.ToCore(err))
// no expectedDeets: neither file id nor location changed
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
},
{
name: "rename a file",
@@ -1681,8 +1696,9 @@ func runDriveIncrementalTest(
driveItem)
require.NoError(t, err, "renaming file %v", clues.ToCore(err))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for newitem
// no expectedDeets: neither file id nor location changed
},
{
@@ -1710,8 +1726,9 @@ func runDriveIncrementalTest(
makeLocRef(container2),
ptr.Val(newFile.GetId()))
},
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
itemsRead: 1, // .data file for newitem
itemsWritten: 3, // .data and .meta for newitem, .dirmeta for parent
nonMetaItemsWritten: 1, // .data file for new item
},
{
name: "delete file",
@ -1725,8 +1742,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveItem(driveID, makeLocRef(container2), ptr.Val(newFile.GetId()))
},
itemsRead: 0,
itemsWritten: 0,
itemsRead: 0,
itemsWritten: 0,
nonMetaItemsWritten: 0,
},
{
name: "move a folder to a subfolder",
@ -1753,8 +1771,9 @@ func runDriveIncrementalTest(
makeLocRef(container2),
makeLocRef(container1))
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
},
{
name: "rename a folder",
@ -1783,8 +1802,9 @@ func runDriveIncrementalTest(
makeLocRef(container1, container2),
makeLocRef(container1, containerRename))
},
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
itemsRead: 0,
itemsWritten: 7, // 2*2(data and meta of 2 files) + 3 (dirmeta of two moved folders and target)
nonMetaItemsWritten: 0,
},
{
name: "delete a folder",
@ -1799,8 +1819,9 @@ func runDriveIncrementalTest(
expectDeets.RemoveLocation(driveID, makeLocRef(container1, containerRename))
},
itemsRead: 0,
itemsWritten: 0,
itemsRead: 0,
itemsWritten: 0,
nonMetaItemsWritten: 0,
},
{
name: "add a new folder",
@ -1831,8 +1852,9 @@ func runDriveIncrementalTest(
expectDeets.AddLocation(driveID, container3)
},
itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta
itemsRead: 2, // 2 .data for 2 files
itemsWritten: 6, // read items + 2 directory meta
nonMetaItemsWritten: 2, // 2 .data for 2 files
},
}
for _, test := range table {
@@ -1862,9 +1884,10 @@ func runDriveIncrementalTest(
// do some additional checks to ensure the incremental dealt with fewer items.
// +2 on read/writes to account for metadata: 1 delta and 1 path.
var (
expectWrites = test.itemsWritten + 2
expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
expectWrites = test.itemsWritten + 2
expectNonMetaWrites = test.nonMetaItemsWritten
expectReads = test.itemsRead + 2
assertReadWrite = assert.Equal
)
// Sharepoint can produce a superset of permissions by nature of
@@ -1876,6 +1899,7 @@ func runDriveIncrementalTest(
}
assertReadWrite(t, expectWrites, incBO.Results.ItemsWritten, "incremental items written")
assertReadWrite(t, expectNonMetaWrites, incBO.Results.NonMetaItemsWritten, "incremental non-meta items written")
assertReadWrite(t, expectReads, incBO.Results.ItemsRead, "incremental items read")
assert.NoError(t, incBO.Errors.Failure(), "incremental non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
@@ -1976,6 +2000,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_oneDriveOwnerMigration() {
// 2 on read/writes to account for metadata: 1 delta and 1 path.
assert.LessOrEqual(t, 2, incBO.Results.ItemsWritten, "items written")
assert.LessOrEqual(t, 1, incBO.Results.NonMetaItemsWritten, "non meta items written")
assert.LessOrEqual(t, 2, incBO.Results.ItemsRead, "items read")
assert.NoError(t, incBO.Errors.Failure(), "non-recoverable error", clues.ToCore(incBO.Errors.Failure()))
assert.Empty(t, incBO.Errors.Recovered(), "recoverable/iteration errors")

@@ -533,12 +533,12 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
table := []struct {
name string
inputMan []*kopia.ManifestEntry
inputMan []kopia.ManifestEntry
expected []kopia.IncrementalBase
}{
{
name: "SingleManifestSingleReason",
inputMan: []*kopia.ManifestEntry{
inputMan: []kopia.ManifestEntry{
{
Manifest: manifest1,
Reasons: []kopia.Reason{
@@ -557,7 +557,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
},
{
name: "SingleManifestMultipleReasons",
inputMan: []*kopia.ManifestEntry{
inputMan: []kopia.ManifestEntry{
{
Manifest: manifest1,
Reasons: []kopia.Reason{
@@ -578,7 +578,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_ConsumeBackupDataCollections
},
{
name: "MultipleManifestsMultipleReasons",
inputMan: []*kopia.ManifestEntry{
inputMan: []kopia.ManifestEntry{
{
Manifest: manifest1,
Reasons: []kopia.Reason{
@@ -731,7 +731,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
name string
populatedModels map[model.StableID]backup.Backup
populatedDetails map[string]*details.Details
inputMans []*kopia.ManifestEntry
inputMans []kopia.ManifestEntry
mdm *mockDetailsMergeInfoer
errCheck assert.ErrorAssertionFunc
@@ -758,7 +758,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), "foo", ""),
Reasons: []kopia.Reason{
@@ -776,7 +776,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -803,7 +803,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -833,7 +833,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -869,7 +869,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -931,7 +931,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -961,7 +961,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -994,7 +994,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1027,7 +1027,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1061,7 +1061,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1095,7 +1095,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1146,7 +1146,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
return res
}(),
inputMans: []*kopia.ManifestEntry{
inputMans: []kopia.ManifestEntry{
{
Manifest: makeManifest(suite.T(), backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1198,6 +1198,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
mds := ssmock.Streamer{Deets: test.populatedDetails}
w := &store.Wrapper{Storer: mockBackupStorer{entries: test.populatedModels}}
deets := details.Builder{}
writeStats := kopia.BackupStats{}
err := mergeDetails(
ctx,
@@ -1206,6 +1207,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsItems
test.inputMans,
test.mdm,
&deets,
&writeStats,
path.OneDriveService,
fault.New(true))
test.errCheck(t, err, clues.ToCore(err))
@@ -1255,7 +1258,7 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
Category: itemPath1.Category(),
}
inputMans = []*kopia.ManifestEntry{
inputMans = []kopia.ManifestEntry{
{
Manifest: makeManifest(t, backup1.ID, ""),
Reasons: []kopia.Reason{
@@ -1307,9 +1310,10 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
defer flush()
var (
mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{}
mds = ssmock.Streamer{Deets: populatedDetails}
w = &store.Wrapper{Storer: mockBackupStorer{entries: populatedModels}}
deets = details.Builder{}
writeStats = kopia.BackupStats{}
)
err := mergeDetails(
@@ -1319,6 +1323,8 @@ func (suite *BackupOpUnitSuite) TestBackupOperation_MergeBackupDetails_AddsFolde
inputMans,
mdm,
&deets,
&writeStats,
path.ExchangeService,
fault.New(true))
assert.NoError(t, err, clues.ToCore(err))
compareDeetEntries(t, expectedEntries, deets.Details().Entries)

@@ -19,16 +19,8 @@ import (
"github.com/alcionai/corso/src/pkg/path"
)
type manifestFetcher interface {
FetchPrevSnapshotManifests(
ctx context.Context,
reasons []kopia.Reason,
tags map[string]string,
) ([]*kopia.ManifestEntry, error)
}
type manifestRestorer interface {
manifestFetcher
inject.BaseFinder
inject.RestoreProducer
}
@@ -47,14 +39,14 @@ func produceManifestsAndMetadata(
reasons, fallbackReasons []kopia.Reason,
tenantID string,
getMetadata bool,
) ([]*kopia.ManifestEntry, []data.RestoreCollection, bool, error) {
) ([]kopia.ManifestEntry, []data.RestoreCollection, bool, error) {
var (
tags = map[string]string{kopia.TagBackupCategory: ""}
metadataFiles = graph.AllMetadataFileNames()
collections []data.RestoreCollection
)
ms, err := mr.FetchPrevSnapshotManifests(ctx, reasons, tags)
ms, err := mr.FindBases(ctx, reasons, tags)
if err != nil {
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots")
}
@@ -70,7 +62,7 @@ func produceManifestsAndMetadata(
return ms, nil, false, nil
}
fbms, err := mr.FetchPrevSnapshotManifests(ctx, fallbackReasons, tags)
fbms, err := mr.FindBases(ctx, fallbackReasons, tags)
if err != nil {
return nil, nil, false, clues.Wrap(err, "looking up prior snapshots under alternate id")
}
@@ -177,9 +169,9 @@
// 3. If mans has no entry for a reason, look for both complete and incomplete fallbacks.
func unionManifests(
reasons []kopia.Reason,
mans []*kopia.ManifestEntry,
fallback []*kopia.ManifestEntry,
) []*kopia.ManifestEntry {
mans []kopia.ManifestEntry,
fallback []kopia.ManifestEntry,
) []kopia.ManifestEntry {
if len(fallback) == 0 {
return mans
}
@@ -203,7 +195,9 @@
}
// track the manifests that were collected with the current lookup
for _, m := range mans {
for i := range mans {
m := &mans[i]
for _, r := range m.Reasons {
k := r.Service.String() + r.Category.String()
t := tups[k]
@@ -219,7 +213,8 @@
}
// backfill from the fallback where necessary
for _, m := range fallback {
for i := range fallback {
m := &fallback[i]
useReasons := []kopia.Reason{}
for _, r := range m.Reasons {
@@ -250,15 +245,15 @@
}
// collect the results into a single slice of manifests
ms := map[string]*kopia.ManifestEntry{}
ms := map[string]kopia.ManifestEntry{}
for _, m := range tups {
if m.complete != nil {
ms[string(m.complete.ID)] = m.complete
ms[string(m.complete.ID)] = *m.complete
}
if m.incomplete != nil {
ms[string(m.incomplete.ID)] = m.incomplete
ms[string(m.incomplete.ID)] = *m.incomplete
}
}
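
A side effect of moving from []*kopia.ManifestEntry to []kopia.ManifestEntry, visible in the loops above: ranging by value copies each element, so the code indexes into the slice (m := &mans[i]) to obtain a pointer to the element itself. A minimal demonstration:

package main

import "fmt"

type entry struct {
	id   string
	seen bool
}

func main() {
	entries := []entry{{id: "a"}, {id: "b"}}

	// Anti-pattern: ranging by value copies each element, so the mutation
	// lands on the copy and is lost.
	for _, e := range entries {
		e.seen = true
	}
	fmt.Println(entries[0].seen) // false

	// Indexing yields a pointer to the slice element itself, the same shape
	// as `for i := range mans { m := &mans[i] }` in unionManifests.
	for i := range entries {
		e := &entries[i]
		e.seen = true
	}
	fmt.Println(entries[0].seen) // true
}
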
@@ -269,7 +264,7 @@
// of manifests, that each manifest's Reason (owner, service, category) is only
// included once. If a reason is duplicated by any two manifests, an error is
// returned.
func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry) error {
func verifyDistinctBases(ctx context.Context, mans []kopia.ManifestEntry) error {
reasons := map[string]manifest.ID{}
for _, man := range mans {
@@ -303,7 +298,7 @@ func verifyDistinctBases(ctx context.Context, mans []*kopia.ManifestEntry) error
func collectMetadata(
ctx context.Context,
r inject.RestoreProducer,
man *kopia.ManifestEntry,
man kopia.ManifestEntry,
fileNames []string,
tenantID string,
errs *fault.Bus,
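
verifyDistinctBases, per its doc comment above, errors when any (owner, service, category) reason is claimed by more than one manifest. A minimal sketch of that duplicate check, using a comparable struct key in place of the reasons map[string]manifest.ID:

package main

import "fmt"

type reason struct{ owner, service, category string }

// distinctBases reports an error if any reason is claimed by two manifests,
// the same property verifyDistinctBases enforces.
func distinctBases(mans map[string][]reason) error {
	seen := map[reason]string{}

	for id, rs := range mans {
		for _, r := range rs {
			if prev, ok := seen[r]; ok {
				return fmt.Errorf("reason %+v claimed by manifests %s and %s", r, prev, id)
			}
			seen[r] = id
		}
	}

	return nil
}

func main() {
	err := distinctBases(map[string][]reason{
		"snap1": {{owner: "u1", service: "exchange", category: "email"}},
		"snap2": {{owner: "u1", service: "exchange", category: "email"}},
	})
	fmt.Println(err != nil) // true: overlapping reason
}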

@@ -27,16 +27,16 @@ import (
type mockManifestRestorer struct {
mockRestoreProducer
mans []*kopia.ManifestEntry
mans []kopia.ManifestEntry
mrErr error // err varname already claimed by mockRestoreProducer
}
func (mmr mockManifestRestorer) FetchPrevSnapshotManifests(
func (mmr mockManifestRestorer) FindBases(
ctx context.Context,
reasons []kopia.Reason,
tags map[string]string,
) ([]*kopia.ManifestEntry, error) {
mans := map[string]*kopia.ManifestEntry{}
) ([]kopia.ManifestEntry, error) {
mans := map[string]kopia.ManifestEntry{}
for _, r := range reasons {
for _, m := range mmr.mans {
@@ -49,10 +49,6 @@ func (mmr mockManifestRestorer) FetchPrevSnapshotManifests(
}
}
if len(mans) == 0 && len(reasons) == 0 {
return mmr.mans, mmr.mrErr
}
return maps.Values(mans), mmr.mrErr
}
@@ -247,7 +243,7 @@ func (suite *OperationsManifestsUnitSuite) TestCollectMetadata() {
mr := mockRestoreProducer{err: test.expectErr}
mr.buildRestoreFunc(t, test.manID, paths)
man := &kopia.ManifestEntry{
man := kopia.ManifestEntry{
Manifest: &snapshot.Manifest{ID: manifest.ID(test.manID)},
Reasons: test.reasons,
}
@@ -263,12 +259,12 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
table := []struct {
name string
mans []*kopia.ManifestEntry
mans []kopia.ManifestEntry
expect assert.ErrorAssertionFunc
}{
{
name: "one manifest, one reason",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -284,7 +280,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "one incomplete manifest",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{IncompleteReason: "ir"},
},
@@ -293,7 +289,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "one manifest, multiple reasons",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -314,7 +310,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "one manifest, duplicate reasons",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -335,7 +331,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "two manifests, non-overlapping reasons",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -361,7 +357,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "two manifests, overlapping reasons",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -387,7 +383,7 @@ func (suite *OperationsManifestsUnitSuite) TestVerifyDistinctBases() {
},
{
name: "two manifests, overlapping reasons, one snapshot incomplete",
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
@@ -430,13 +426,13 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
did = "detailsid"
)
makeMan := func(pct path.CategoryType, id, incmpl, bid string) *kopia.ManifestEntry {
makeMan := func(pct path.CategoryType, id, incmpl, bid string) kopia.ManifestEntry {
tags := map[string]string{}
if len(bid) > 0 {
tags = map[string]string{"tag:" + kopia.TagBackupID: bid}
}
return &kopia.ManifestEntry{
return kopia.ManifestEntry{
Manifest: &snapshot.Manifest{
ID: manifest.ID(id),
IncompleteReason: incmpl,
@@ -456,7 +452,6 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name string
mr mockManifestRestorer
gb mockGetBackuper
reasons []kopia.Reason
getMeta bool
assertErr assert.ErrorAssertionFunc
assertB assert.BoolAssertionFunc
@@ -467,10 +462,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata, no mans",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{},
mans: []kopia.ManifestEntry{},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false,
assertErr: assert.NoError,
assertB: assert.False,
@@ -480,10 +474,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "")},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false,
assertErr: assert.NoError,
assertB: assert.False,
@@ -493,10 +486,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "don't get metadata, incomplete manifest",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "ir", "")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "ir", "")},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: false,
assertErr: assert.NoError,
assertB: assert.False,
@@ -509,7 +501,6 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
mrErr: assert.AnError,
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.Error,
assertB: assert.False,
@@ -519,13 +510,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "verify distinct bases fails",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id1", "", ""),
makeMan(path.EmailCategory, "id2", "", ""),
},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError, // No error, even though verify failed.
assertB: assert.False,
@@ -535,10 +525,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "no manifests",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{},
mans: []kopia.ManifestEntry{},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.True,
@ -548,13 +537,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "only incomplete manifests",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id1", "ir", ""),
makeMan(path.ContactsCategory, "id2", "ir", ""),
},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.True,
@ -568,10 +556,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
},
},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "")},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.Error,
assertB: assert.False,
@ -581,10 +568,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "backup missing details id",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
},
gb: mockGetBackuper{},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.False,
@ -598,13 +584,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"incmpl_id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "incmpl_id_coll"}}},
},
},
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "id", "", "bid"),
makeMan(path.EmailCategory, "incmpl_id", "ir", ""),
},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.True,
@ -618,10 +603,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"id": {data.NotFoundRestoreCollection{Collection: mockColl{id: "id_coll"}}},
},
},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id", "", "bid")},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.True,
@ -636,13 +620,12 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
"contact": {data.NotFoundRestoreCollection{Collection: mockColl{id: "contact_coll"}}},
},
},
mans: []*kopia.ManifestEntry{
mans: []kopia.ManifestEntry{
makeMan(path.EmailCategory, "mail", "", "bid"),
makeMan(path.ContactsCategory, "contact", "", "bid"),
},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.NoError,
assertB: assert.True,
@ -655,10 +638,9 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
name: "error collecting metadata",
mr: mockManifestRestorer{
mockRestoreProducer: mockRestoreProducer{err: assert.AnError},
mans: []*kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
mans: []kopia.ManifestEntry{makeMan(path.EmailCategory, "id1", "", "bid")},
},
gb: mockGetBackuper{detailsID: did},
reasons: []kopia.Reason{},
getMeta: true,
assertErr: assert.Error,
assertB: assert.False,
@ -677,7 +659,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata() {
ctx,
&test.mr,
&test.gb,
test.reasons, nil,
[]kopia.Reason{{ResourceOwner: ro}}, nil,
tid,
test.getMeta)
test.assertErr(t, err, clues.ToCore(err))
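Context for the removed reasons field: every test case supplied an empty slice, so the table entries carried no signal, and the call site now passes one fixed reason instead. A sketch of a fully populated value, as a hedge: ResourceOwner appears in this diff, while the Service and Category field names are assumed from their use alongside path.EmailCategory in these tests.

	reasons := []kopia.Reason{{
		ResourceOwner: ro, // the test suite's resource owner
		Service:       path.ExchangeService, // assumed field/constant
		Category:      path.EmailCategory,   // assumed field
	}}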
@ -739,8 +721,8 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
fbIncomplete = "fb_incmpl"
)
makeMan := func(id, incmpl string, reasons []kopia.Reason) *kopia.ManifestEntry {
return &kopia.ManifestEntry{
makeMan := func(id, incmpl string, reasons []kopia.Reason) kopia.ManifestEntry {
return kopia.ManifestEntry{
Manifest: &snapshot.Manifest{
ID: manifest.ID(id),
IncompleteReason: incmpl,
@ -1005,7 +987,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
})
}
mans := []*kopia.ManifestEntry{}
mans := []kopia.ManifestEntry{}
for _, m := range test.man {
incomplete := ""
@ -1027,7 +1009,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
mr := mockManifestRestorer{mans: mans}
mans, _, b, err := produceManifestsAndMetadata(
gotMans, _, b, err := produceManifestsAndMetadata(
ctx,
&mr,
nil,
@ -1040,7 +1022,7 @@ func (suite *OperationsManifestsUnitSuite) TestProduceManifestsAndMetadata_fallb
manIDs := []string{}
for _, m := range mans {
for _, m := range gotMans {
manIDs = append(manIDs, string(m.ID))
reasons := test.expectReasons[string(m.ID)]
@ -1075,12 +1057,12 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
table := []struct {
name string
input []*kopia.ManifestEntry
input []kopia.ManifestEntry
errCheck assert.ErrorAssertionFunc
}{
{
name: "SingleManifestMultipleReasons",
input: []*kopia.ManifestEntry{
input: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
@ -1103,7 +1085,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
},
{
name: "MultipleManifestsDistinctReason",
input: []*kopia.ManifestEntry{
input: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
@ -1133,7 +1115,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
},
{
name: "MultipleManifestsSameReason",
input: []*kopia.ManifestEntry{
input: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
@ -1163,7 +1145,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_VerifyDistinctBases()
},
{
name: "MultipleManifestsSameReasonOneIncomplete",
input: []*kopia.ManifestEntry{
input: []kopia.ManifestEntry{
{
Manifest: &snapshot.Manifest{
ID: "id1",
@ -1250,13 +1232,13 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
table := []struct {
name string
inputMan *kopia.ManifestEntry
inputMan kopia.ManifestEntry
inputFiles []string
expected []path.Path
}{
{
name: "SingleReasonSingleFile",
inputMan: &kopia.ManifestEntry{
inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{
@ -1271,7 +1253,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
},
{
name: "SingleReasonMultipleFiles",
inputMan: &kopia.ManifestEntry{
inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{
@ -1286,7 +1268,7 @@ func (suite *BackupManifestUnitSuite) TestBackupOperation_CollectMetadata() {
},
{
name: "MultipleReasonsMultipleFiles",
inputMan: &kopia.ManifestEntry{
inputMan: kopia.ManifestEntry{
Manifest: &snapshot.Manifest{},
Reasons: []kopia.Reason{
{

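Aside on the recurring change in this file: []*kopia.ManifestEntry becomes []kopia.ManifestEntry. A minimal sketch of the difference in Go semantics; the motivation (avoiding accidental aliasing between table-driven test cases) is an inference, not stated in this commit.

	// Pointer slice: elements can be shared and mutated through the pointer.
	byPtr := []*kopia.ManifestEntry{{Manifest: &snapshot.Manifest{}}}
	p := byPtr[0]
	p.Reasons = nil // also visible via byPtr[0]

	// Value slice: each element is copied on assignment, so one test case
	// cannot accidentally mutate another's entry.
	byVal := []kopia.ManifestEntry{{Manifest: &snapshot.Manifest{}}}
	v := byVal[0]
	v.Reasons = nil // byVal[0] is unchanged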
View File

@ -194,6 +194,10 @@ func (op *RestoreOperation) do(
detailsStore streamstore.Reader,
start time.Time,
) (*details.Details, error) {
logger.Ctx(ctx).
With("control_options", op.Options, "selectors", op.Selectors).
Info("restoring selection")
bup, deets, err := getBackupAndDetailsFromID(
ctx,
op.BackupID,
@ -213,7 +217,8 @@ func (op *RestoreOperation) do(
ctx = clues.Add(
ctx,
"resource_owner", bup.Selector.DiscreteOwner,
"resource_owner_id", bup.Selector.ID(),
"resource_owner_name", clues.Hide(bup.Selector.Name()),
"details_entries", len(deets.Entries),
"details_paths", len(paths),
"backup_snapshot_id", bup.SnapshotID,
@ -230,7 +235,6 @@ func (op *RestoreOperation) do(
})
observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
logger.Ctx(ctx).With("control_options", op.Options, "selectors", op.Selectors).Info("restoring selection")
kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
defer closer()
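Note on the two edits above: the "restoring selection" log line moves to the top of do, so the attempt is recorded before the backup lookup can fail, and the context gains the owner's id alongside a concealed name. A minimal sketch of the concealment pattern, assuming the clues and logger APIs exactly as used in this hunk:

	ctx = clues.Add(
		ctx,
		"resource_owner_id", bup.Selector.ID(),
		// clues.Hide conceals the value in log output while keeping the key.
		"resource_owner_name", clues.Hide(bup.Selector.Name()))
	logger.Ctx(ctx).Info("restoring selection")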

View File

@ -8,11 +8,13 @@ import (
// ReadWrites tracks the total counts of reads and writes. The ItemsRead
// and ItemsWritten counts are assumed to represent successful reads and
// writes, respectively.
type ReadWrites struct {
BytesRead int64 `json:"bytesRead,omitempty"`
BytesUploaded int64 `json:"bytesUploaded,omitempty"`
ItemsRead int `json:"itemsRead,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"`
BytesRead int64 `json:"bytesRead,omitempty"`
BytesUploaded int64 `json:"bytesUploaded,omitempty"`
ItemsRead int `json:"itemsRead,omitempty"`
NonMetaBytesUploaded int64 `json:"nonMetaBytesUploaded,omitempty"`
NonMetaItemsWritten int `json:"nonMetaItemsWritten,omitempty"`
ItemsWritten int `json:"itemsWritten,omitempty"`
ResourceOwners int `json:"resourceOwners,omitempty"`
}
// StartAndEndTime tracks a paired starting time and ending time.
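A minimal sketch of how the reworked ReadWrites counters marshal, with hypothetical values and an assumed import path for the stats package (not shown in this diff); omitempty drops the zero-valued fields:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/alcionai/corso/src/internal/stats" // path assumed
	)

	func main() {
		rw := stats.ReadWrites{
			BytesUploaded:        2048, // raw bytes, .meta files included
			NonMetaBytesUploaded: 1536, // user-visible content only
			ItemsWritten:         10,
			NonMetaItemsWritten:  8,
		}

		out, _ := json.Marshal(rw)
		fmt.Println(string(out))
		// {"bytesUploaded":2048,"nonMetaBytesUploaded":1536,"nonMetaItemsWritten":8,"itemsWritten":10}
	}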

View File

@ -284,12 +284,12 @@ func (b Backup) toStats() backupStats {
return backupStats{
ID: string(b.ID),
BytesRead: b.BytesRead,
BytesUploaded: b.BytesUploaded,
BytesUploaded: b.NonMetaBytesUploaded,
EndedAt: b.CompletedAt,
ErrorCount: b.ErrorCount,
ItemsRead: b.ItemsRead,
ItemsSkipped: b.TotalSkippedItems,
ItemsWritten: b.ItemsWritten,
ItemsWritten: b.NonMetaItemsWritten,
StartedAt: b.StartedAt,
}
}
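With that remapping, user-facing stats report the non-meta counts. A test-style sketch from inside the backup package (a hypothetical test, not part of this commit; field types inferred from the assignments above):

	func TestToStatsPrefersNonMetaCounts(t *testing.T) {
		b := Backup{
			ReadWrites: stats.ReadWrites{
				BytesUploaded:        2048, // includes permission .meta files
				NonMetaBytesUploaded: 1536,
				ItemsWritten:         10,
				NonMetaItemsWritten:  8,
			},
		}

		s := b.toStats()
		assert.Equal(t, int64(1536), s.BytesUploaded, "stored size excludes meta")
		assert.Equal(t, 8, s.ItemsWritten, "written count excludes meta")
	}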

View File

@ -49,10 +49,12 @@ func stubBackup(t time.Time, ownerID, ownerName string) backup.Backup {
ErrorCount: 2,
Failure: "read, write",
ReadWrites: stats.ReadWrites{
BytesRead: 301,
BytesUploaded: 301,
ItemsRead: 1,
ItemsWritten: 1,
BytesRead: 301,
BytesUploaded: 301,
NonMetaBytesUploaded: 301,
ItemsRead: 1,
NonMetaItemsWritten: 1,
ItemsWritten: 1,
},
StartAndEndTime: stats.StartAndEndTime{
StartedAt: t,
@ -248,7 +250,7 @@ func (suite *BackupUnitSuite) TestBackup_MinimumPrintable() {
assert.Equal(t, now, result.Stats.StartedAt, "started at")
assert.Equal(t, b.Status, result.Status, "status")
assert.Equal(t, b.BytesRead, result.Stats.BytesRead, "size")
assert.Equal(t, b.BytesUploaded, result.Stats.BytesUploaded, "stored size")
assert.Equal(t, b.NonMetaBytesUploaded, result.Stats.BytesUploaded, "stored size")
assert.Equal(t, b.Selector.DiscreteOwner, result.Owner, "owner")
}

View File

@ -240,12 +240,27 @@ func (dm DetailsModel) FilterMetaFiles() DetailsModel {
return d2
}
// SumNonMetaFileSizes returns the total size of all items, excluding
// .meta files.
func (dm DetailsModel) SumNonMetaFileSizes() int64 {
var size int64
// Items returns only files, filtering out folders.
for _, ent := range dm.FilterMetaFiles().Items() {
size += ent.size()
}
return size
}
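Hypothetical usage from outside the package (helper name and imports are illustrative):

	// Folders and .meta files are excluded, so this approximates the
	// NonMetaBytesUploaded figure reported in backup stats.
	func printLogicalSize(dm details.DetailsModel) {
		fmt.Printf("non-meta file bytes: %d\n", dm.SumNonMetaFileSizes())
	}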
// isMetaFile checks whether a file is a metadata file. Metadata files
// store additional data, such as permissions for Drive items, and are
// not treated as regular files.
func (de Entry) isMetaFile() bool {
// SharePoint types aren't checked here, since SharePoint permissions
// were added after IsMeta was deprecated.
// Earlier OneDrive backups stored both meta files and regular files in
// details, so only OneDrive items need to be checked for meta files.
return de.ItemInfo.OneDrive != nil && de.ItemInfo.OneDrive.IsMeta
}
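To make the rule concrete, an in-package sketch of entries on either side of the check (the OneDriveInfo type name is assumed from the field access above):

	meta := Entry{ItemInfo: ItemInfo{OneDrive: &OneDriveInfo{IsMeta: true}}}
	regular := Entry{ItemInfo: ItemInfo{OneDrive: &OneDriveInfo{}}}

	// meta.isMetaFile() == true: filtered out by FilterMetaFiles and
	// skipped by SumNonMetaFileSizes.
	// regular.isMetaFile() == false: counted as a normal file.
	// SharePoint entries always return false, since SharePoint support
	// postdates the deprecated IsMeta flag.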