Merge branch 'main' into beta-library-switch
commit 57ce0be82a
@@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Handle case where user's drive has not been initialized
 - Inline attachments (e.g. copy/paste ) are discovered and backed up correctly ([#2163](https://github.com/alcionai/corso/issues/2163))
 - Guest and External users (for cloud accounts) and non-on-premise users (for systems that use on-prem AD syncs) are now excluded from backup and restore operations.
+- Remove the M365 license guid check in OneDrive backup which wasn't reliable.
 
 
 ## [v0.1.0] (alpha) - 2023-01-13
@@ -13,6 +13,7 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/cli/utils"
 	"github.com/alcionai/corso/src/internal/connector"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/model"
 	"github.com/alcionai/corso/src/pkg/backup"
@@ -209,7 +210,7 @@ func createSharePointCmd(cmd *cobra.Command, args []string) error {
 
 	defer utils.CloseRepo(ctx, r)
 
-	gc, err := connector.NewGraphConnector(ctx, acct, connector.Sites)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Sites)
 	if err != nil {
 		return Only(ctx, errors.Wrap(err, "Failed to connect to Microsoft APIs"))
 	}
@@ -39,7 +39,7 @@ var corsoCmd = &cobra.Command{
 func handleCorsoCmd(cmd *cobra.Command, args []string) error {
 	v, _ := cmd.Flags().GetBool("version")
 	if v {
-		print.Outf(cmd.Context(), "Corso\nversion: "+version.Version)
+		print.Outf(cmd.Context(), "Corso version: "+version.Version)
 		return nil
 	}
 
@@ -12,6 +12,7 @@ import (
 	. "github.com/alcionai/corso/src/cli/print"
 	"github.com/alcionai/corso/src/internal/common"
 	"github.com/alcionai/corso/src/internal/connector"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/mockconnector"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -111,7 +112,7 @@ func getGCAndVerifyUser(ctx context.Context, userID string) (*connector.GraphCon
 	}
 
 	// build a graph connector
-	gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
 	if err != nil {
 		return nil, account.Account{}, errors.Wrap(err, "connecting to graph api")
 	}
@@ -96,7 +96,7 @@ func runDisplayM365JSON(
 	var (
 		bs  []byte
 		err error
-		cat = graph.StringToPathCategory(category)
+		cat = path.ToCategoryType(category)
 		sw  = kw.NewJsonSerializationWriter()
 	)
 
@@ -178,7 +178,7 @@ func getGC(ctx context.Context) (*connector.GraphConnector, account.M365Config,
 		return nil, m365Cfg, Only(ctx, errors.Wrap(err, "finding m365 account details"))
 	}
 
-	gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
 	if err != nil {
 		return nil, m365Cfg, Only(ctx, errors.Wrap(err, "connecting to graph API"))
 	}
@@ -255,7 +255,7 @@ func getGC(ctx context.Context) (*connector.GraphConnector, error) {
 	}
 
 	// build a graph connector
-	gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
 	if err != nil {
 		return nil, Only(ctx, errors.Wrap(err, "connecting to graph api"))
 	}
@@ -4,7 +4,8 @@ go 1.19
 
 require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0
-	github.com/aws/aws-sdk-go v1.44.184
+	github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40
+	github.com/aws/aws-sdk-go v1.44.187
 	github.com/aws/aws-xray-sdk-go v1.8.0
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
@@ -52,6 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40 h1:bvAwz0dcJeIyRjudVyzmmawOvc4SqlSerKd0B4dh0yw=
+github.com/alcionai/clues v0.0.0-20230120231953-1cf61dbafc40/go.mod h1:UlAs8jkWIpsOMakiC8NxPgQQVQRdvyf1hYMszlYYLb4=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -60,8 +62,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
 github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/aws/aws-sdk-go v1.44.184 h1:/MggyE66rOImXJKl1HqhLQITvWvqIV7w1Q4MaG6FHUo=
-github.com/aws/aws-sdk-go v1.44.184/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.187 h1:D5CsRomPnlwDHJCanL2mtaLIcbhjiWxNh5j8zvaWdJA=
+github.com/aws/aws-sdk-go v1.44.187/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-xray-sdk-go v1.8.0 h1:0xncHZ588wB/geLjbM/esoW3FOEThWy2TJyb4VXfLFY=
 github.com/aws/aws-xray-sdk-go v1.8.0/go.mod h1:7LKe47H+j3evfvS1+q0wzpoaGXGrF3mUsfM+thqVO+A=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -87,6 +87,7 @@ func (gc *GraphConnector) DataCollections(
 	case selectors.ServiceSharePoint:
 		colls, err := sharepoint.DataCollections(
 			ctx,
+			gc.itemClient,
 			sels,
 			gc.credentials.AzureTenantID,
 			gc.Service,
@@ -198,6 +199,7 @@ func (gc *GraphConnector) OneDriveDataCollections(
 	logger.Ctx(ctx).With("user", user).Debug("Creating OneDrive collections")
 
 	odcs, err := onedrive.NewCollections(
+		gc.itemClient,
 		gc.credentials.AzureTenantID,
 		user,
 		onedrive.OneDriveSource,
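
Taken together, the two hunks above are the entry point of the plumbing change that runs through the rest of this commit: the connector now carries a shared *http.Client and hands it down to the SharePoint and OneDrive collection builders. A condensed sketch of the intended flow, using only names that appear in this diff (an illustration, not a complete program):

    // One client, built once, threaded down to where downloads happen.
    client := graph.LargeItemClient()

    gc, err := connector.NewGraphConnector(ctx, client, acct, connector.Users)
    if err != nil {
        return err
    }

    // gc stores the client as gc.itemClient and passes it to
    // sharepoint.DataCollections / onedrive.NewCollections, which in turn
    // hand it to each Collection's itemReader for per-item downloads.
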
@@ -10,6 +10,7 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/alcionai/corso/src/internal/connector/exchange"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/sharepoint"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -43,7 +44,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) SetupSuite() {
 
 	tester.MustGetEnvVars(suite.T(), tester.M365AcctCredEnvs...)
 
-	suite.connector = loadConnector(ctx, suite.T(), AllResources)
+	suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), AllResources)
 	suite.user = tester.M365UserID(suite.T())
 	suite.site = tester.M365SiteID(suite.T())
 
@@ -62,7 +63,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestExchangeDataCollection
 
 	selUsers := []string{suite.user}
 
-	connector := loadConnector(ctx, suite.T(), Users)
+	connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
 	tests := []struct {
 		name        string
 		getSelector func(t *testing.T) selectors.Selector
@@ -138,7 +139,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestDataCollections_invali
 
 	owners := []string{"snuffleupagus"}
 
-	connector := loadConnector(ctx, suite.T(), Users)
+	connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
 	tests := []struct {
 		name        string
 		getSelector func(t *testing.T) selectors.Selector
@@ -214,7 +215,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
 
 	selSites := []string{suite.site}
 
-	connector := loadConnector(ctx, suite.T(), Sites)
+	connector := loadConnector(ctx, suite.T(), graph.LargeItemClient(), Sites)
 	tests := []struct {
 		name     string
 		expected int
@@ -243,6 +244,7 @@ func (suite *ConnectorDataCollectionIntegrationSuite) TestSharePointDataCollecti
 		suite.T().Run(test.name, func(t *testing.T) {
 			collections, err := sharepoint.DataCollections(
 				ctx,
+				graph.LargeItemClient(),
 				test.getSelector(),
 				connector.credentials.AzureTenantID,
 				connector.Service,
@@ -298,7 +300,7 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) SetupSuite() {
 
 	tester.MustGetEnvSets(suite.T(), tester.M365AcctCredEnvs)
 
-	suite.connector = loadConnector(ctx, suite.T(), Sites)
+	suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), Sites)
 	suite.user = tester.M365UserID(suite.T())
 
 	tester.LogTimeOfTest(suite.T())
@@ -311,7 +313,7 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) TestCreateShar
 	var (
 		t       = suite.T()
 		siteID  = tester.M365SiteID(t)
-		gc      = loadConnector(ctx, t, Sites)
+		gc      = loadConnector(ctx, t, graph.LargeItemClient(), Sites)
 		siteIDs = []string{siteID}
 	)
 
@@ -335,7 +337,7 @@ func (suite *ConnectorCreateSharePointCollectionIntegrationSuite) TestCreateShar
 	var (
 		t       = suite.T()
 		siteID  = tester.M365SiteID(t)
-		gc      = loadConnector(ctx, t, Sites)
+		gc      = loadConnector(ctx, t, graph.LargeItemClient(), Sites)
 		siteIDs = []string{siteID}
 	)
 
@@ -4,7 +4,6 @@ import (
 	"net/http"
 	"net/http/httputil"
 	"os"
-	"strings"
 	"time"
 
 	az "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
@@ -15,7 +14,6 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/pkg/logger"
-	"github.com/alcionai/corso/src/pkg/path"
 )
 
 const (
@@ -57,6 +55,23 @@ func CreateHTTPClient() *http.Client {
 	return httpClient
 }
 
+// LargeItemClient generates a client that's configured to handle
+// large file downloads. This client isn't suitable for other queries
+// due to loose restrictions on timeouts and such.
+//
+// Re-use of http clients is critical, or else we leak os resources
+// and consume relatively unbound socket connections. It is important
+// to centralize this client to be passed downstream where api calls
+// can utilize it on a per-download basis.
+//
+// TODO: this should get owned by an API client layer, not the GC itself.
+func LargeItemClient() *http.Client {
+	httpClient := CreateHTTPClient()
+	httpClient.Timeout = 0 // infinite timeout for pulling large files
+
+	return httpClient
+}
+
 // ---------------------------------------------------------------------------
 // Logging Middleware
 // ---------------------------------------------------------------------------
@@ -87,6 +102,10 @@ func (handler *LoggingMiddleware) Intercept(
 		logger.Ctx(ctx).Infow("graph api throttling", "method", req.Method, "url", req.URL)
 	}
 
+	if resp.StatusCode != http.StatusTooManyRequests && (resp.StatusCode/100) != 2 {
+		logger.Ctx(ctx).Infow("graph api error", "method", req.Method, "url", req.URL)
+	}
+
 	if logger.DebugAPI || os.Getenv(logGraphRequestsEnvKey) != "" {
 		respDump, _ := httputil.DumpResponse(resp, true)
 
@@ -104,26 +123,3 @@ func (handler *LoggingMiddleware) Intercept(
 
 	return resp, err
 }
-
-// ---------------------------------------------------------------------------
-// Other Helpers
-// ---------------------------------------------------------------------------
-
-func StringToPathCategory(input string) path.CategoryType {
-	param := strings.ToLower(input)
-
-	switch param {
-	case "email":
-		return path.EmailCategory
-	case "contacts":
-		return path.ContactsCategory
-	case "events":
-		return path.EventsCategory
-	case "files":
-		return path.FilesCategory
-	case "libraries":
-		return path.LibrariesCategory
-	default:
-		return path.UnknownCategory
-	}
-}
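
The doc comment added on LargeItemClient above carries the rationale for the whole change: an http.Client owns a connection pool, so building a fresh client per download defeats keep-alive and leaks sockets, while Timeout = 0 removes the client-wide deadline that large files can exceed. A minimal sketch of the reuse pattern; the download helper is hypothetical and only illustrates per-call reuse of one shared client:

    import (
        "context"
        "io"
        "net/http"
    )

    // Assumption: built once at startup and shared; stands in for
    // graph.LargeItemClient() from the hunk above.
    var itemClient = &http.Client{Timeout: 0}

    // download is a hypothetical helper. With Timeout = 0 the client imposes
    // no deadline, so the caller's context is what bounds the request.
    func download(ctx context.Context, url string) (io.ReadCloser, error) {
        req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
        if err != nil {
            return nil, err
        }

        resp, err := itemClient.Do(req) // reuses pooled keep-alive connections
        if err != nil {
            return nil, err
        }

        return resp.Body, nil
    }
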
@@ -4,6 +4,7 @@ package connector
 
 import (
 	"context"
+	"net/http"
 	"runtime/trace"
 	"strings"
 	"sync"
@@ -38,8 +39,9 @@ import (
 // GraphRequestAdapter from the msgraph-sdk-go. Additional fields are for
 // bookkeeping and interfacing with other component.
 type GraphConnector struct {
 	Service graph.Servicer
 	Owners  api.Client
+	itemClient *http.Client // configured to handle large item downloads
 
 	tenant string
 	Users  map[string]string // key<email> value<id>
@@ -64,13 +66,19 @@ const (
 	Sites
 )
 
-func NewGraphConnector(ctx context.Context, acct account.Account, r resource) (*GraphConnector, error) {
+func NewGraphConnector(
+	ctx context.Context,
+	itemClient *http.Client,
+	acct account.Account,
+	r resource,
+) (*GraphConnector, error) {
 	m365, err := acct.M365Config()
 	if err != nil {
 		return nil, errors.Wrap(err, "retrieving m365 account configuration")
 	}
 
 	gc := GraphConnector{
+		itemClient: itemClient,
 		tenant:     m365.AzureTenantID,
 		Users:      make(map[string]string, 0),
 		wg:         &sync.WaitGroup{},
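
With the widened constructor above, GraphConnector no longer builds its own download transport; callers inject one. Every call site updated in this commit follows the same shape, sketched here with names taken from the diff:

    gc, err := connector.NewGraphConnector(
        ctx,
        graph.LargeItemClient(), // injected; stored as the unexported gc.itemClient
        acct,
        connector.Users,
    )
    if err != nil {
        return errors.Wrap(err, "connecting to graph api")
    }

Because itemClient is unexported, tests cannot set it after construction, which is why loadConnector and the test suites below also grow a *http.Client parameter.
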
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/account"
@@ -65,7 +66,7 @@ func (suite *DisconnectedGraphConnectorSuite) TestBadConnection() {
 
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
-			gc, err := NewGraphConnector(ctx, test.acct(t), Users)
+			gc, err := NewGraphConnector(ctx, graph.LargeItemClient(), test.acct(t), Users)
 			assert.Nil(t, gc, test.name+" failed")
 			assert.NotNil(t, err, test.name+"failed")
 		})
@@ -3,6 +3,7 @@ package connector
 import (
 	"context"
 	"io"
+	"net/http"
 	"reflect"
 	"testing"
 
@@ -978,9 +979,9 @@ func getSelectorWith(
 	}
 }
 
-func loadConnector(ctx context.Context, t *testing.T, r resource) *GraphConnector {
+func loadConnector(ctx context.Context, t *testing.T, itemClient *http.Client, r resource) *GraphConnector {
 	a := tester.NewM365Account(t)
-	connector, err := NewGraphConnector(ctx, a, r)
+	connector, err := NewGraphConnector(ctx, itemClient, a, r)
 	require.NoError(t, err)
 
 	return connector
@@ -156,7 +156,7 @@ func (suite *GraphConnectorIntegrationSuite) SetupSuite() {
 
 	tester.MustGetEnvSets(suite.T(), tester.M365AcctCredEnvs)
 
-	suite.connector = loadConnector(ctx, suite.T(), Users)
+	suite.connector = loadConnector(ctx, suite.T(), graph.LargeItemClient(), Users)
 	suite.user = tester.M365UserID(suite.T())
 	suite.acct = tester.NewM365Account(suite.T())
 
@@ -380,7 +380,7 @@ func runRestoreBackupTest(
 
 	start := time.Now()
 
-	restoreGC := loadConnector(ctx, t, test.resource)
+	restoreGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
 	restoreSel := getSelectorWith(t, test.service, resourceOwners, true)
 	deets, err := restoreGC.RestoreDataCollections(
 		ctx,
@@ -419,7 +419,7 @@ func runRestoreBackupTest(
 		})
 	}
 
-	backupGC := loadConnector(ctx, t, test.resource)
+	backupGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
 	backupSel := backupSelectorForExpected(t, test.service, expectedDests)
 	t.Logf("Selective backup of %s\n", backupSel)
 
@@ -870,7 +870,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 		dest.ContainerName,
 	)
 
-	restoreGC := loadConnector(ctx, t, test.resource)
+	restoreGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
 	deets, err := restoreGC.RestoreDataCollections(ctx, suite.acct, restoreSel, dest, collections)
 	require.NoError(t, err)
 	require.NotNil(t, deets)
@@ -888,7 +888,7 @@ func (suite *GraphConnectorIntegrationSuite) TestMultiFolderBackupDifferentNames
 
 	// Run a backup and compare its output with what we put in.
 
-	backupGC := loadConnector(ctx, t, test.resource)
+	backupGC := loadConnector(ctx, t, graph.LargeItemClient(), test.resource)
 	backupSel := backupSelectorForExpected(t, test.service, expectedDests)
 	t.Log("Selective backup of", backupSel)
 
@@ -4,11 +4,13 @@ package onedrive
 import (
 	"context"
 	"io"
+	"net/http"
 	"sync"
 	"sync/atomic"
 	"time"
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
+	"github.com/pkg/errors"
 	"github.com/spatialcurrent/go-lazy/pkg/lazy"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
@@ -43,6 +45,9 @@ var (
 
 // Collection represents a set of OneDrive objects retrieved from M365
 type Collection struct {
+	// configured to handle large item downloads
+	itemClient *http.Client
+
 	// data is used to share data streams with the collection consumer
 	data chan data.Stream
 	// folderPath indicates what level in the hierarchy this collection
@@ -64,12 +69,13 @@ type Collection struct {
 
 // itemReadFunc returns a reader for the specified item
 type itemReaderFunc func(
-	ctx context.Context,
+	hc *http.Client,
 	item models.DriveItemable,
 ) (itemInfo details.ItemInfo, itemData io.ReadCloser, err error)
 
 // NewCollection creates a Collection
 func NewCollection(
+	itemClient *http.Client,
 	folderPath path.Path,
 	driveID string,
 	service graph.Servicer,
@@ -78,6 +84,7 @@ func NewCollection(
 	ctrlOpts control.Options,
 ) *Collection {
 	c := &Collection{
+		itemClient: itemClient,
 		folderPath: folderPath,
 		driveItems: map[string]models.DriveItemable{},
 		driveID:    driveID,
@@ -198,11 +205,16 @@ func (oc *Collection) populateItems(ctx context.Context) {
 		m.Unlock()
 	}
 
-	for _, item := range oc.driveItems {
+	for id, item := range oc.driveItems {
 		if oc.ctrl.FailFast && errs != nil {
 			break
 		}
 
+		if item == nil {
+			errUpdater(id, errors.New("nil item"))
+			continue
+		}
+
 		semaphoreCh <- struct{}{}
 
 		wg.Add(1)
@@ -219,10 +231,9 @@ func (oc *Collection) populateItems(ctx context.Context) {
 			)
 
 			for i := 1; i <= maxRetries; i++ {
-				itemInfo, itemData, err = oc.itemReader(ctx, item)
-
-				// retry on Timeout type errors, break otherwise.
+				itemInfo, itemData, err = oc.itemReader(oc.itemClient, item)
 				if err == nil || graph.IsErrTimeout(err) == nil {
+					// retry on Timeout type errors, break otherwise.
 					break
 				}
 
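
The signature swap in itemReaderFunc above (context.Context out, *http.Client in) is what finally connects the shared client to each download; populateItems then retries the reader, re-attempting only timeout-class errors. A hypothetical reader conforming to the new signature; the URL resolution is elided because it is not part of this diff:

    // sketchReader is illustrative only; the real reader lives elsewhere in
    // the onedrive package.
    func sketchReader(hc *http.Client, item models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
        downloadURL := "" // assumption: resolved from the drive item's metadata

        resp, err := hc.Get(downloadURL)
        if err != nil {
            return details.ItemInfo{}, nil, err // timeout errors get retried by the caller
        }

        return details.ItemInfo{}, resp.Body, nil
    }
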
@@ -2,9 +2,8 @@ package onedrive
 
 import (
 	"bytes"
-	"context"
-	"errors"
 	"io"
+	"net/http"
 	"sync"
 	"testing"
 	"time"
@@ -15,6 +14,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/data"
 	"github.com/alcionai/corso/src/pkg/backup/details"
@@ -73,7 +73,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "oneDrive, no duplicates",
 			numInstances: 1,
 			source:       OneDriveSource,
-			itemReader: func(context.Context, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
+			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
 					nil
@@ -87,7 +87,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "oneDrive, duplicates",
 			numInstances: 3,
 			source:       OneDriveSource,
-			itemReader: func(context.Context, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
+			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{OneDrive: &details.OneDriveInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
 					nil
@@ -101,7 +101,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "sharePoint, no duplicates",
 			numInstances: 1,
 			source:       SharePointSource,
-			itemReader: func(context.Context, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
+			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
 					nil
@@ -115,7 +115,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			name:         "sharePoint, duplicates",
 			numInstances: 3,
 			source:       SharePointSource,
-			itemReader: func(context.Context, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
+			itemReader: func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
 				return details.ItemInfo{SharePoint: &details.SharePointInfo{ItemName: testItemName, Modified: now}},
 					io.NopCloser(bytes.NewReader(testItemData)),
 					nil
@@ -140,6 +140,7 @@ func (suite *CollectionUnitTestSuite) TestCollection() {
 			require.NoError(t, err)
 
 			coll := NewCollection(
+				graph.LargeItemClient(),
 				folderPath,
 				"drive-id",
 				suite,
@@ -224,6 +225,7 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 	require.NoError(t, err)
 
 	coll := NewCollection(
+		graph.LargeItemClient(),
 		folderPath,
 		"fakeDriveID",
 		suite,
@@ -235,10 +237,8 @@ func (suite *CollectionUnitTestSuite) TestCollectionReadError() {
 	mockItem.SetId(&testItemID)
 	coll.Add(mockItem)
 
-	readError := errors.New("Test error")
-
-	coll.itemReader = func(context.Context, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
-		return details.ItemInfo{}, nil, readError
+	coll.itemReader = func(*http.Client, models.DriveItemable) (details.ItemInfo, io.ReadCloser, error) {
+		return details.ItemInfo{}, nil, assert.AnError
 	}
 
 	coll.Items()
@@ -3,10 +3,12 @@ package onedrive
 import (
 	"context"
 	"fmt"
+	"net/http"
 	"strings"
 
 	"github.com/microsoftgraph/msgraph-sdk-go/models"
 	"github.com/pkg/errors"
+	"golang.org/x/exp/maps"
 
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/support"
@@ -45,6 +47,9 @@ type folderMatcher interface {
 // Collections is used to retrieve drive data for a
 // resource owner, which can be either a user or a sharepoint site.
 type Collections struct {
+	// configured to handle large item downloads
+	itemClient *http.Client
+
 	tenant        string
 	resourceOwner string
 	source        driveSource
@@ -65,6 +70,7 @@ type Collections struct {
 }
 
 func NewCollections(
+	itemClient *http.Client,
 	tenant string,
 	resourceOwner string,
 	source driveSource,
@@ -74,6 +80,7 @@ func NewCollections(
 	ctrlOpts control.Options,
 ) *Collections {
 	return &Collections{
+		itemClient:    itemClient,
 		tenant:        tenant,
 		resourceOwner: resourceOwner,
 		source:        source,
@@ -98,6 +105,12 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 		deltaURLs = map[string]string{}
 		// Drive ID -> folder ID -> folder path
 		folderPaths = map[string]map[string]string{}
+		// Items that should be excluded when sourcing data from the base backup.
+		// TODO(ashmrtn): This list contains the M365 IDs of deleted items so while
+		// it's technically safe to pass all the way through to kopia (files are
+		// unlikely to be named their M365 ID) we should wait to do that until we've
+		// switched to using those IDs for file names in kopia.
+		excludedItems = map[string]struct{}{}
 	)
 
 	// Update the collection map with items from each drive
@@ -105,7 +118,13 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 		driveID := *d.GetId()
 		driveName := *d.GetName()
 
-		delta, paths, err := collectItems(ctx, c.service, driveID, driveName, c.UpdateCollections)
+		delta, paths, excluded, err := collectItems(
+			ctx,
+			c.service,
+			driveID,
+			driveName,
+			c.UpdateCollections,
+		)
 		if err != nil {
 			return nil, err
 		}
@@ -121,6 +140,8 @@ func (c *Collections) Get(ctx context.Context) ([]data.Collection, error) {
 				folderPaths[driveID][id] = p
 			}
 		}
+
+		maps.Copy(excludedItems, excluded)
 	}
 
 	observe.Message(ctx, fmt.Sprintf("Discovered %d items to backup", c.NumItems))
@@ -171,6 +192,7 @@ func (c *Collections) UpdateCollections(
 	items []models.DriveItemable,
 	oldPaths map[string]string,
 	newPaths map[string]string,
+	excluded map[string]struct{},
 ) error {
 	for _, item := range items {
 		if item.GetRoot() != nil {
@@ -231,18 +253,31 @@ func (c *Collections) UpdateCollections(
 			updatePath(newPaths, *item.GetId(), folderPath.String())
 
 		case item.GetFile() != nil:
+			if item.GetDeleted() != nil {
+				excluded[*item.GetId()] = struct{}{}
+				// Exchange counts items streamed through it which includes deletions so
+				// add that here too.
+				c.NumFiles++
+				c.NumItems++
+
+				continue
+			}
+
+			// TODO(ashmrtn): Figure what when an item was moved (maybe) and add it to
+			// the exclude list.
+
 			col, found := c.CollectionMap[collectionPath.String()]
 			if !found {
 				// TODO(ashmrtn): Compare old and new path and set collection state
 				// accordingly.
 				col = NewCollection(
+					c.itemClient,
 					collectionPath,
 					driveID,
 					c.service,
 					c.statusUpdater,
 					c.source,
-					c.ctrl,
-				)
+					c.ctrl)
 
 				c.CollectionMap[collectionPath.String()] = col
 				c.NumContainers++
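
The excludedItems additions above introduce the second theme of this merge: alongside the delta URL and folder paths, each drive's walk now reports the M365 IDs of deleted files, and Get folds them into one run-wide exclusion set for incremental backups. Condensed from the hunks above (error handling and path bookkeeping trimmed):

    excludedItems := map[string]struct{}{}

    for _, d := range drives { // assumption: the drive list comes from earlier in Get
        _, _, excluded, err := collectItems(ctx, c.service, *d.GetId(), *d.GetName(), c.UpdateCollections)
        if err != nil {
            return nil, err
        }

        // maps.Copy (golang.org/x/exp/maps) merges this drive's deleted-item
        // IDs into the set later used to filter the base backup.
        maps.Copy(excludedItems, excluded)
    }
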
@@ -10,6 +10,7 @@ import (
 	"github.com/stretchr/testify/suite"
 	"golang.org/x/exp/maps"
 
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/selectors"
@@ -105,6 +106,7 @@ func (suite *OneDriveCollectionsSuite) TestUpdateCollections() {
 		expectedContainerCount int
 		expectedFileCount      int
 		expectedMetadataPaths  map[string]string
+		expectedExcludes       map[string]struct{}
 	}{
 		{
 			testCase: "Invalid item",
@@ -115,6 +117,7 @@
 			scope:                 anyFolder,
 			expect:                assert.Error,
 			expectedMetadataPaths: map[string]string{},
+			expectedExcludes:      map[string]struct{}{},
 		},
 		{
 			testCase: "Single File",
@@ -135,6 +138,7 @@
 			expectedContainerCount: 1,
 			// Root folder is skipped since it's always present.
 			expectedMetadataPaths: map[string]string{},
+			expectedExcludes:      map[string]struct{}{},
 		},
 		{
 			testCase: "Single Folder",
@@ -153,6 +157,7 @@
 					testBaseDrivePath+"/folder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "Single Package",
@@ -171,6 +176,7 @@
 					testBaseDrivePath+"/package",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "1 root file, 1 folder, 1 package, 2 files, 3 collections",
@@ -209,6 +215,7 @@
 					testBaseDrivePath+"/package",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "contains folder selector",
@@ -258,6 +265,7 @@
 					testBaseDrivePath+"/folder/subfolder/folder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "prefix subfolder selector",
@@ -292,6 +300,7 @@
 					testBaseDrivePath+"/folder/subfolder/folder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "match subfolder selector",
@@ -318,6 +327,7 @@
 			expectedContainerCount: 1,
 			// No child folders for subfolder so nothing here.
 			expectedMetadataPaths: map[string]string{},
+			expectedExcludes:      map[string]struct{}{},
 		},
 		{
 			testCase: "not moved folder tree",
@@ -358,6 +368,7 @@
 					testBaseDrivePath+"/folder/subfolder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "moved folder tree",
@@ -398,6 +409,7 @@
 					testBaseDrivePath+"/folder/subfolder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "moved folder tree and subfolder 1",
@@ -439,6 +451,7 @@
 					testBaseDrivePath+"/subfolder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "moved folder tree and subfolder 2",
@@ -480,6 +493,7 @@
 					testBaseDrivePath+"/subfolder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
 		},
 		{
 			testCase: "deleted folder and package",
@@ -508,6 +522,7 @@
 			expectedFileCount:      0,
 			expectedContainerCount: 0,
 			expectedMetadataPaths:  map[string]string{},
+			expectedExcludes:       map[string]struct{}{},
 		},
 		{
 			testCase: "delete folder tree move subfolder",
@@ -543,6 +558,24 @@
 					testBaseDrivePath+"/subfolder",
 				)[0],
 			},
+			expectedExcludes: map[string]struct{}{},
+		},
+		{
+			testCase: "delete file",
+			items: []models.DriveItemable{
+				delItem("item", testBaseDrivePath, true, false, false),
+			},
+			inputFolderMap:          map[string]string{},
+			scope:                   anyFolder,
+			expect:                  assert.NoError,
+			expectedCollectionPaths: []string{},
+			expectedItemCount:       1,
+			expectedFileCount:       1,
+			expectedContainerCount:  0,
+			expectedMetadataPaths:   map[string]string{},
+			expectedExcludes: map[string]struct{}{
+				"item": {},
+			},
 		},
 	}
 
@@ -551,9 +584,11 @@
 			ctx, flush := tester.NewContext()
 			defer flush()
 
+			excludes := map[string]struct{}{}
 			outputFolderMap := map[string]string{}
 			maps.Copy(outputFolderMap, tt.inputFolderMap)
 			c := NewCollections(
+				graph.LargeItemClient(),
 				tenant,
 				user,
 				OneDriveSource,
@@ -562,7 +597,15 @@
 				nil,
 				control.Options{})
 
-			err := c.UpdateCollections(ctx, "driveID", "General", tt.items, tt.inputFolderMap, outputFolderMap)
+			err := c.UpdateCollections(
+				ctx,
+				"driveID",
+				"General",
+				tt.items,
+				tt.inputFolderMap,
+				outputFolderMap,
+				excludes,
+			)
 			tt.expect(t, err)
 			assert.Equal(t, len(tt.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
 			assert.Equal(t, tt.expectedItemCount, c.NumItems, "item count")
@@ -573,6 +616,7 @@
 			}
 
 			assert.Equal(t, tt.expectedMetadataPaths, outputFolderMap)
+			assert.Equal(t, tt.expectedExcludes, excludes)
 		})
 	}
 }
@ -6,7 +6,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
msgraphgocore "github.com/microsoftgraph/msgraph-sdk-go-core"
|
|
||||||
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
|
msdrive "github.com/microsoftgraph/msgraph-sdk-go/drive"
|
||||||
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
|
msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
|
||||||
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
"github.com/microsoftgraph/msgraph-sdk-go/models"
|
||||||
@ -20,55 +19,7 @@ import (
|
|||||||
"github.com/alcionai/corso/src/pkg/logger"
|
"github.com/alcionai/corso/src/pkg/logger"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var errFolderNotFound = errors.New("folder not found")
|
||||||
errFolderNotFound = errors.New("folder not found")
|
|
||||||
|
|
||||||
// nolint:lll
|
|
||||||
// OneDrive associated SKUs located at:
|
|
||||||
// https://learn.microsoft.com/en-us/azure/active-directory/enterprise-users/licensing-service-plan-reference
|
|
||||||
skuIDs = []string{
|
|
||||||
// Microsoft 365 Apps for Business 0365
|
|
||||||
"cdd28e44-67e3-425e-be4c-737fab2899d3",
|
|
||||||
// Microsoft 365 Apps for Business SMB_Business
|
|
||||||
"b214fe43-f5a3-4703-beeb-fa97188220fc",
|
|
||||||
// Microsoft 365 Apps for enterprise
|
|
||||||
"c2273bd0-dff7-4215-9ef5-2c7bcfb06425",
|
|
||||||
// Microsoft 365 Apps for Faculty
|
|
||||||
"12b8c807-2e20-48fc-b453-542b6ee9d171",
|
|
||||||
// Microsoft 365 Apps for Students
|
|
||||||
"c32f9321-a627-406d-a114-1f9c81aaafac",
|
|
||||||
// OneDrive for Business (Plan 1)
|
|
||||||
"e6778190-713e-4e4f-9119-8b8238de25df",
|
|
||||||
// OneDrive for Business (Plan 2)
|
|
||||||
"ed01faf2-1d88-4947-ae91-45ca18703a96",
|
|
||||||
// Visio Plan 1
|
|
||||||
"ca7f3140-d88c-455b-9a1c-7f0679e31a76",
|
|
||||||
// Visio Plan 2
|
|
||||||
"38b434d2-a15e-4cde-9a98-e737c75623e1",
|
|
||||||
// Visio Online Plan 1
|
|
||||||
"4b244418-9658-4451-a2b8-b5e2b364e9bd",
|
|
||||||
// Visio Online Plan 2
|
|
||||||
"c5928f49-12ba-48f7-ada3-0d743a3601d5",
|
|
||||||
// Visio Plan 2 for GCC
|
|
||||||
"4ae99959-6b0f-43b0-b1ce-68146001bdba",
|
|
||||||
// ONEDRIVEENTERPRISE
|
|
||||||
"afcafa6a-d966-4462-918c-ec0b4e0fe642",
|
|
||||||
// Microsoft 365 E5 Developer
|
|
||||||
"c42b9cae-ea4f-4ab7-9717-81576235ccac",
|
|
||||||
// Microsoft 365 E5
|
|
||||||
"06ebc4ee-1bb5-47dd-8120-11324bc54e06",
|
|
||||||
// Office 365 E4
|
|
||||||
"1392051d-0cb9-4b7a-88d5-621fee5e8711",
|
|
||||||
// Microsoft 365 E3
|
|
||||||
"05e9a617-0261-4cee-bb44-138d3ef5d965",
|
|
||||||
// Microsoft 365 Business Premium
|
|
||||||
"cbdc14ab-d96c-4c30-b9f4-6ada7cdc1d46",
|
|
||||||
// Microsoft 365 Business Standard
|
|
||||||
"f245ecc8-75af-4f8e-b61f-27d8114de5f3",
|
|
||||||
// Microsoft 365 Business Basic
|
|
||||||
"3b555118-da6a-4418-894f-7df1e2096870",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// nextLinkKey is used to find the next link in a paged
|
// nextLinkKey is used to find the next link in a paged
|
||||||
@ -116,21 +67,11 @@ func siteDrives(ctx context.Context, service graph.Servicer, site string) ([]mod
|
|||||||
|
|
||||||
func userDrives(ctx context.Context, service graph.Servicer, user string) ([]models.Driveable, error) {
|
func userDrives(ctx context.Context, service graph.Servicer, user string) ([]models.Driveable, error) {
|
||||||
var (
|
var (
|
||||||
hasDrive bool
|
|
||||||
numberOfRetries = 3
|
numberOfRetries = 3
|
||||||
r models.DriveCollectionResponseable
|
r models.DriveCollectionResponseable
|
||||||
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
hasDrive, err := hasDriveLicense(ctx, service, user)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, user)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasDrive {
|
|
||||||
logger.Ctx(ctx).Debugf("User %s does not have a license for OneDrive", user)
|
|
||||||
return make([]models.Driveable, 0), nil // no license
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retry Loop for Drive retrieval. Request can timeout
|
// Retry Loop for Drive retrieval. Request can timeout
|
||||||
for i := 0; i <= numberOfRetries; i++ {
|
for i := 0; i <= numberOfRetries; i++ {
|
||||||
r, err = service.Client().UsersById(user).Drives().Get(ctx, nil)
|
r, err = service.Client().UsersById(user).Drives().Get(ctx, nil)
|
||||||
@ -138,7 +79,7 @@ func userDrives(ctx context.Context, service graph.Servicer, user string) ([]mod
|
|||||||
detailedError := support.ConnectorStackErrorTrace(err)
|
detailedError := support.ConnectorStackErrorTrace(err)
|
||||||
if strings.Contains(detailedError, userMysiteURLNotFound) ||
|
if strings.Contains(detailedError, userMysiteURLNotFound) ||
|
||||||
strings.Contains(detailedError, userMysiteNotFound) {
|
strings.Contains(detailedError, userMysiteNotFound) {
|
||||||
logger.Ctx(ctx).Debugf("User %s does not have a drive", user)
|
logger.Ctx(ctx).Infof("User %s does not have a drive", user)
|
||||||
return make([]models.Driveable, 0), nil // no license
|
return make([]models.Driveable, 0), nil // no license
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -170,6 +111,7 @@ type itemCollector func(
 	driveItems []models.DriveItemable,
 	oldPaths map[string]string,
 	newPaths map[string]string,
+	excluded map[string]struct{},
 ) error
 
 // collectItems will enumerate all items in the specified drive and hand them to the
@@ -179,13 +121,14 @@ func collectItems(
 	service graph.Servicer,
 	driveID, driveName string,
 	collector itemCollector,
-) (string, map[string]string, error) {
+) (string, map[string]string, map[string]struct{}, error) {
 	var (
 		newDeltaURL = ""
 		// TODO(ashmrtn): Eventually this should probably be a parameter so we can
 		// take in previous paths.
 		oldPaths = map[string]string{}
 		newPaths = map[string]string{}
+		excluded = map[string]struct{}{}
 	)
 
 	maps.Copy(newPaths, oldPaths)
@@ -219,16 +162,16 @@ func collectItems(
 	for {
 		r, err := builder.Get(ctx, requestConfig)
 		if err != nil {
-			return "", nil, errors.Wrapf(
+			return "", nil, nil, errors.Wrapf(
 				err,
 				"failed to query drive items. details: %s",
 				support.ConnectorStackErrorTrace(err),
 			)
 		}
 
-		err = collector(ctx, driveID, driveName, r.GetValue(), oldPaths, newPaths)
+		err = collector(ctx, driveID, driveName, r.GetValue(), oldPaths, newPaths, excluded)
 		if err != nil {
-			return "", nil, err
+			return "", nil, nil, err
 		}
 
 		if r.GetOdataDeltaLink() != nil && len(*r.GetOdataDeltaLink()) > 0 {
@@ -245,7 +188,7 @@ func collectItems(
 		builder = msdrives.NewItemRootDeltaRequestBuilder(*nextLink, service.Adapter())
 	}
 
-	return newDeltaURL, newPaths, nil
+	return newDeltaURL, newPaths, excluded, nil
}
 
 // getFolder will lookup the specified folder name under `parentFolderID`
@@ -352,7 +295,7 @@ func GetAllFolders(
 	folders := map[string]*Displayable{}
 
 	for _, d := range drives {
-		_, _, err = collectItems(
+		_, _, _, err = collectItems(
 			ctx,
 			gs,
 			*d.GetId(),
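For reference, a sketch of an itemCollector that satisfies the widened callback signature. It assumes, as the SDK's deleted facet suggests, that tombstoned drive items expose only an ID; everything beyond the signature itself is illustrative, not taken from this diff:

package sketch

import (
	"context"

	"github.com/microsoftgraph/msgraph-sdk-go/models"
)

// collectDeleted records tombstoned items into the shared excluded set.
// Deleted delta entries carry no path information, so the item ID is the
// only usable key; live-item handling is elided.
func collectDeleted(
	ctx context.Context,
	driveID, driveName string,
	driveItems []models.DriveItemable,
	oldPaths map[string]string,
	newPaths map[string]string,
	excluded map[string]struct{},
) error {
	for _, item := range driveItems {
		if item.GetDeleted() != nil && item.GetId() != nil {
			excluded[*item.GetId()] = struct{}{}
		}
	}

	return nil
}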
@@ -363,6 +306,7 @@ func GetAllFolders(
 			items []models.DriveItemable,
 			oldPaths map[string]string,
 			newPaths map[string]string,
+			excluded map[string]struct{},
 		) error {
 			for _, item := range items {
 				// Skip the root item.
@@ -419,56 +363,3 @@ func DeleteItem(
 
 	return nil
 }
-
-// hasDriveLicense utility function that queries M365 server
-// to investigate the user's includes access to OneDrive.
-func hasDriveLicense(
-	ctx context.Context,
-	service graph.Servicer,
-	user string,
-) (bool, error) {
-	var hasDrive bool
-
-	resp, err := service.Client().UsersById(user).LicenseDetails().Get(ctx, nil)
-	if err != nil {
-		return false,
-			errors.Wrap(err, "failure obtaining license details for user")
-	}
-
-	iter, err := msgraphgocore.NewPageIterator(
-		resp, service.Adapter(),
-		models.CreateLicenseDetailsCollectionResponseFromDiscriminatorValue,
-	)
-	if err != nil {
-		return false, err
-	}
-
-	cb := func(pageItem any) bool {
-		entry, ok := pageItem.(models.LicenseDetailsable)
-		if !ok {
-			err = errors.New("casting item to models.LicenseDetailsable")
-			return false
-		}
-
-		sku := entry.GetSkuId()
-		if sku == nil {
-			return true
-		}
-
-		for _, license := range skuIDs {
-			if sku.String() == license {
-				hasDrive = true
-				return false
-			}
-		}
-
-		return true
-	}
-
-	if err := iter.Iterate(ctx, cb); err != nil {
-		return false,
-			errors.Wrap(err, support.ConnectorStackErrorTrace(err))
-	}
-
-	return hasDrive, nil
-}
@@ -9,6 +9,7 @@ import (
 	"github.com/stretchr/testify/suite"
 
 	"github.com/alcionai/corso/src/internal/common"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -146,6 +147,7 @@ func (suite *OneDriveSuite) TestOneDriveNewCollections() {
 			NewOneDriveBackup([]string{test.user}).
 			AllData()[0]
 		odcs, err := NewCollections(
+			graph.LargeItemClient(),
 			creds.AzureTenantID,
 			test.user,
 			OneDriveSource,
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"net/http"
 	"strings"
 
 	msdrives "github.com/microsoftgraph/msgraph-sdk-go/drives"
@@ -13,6 +14,7 @@ import (
 	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/support"
 	"github.com/alcionai/corso/src/internal/connector/uploadsession"
+	"github.com/alcionai/corso/src/internal/version"
 	"github.com/alcionai/corso/src/pkg/backup/details"
 	"github.com/alcionai/corso/src/pkg/logger"
 )
@@ -27,7 +29,7 @@ const (
 // It crafts this by querying M365 for a download URL for the item
 // and using a http client to initialize a reader
 func sharePointItemReader(
-	ctx context.Context,
+	hc *http.Client,
 	item models.DriveItemable,
 ) (details.ItemInfo, io.ReadCloser, error) {
 	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
@@ -35,7 +37,7 @@ func sharePointItemReader(
 		return details.ItemInfo{}, nil, fmt.Errorf("failed to get url for %s", *item.GetName())
 	}
 
-	rc, err := driveItemReader(ctx, *url)
+	resp, err := hc.Get(*url)
 	if err != nil {
 		return details.ItemInfo{}, nil, err
 	}
@@ -44,14 +46,14 @@ func sharePointItemReader(
 		SharePoint: sharePointItemInfo(item, *item.GetSize()),
 	}
 
-	return dii, rc, nil
+	return dii, resp.Body, nil
 }
 
 // oneDriveItemReader will return a io.ReadCloser for the specified item
 // It crafts this by querying M365 for a download URL for the item
 // and using a http client to initialize a reader
 func oneDriveItemReader(
-	ctx context.Context,
+	hc *http.Client,
 	item models.DriveItemable,
 ) (details.ItemInfo, io.ReadCloser, error) {
 	url, ok := item.GetAdditionalData()[downloadURLKey].(*string)
@@ -59,7 +61,17 @@ func oneDriveItemReader(
 		return details.ItemInfo{}, nil, fmt.Errorf("failed to get url for %s", *item.GetName())
 	}
 
-	rc, err := driveItemReader(ctx, *url)
+	req, err := http.NewRequest(http.MethodGet, *url, nil)
+	if err != nil {
+		return details.ItemInfo{}, nil, err
+	}
+
+	// Decorate the traffic
+	//nolint:lll
+	// See https://learn.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic
+	req.Header.Set("User-Agent", "ISV|Alcion|Corso/"+version.Version)
+
+	resp, err := hc.Do(req)
 	if err != nil {
 		return details.ItemInfo{}, nil, err
 	}
@@ -68,25 +80,7 @@ func oneDriveItemReader(
 		OneDrive: oneDriveItemInfo(item, *item.GetSize()),
 	}
 
-	return dii, rc, nil
-}
-
-// driveItemReader will return a io.ReadCloser for the specified item
-// It crafts this by querying M365 for a download URL for the item
-// and using a http client to initialize a reader
-func driveItemReader(
-	ctx context.Context,
-	url string,
-) (io.ReadCloser, error) {
-	httpClient := graph.CreateHTTPClient()
-	httpClient.Timeout = 0 // infinite timeout for pulling large files
-
-	resp, err := httpClient.Get(url)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to download file from %s", url)
-	}
-
-	return resp.Body, nil
+	return dii, resp.Body, nil
 }
 
 // oneDriveItemInfo will populate a details.OneDriveInfo struct
@@ -97,7 +91,7 @@ func driveItemReader(
 func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDriveInfo {
 	var email, parent string
 
-	if di.GetCreatedBy().GetUser() != nil {
+	if di.GetCreatedBy() != nil && di.GetCreatedBy().GetUser() != nil {
 		// User is sometimes not available when created via some
 		// external applications (like backup/restore solutions)
 		ed, ok := di.GetCreatedBy().GetUser().GetAdditionalData()["email"]
@@ -106,11 +100,9 @@ func oneDriveItemInfo(di models.DriveItemable, itemSize int64) *details.OneDrive
 		}
 	}
 
-	if di.GetParentReference() != nil {
-		if di.GetParentReference().GetName() != nil {
-			// EndPoint is not always populated from external apps
-			parent = *di.GetParentReference().GetName()
-		}
+	if di.GetParentReference() != nil && di.GetParentReference().GetName() != nil {
+		// EndPoint is not always populated from external apps
+		parent = *di.GetParentReference().GetName()
 	}
 
 	return &details.OneDriveInfo{
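graph.LargeItemClient() itself is not part of this diff; judging from the call sites and the deleted driveItemReader, it is presumably an *http.Client with the client-side timeout disabled. A hedged, minimal sketch of what such a constructor could look like, assuming it simply preserves the old per-download settings:

package graph

import "net/http"

// LargeItemClient returns an http.Client suited to pulling large drive
// items. A zero Timeout disables the client-side deadline so multi-GB
// downloads aren't cut off mid-stream.
//
// Sketch only: the real constructor may add transport tuning or
// middleware not visible in this diff.
func LargeItemClient() *http.Client {
	return &http.Client{
		Timeout: 0, // infinite timeout for pulling large files
	}
}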
@@ -101,6 +101,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 		items []models.DriveItemable,
 		oldPaths map[string]string,
 		newPaths map[string]string,
+		excluded map[string]struct{},
 	) error {
 		for _, item := range items {
 			if item.GetFile() != nil {
@@ -111,7 +112,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 
 		return nil
 	}
-	_, _, err := collectItems(ctx, suite, suite.userDriveID, "General", itemCollector)
+	_, _, _, err := collectItems(ctx, suite, suite.userDriveID, "General", itemCollector)
 	require.NoError(suite.T(), err)
 
 	// Test Requirement 2: Need a file
@@ -125,7 +126,7 @@ func (suite *ItemIntegrationSuite) TestItemReader_oneDrive() {
 
 	// Read data for the file
 
-	itemInfo, itemData, err := oneDriveItemReader(ctx, driveItem)
+	itemInfo, itemData, err := oneDriveItemReader(graph.LargeItemClient(), driveItem)
 	require.NoError(suite.T(), err)
 	require.NotNil(suite.T(), itemInfo.OneDrive)
 	require.NotEmpty(suite.T(), itemInfo.OneDrive.ItemName)
@@ -2,6 +2,7 @@ package sharepoint
 
 import (
 	"context"
+	"net/http"
 
 	"github.com/pkg/errors"
 
@@ -24,6 +25,7 @@ type statusUpdater interface {
 // for the specified user
 func DataCollections(
 	ctx context.Context,
+	itemClient *http.Client,
 	selector selectors.Selector,
 	tenantID string,
 	serv graph.Servicer,
@@ -66,6 +68,7 @@ func DataCollections(
 		case path.LibrariesCategory:
 			spcs, err = collectLibraries(
 				ctx,
+				itemClient,
 				serv,
 				tenantID,
 				site,
@@ -124,6 +127,7 @@ func collectLists(
 // all the drives associated with the site.
 func collectLibraries(
 	ctx context.Context,
+	itemClient *http.Client,
 	serv graph.Servicer,
 	tenantID, siteID string,
 	scope selectors.SharePointScope,
@@ -138,6 +142,7 @@ func collectLibraries(
 	logger.Ctx(ctx).With("site", siteID).Debug("Creating SharePoint Library collections")
 
 	colls := onedrive.NewCollections(
+		itemClient,
 		tenantID,
 		siteID,
 		onedrive.SharePointSource,
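The client is now injected once at the top (DataCollections) and threaded down to the layer that reads item content, rather than each download constructing its own client. A small self-contained sketch of that injection pattern (all names here are illustrative stand-ins):

package main

import (
	"fmt"
	"net/http"
)

// collections mimics the collectLibraries-style plumbing: the layer
// that performs downloads receives the client instead of building one,
// so timeouts and User-Agent decoration are configured exactly once.
type collections struct {
	itemClient *http.Client
}

func newCollections(itemClient *http.Client) collections {
	return collections{itemClient: itemClient}
}

func main() {
	shared := &http.Client{Timeout: 0} // stand-in for graph.LargeItemClient()
	c := newCollections(shared)
	fmt.Println(c.itemClient != nil) // true
}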
@@ -7,6 +7,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/suite"
 
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/connector/onedrive"
 	"github.com/alcionai/corso/src/internal/tester"
 	"github.com/alcionai/corso/src/pkg/control"
@@ -89,7 +90,9 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
 
 			paths := map[string]string{}
 			newPaths := map[string]string{}
+			excluded := map[string]struct{}{}
 			c := onedrive.NewCollections(
+				graph.LargeItemClient(),
 				tenant,
 				site,
 				onedrive.SharePointSource,
@@ -97,7 +100,7 @@ func (suite *SharePointLibrariesSuite) TestUpdateCollections() {
 				&MockGraphService{},
 				nil,
 				control.Options{})
-			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths)
+			err := c.UpdateCollections(ctx, "driveID", "General", test.items, paths, newPaths, excluded)
 			test.expect(t, err)
 			assert.Equal(t, len(test.expectedCollectionPaths), len(c.CollectionMap), "collection paths")
 			assert.Equal(t, test.expectedItemCount, c.NumItems, "item count")
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	multierror "github.com/hashicorp/go-multierror"
 	bytesize "github.com/inhies/go-bytesize"
 
 	"github.com/alcionai/corso/src/pkg/logger"
@@ -21,6 +22,7 @@ type ConnectorOperationStatus struct {
 	FolderCount       int
 	Successful        int
 	ErrorCount        int
+	Err               error
 	incomplete        bool
 	incompleteReason  string
 	additionalDetails string
@@ -70,6 +72,7 @@ func CreateStatus(
 		FolderCount:      folders,
 		Successful:       cm.Successes,
 		ErrorCount:       numErr,
+		Err:              err,
 		incomplete:       hasErrors,
 		incompleteReason: reason,
 		bytes:            cm.TotalBytes,
@@ -115,6 +118,7 @@ func MergeStatus(one, two ConnectorOperationStatus) ConnectorOperationStatus {
 		FolderCount:      one.FolderCount + two.FolderCount,
 		Successful:       one.Successful + two.Successful,
 		ErrorCount:       one.ErrorCount + two.ErrorCount,
+		Err:              multierror.Append(one.Err, two.Err).ErrorOrNil(),
 		bytes:            one.bytes + two.bytes,
 		incomplete:       hasErrors,
 		incompleteReason: one.incompleteReason + ", " + two.incompleteReason,
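The MergeStatus change leans on two go-multierror behaviors: Append skips nil inputs, and ErrorOrNil() collapses an empty aggregate back to nil, so merging two clean statuses leaves Err nil. A runnable check of that semantics:

package main

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	var one, two error

	// Both nil: Append produces an empty aggregate, and ErrorOrNil
	// turns it back into a plain nil error.
	merged := multierror.Append(one, two).ErrorOrNil()
	fmt.Println(merged) // <nil>

	// One real error: the aggregate is non-nil and reports it.
	two = fmt.Errorf("fetch failed")
	merged = multierror.Append(one, two).ErrorOrNil()
	fmt.Println(merged != nil) // true
}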
@@ -345,6 +345,7 @@ func streamBaseEntries(
 	prevPath path.Path,
 	dir fs.Directory,
 	encodedSeen map[string]struct{},
+	globalExcludeSet map[string]struct{},
 	progress *corsoProgress,
 ) error {
 	if dir == nil {
@@ -373,6 +374,12 @@ func streamBaseEntries(
 			return errors.Wrapf(err, "unable to decode entry name %s", entry.Name())
 		}
 
+		// This entry was marked as deleted by a service that can't tell us the
+		// previous path of deleted items, only the item ID.
+		if _, ok := globalExcludeSet[entName]; ok {
+			return nil
+		}
+
 		// For now assuming that item IDs don't need escaping.
 		itemPath, err := curPath.Append(entName, true)
 		if err != nil {
@@ -421,6 +428,7 @@ func getStreamItemFunc(
 	staticEnts []fs.Entry,
 	streamedEnts data.Collection,
 	baseDir fs.Directory,
+	globalExcludeSet map[string]struct{},
 	progress *corsoProgress,
 ) func(context.Context, func(context.Context, fs.Entry) error) error {
 	return func(ctx context.Context, cb func(context.Context, fs.Entry) error) error {
@@ -443,6 +451,7 @@ func getStreamItemFunc(
 				prevPath,
 				baseDir,
 				seen,
+				globalExcludeSet,
 				progress,
 			); err != nil {
 				errs = multierror.Append(
@@ -457,21 +466,22 @@ func getStreamItemFunc(
 
 // buildKopiaDirs recursively builds a directory hierarchy from the roots up.
 // Returned directories are virtualfs.StreamingDirectory.
-func buildKopiaDirs(dirName string, dir *treeMap, progress *corsoProgress) (fs.Directory, error) {
+func buildKopiaDirs(
+	dirName string,
+	dir *treeMap,
+	globalExcludeSet map[string]struct{},
+	progress *corsoProgress,
+) (fs.Directory, error) {
 	// Reuse kopia directories directly if the subtree rooted at them is
 	// unchanged.
 	//
-	// TODO(ashmrtn): This will need updated when we have OneDrive backups where
-	// items have been deleted because we can't determine which directory used to
-	// have the item.
-	//
 	// TODO(ashmrtn): We could possibly also use this optimization if we know that
 	// the collection has no items in it. In that case though, we may need to take
 	// extra care to ensure the name of the directory is properly represented. For
 	// example, a directory that has been renamed but with no additional items may
 	// not be able to directly use kopia's version of the directory due to the
 	// rename.
-	if dir.collection == nil && len(dir.childDirs) == 0 && dir.baseDir != nil {
+	if dir.collection == nil && len(dir.childDirs) == 0 && dir.baseDir != nil && len(globalExcludeSet) == 0 {
 		return dir.baseDir, nil
 	}
 
@@ -480,7 +490,7 @@ func buildKopiaDirs(dirName string, dir *treeMap, progress *corsoProgress) (fs.D
 	var childDirs []fs.Entry
 
 	for childName, childDir := range dir.childDirs {
-		child, err := buildKopiaDirs(childName, childDir, progress)
+		child, err := buildKopiaDirs(childName, childDir, globalExcludeSet, progress)
 		if err != nil {
 			return nil, err
 		}
@@ -496,6 +506,7 @@ func buildKopiaDirs(dirName string, dir *treeMap, progress *corsoProgress) (fs.D
 			childDirs,
 			dir.collection,
 			dir.baseDir,
+			globalExcludeSet,
 			progress,
 		),
 	), nil
@@ -879,11 +890,19 @@ func inflateBaseTree(
 // virtualfs.StreamingDirectory with the given DataCollections if there is one
 // for that node. Tags can be used in future backups to fetch old snapshots for
 // caching reasons.
+//
+// globalExcludeSet represents a set of items, represented with file names, to
+// exclude from base directories when uploading the snapshot. As items in *all*
+// base directories will be checked for in every base directory, this assumes
+// that items in the bases are unique. Deletions of directories or subtrees
+// should be represented as changes in the status of a Collection, not an entry
+// in the globalExcludeSet.
 func inflateDirTree(
 	ctx context.Context,
 	loader snapshotLoader,
 	baseSnaps []IncrementalBase,
 	collections []data.Collection,
+	globalExcludeSet map[string]struct{},
 	progress *corsoProgress,
 ) (fs.Directory, error) {
 	roots, updatedPaths, err := inflateCollectionTree(ctx, collections)
@@ -915,7 +934,7 @@ func inflateDirTree(
 	var res fs.Directory
 
 	for dirName, dir := range roots {
-		tmp, err := buildKopiaDirs(dirName, dir, progress)
+		tmp, err := buildKopiaDirs(dirName, dir, globalExcludeSet, progress)
 		if err != nil {
 			return nil, err
 		}
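The exclusion check added to streamBaseEntries reduces to a set-membership test on the decoded entry name. A stripped-down runnable sketch of the same filtering idea (the helper names here are illustrative):

package main

import "fmt"

// filterEntries drops names found in the global exclude set, the way
// streamBaseEntries skips tombstoned items while merging a base snapshot.
func filterEntries(names []string, excluded map[string]struct{}) []string {
	kept := make([]string, 0, len(names))

	for _, n := range names {
		if _, ok := excluded[n]; ok {
			continue // marked deleted by the service; skip it
		}

		kept = append(kept, n)
	}

	return kept
}

func main() {
	excluded := map[string]struct{}{"item2": {}}
	fmt.Println(filterEntries([]string{"item1", "item2", "item3"}, excluded))
	// [item1 item3]
}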
@@ -705,7 +705,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree() {
 	// - emails
 	// - Inbox
 	// - 42 separate files
-	dirTree, err := inflateDirTree(ctx, nil, nil, collections, progress)
+	dirTree, err := inflateDirTree(ctx, nil, nil, collections, nil, progress)
 	require.NoError(t, err)
 
 	assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -793,7 +793,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_MixedDirectory()
 		suite.T().Run(test.name, func(t *testing.T) {
 			progress := &corsoProgress{pending: map[string]*itemDetails{}}
 
-			dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, progress)
+			dirTree, err := inflateDirTree(ctx, nil, nil, test.layout, nil, progress)
 			require.NoError(t, err)
 
 			assert.Equal(t, encodeAsPath(testTenant), dirTree.Name())
@@ -889,7 +889,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTree_Fails() {
 	defer flush()
 
 	suite.T().Run(test.name, func(t *testing.T) {
-		_, err := inflateDirTree(ctx, nil, nil, test.layout, nil)
+		_, err := inflateDirTree(ctx, nil, nil, test.layout, nil, nil)
 		assert.Error(t, err)
 	})
 }
@@ -992,7 +992,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeErrors() {
 			cols = append(cols, mc)
 		}
 
-		_, err := inflateDirTree(ctx, nil, nil, cols, progress)
+		_, err := inflateDirTree(ctx, nil, nil, cols, nil, progress)
 		require.Error(t, err)
 	})
 }
@@ -1261,6 +1261,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSingleSubtree() {
 			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		test.inputCollections(),
+		nil,
 		progress,
 	)
 	require.NoError(t, err)
@@ -1281,7 +1282,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 		[]string{testTenant, service, testUser, category, testInboxDir},
 		false,
 	)
-	inboxFileName1 := testFileName4
+	inboxFileName1 := testFileName
 	inboxFileData1 := testFileData4
 	inboxFileName2 := testFileName5
 	inboxFileData2 := testFileData5
@@ -1291,7 +1292,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 		append(inboxPath.Elements(), personalDir),
 		false,
 	)
-	personalFileName1 := testFileName
+	personalFileName1 := inboxFileName1
 	personalFileName2 := testFileName2
 
 	workPath := makePath(
@@ -1312,7 +1313,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 	// - user1
 	// - email
 	// - Inbox
-	// - file4
+	// - file1
 	// - personal
 	// - file1
 	// - file2
@@ -1369,8 +1370,51 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 	table := []struct {
 		name             string
 		inputCollections func(t *testing.T) []data.Collection
+		inputExcludes    map[string]struct{}
 		expected         *expectedNode
 	}{
+		{
+			name: "GlobalExcludeSet",
+			inputCollections: func(t *testing.T) []data.Collection {
+				return nil
+			},
+			inputExcludes: map[string]struct{}{
+				inboxFileName1: {},
+			},
+			expected: expectedTreeWithChildren(
+				[]string{
+					testTenant,
+					service,
+					testUser,
+					category,
+				},
+				[]*expectedNode{
+					{
+						name: testInboxDir,
+						children: []*expectedNode{
+							{
+								name: personalDir,
+								children: []*expectedNode{
+									{
+										name:     personalFileName2,
+										children: []*expectedNode{},
+									},
+								},
+							},
+							{
+								name: workDir,
+								children: []*expectedNode{
+									{
+										name:     workFileName1,
+										children: []*expectedNode{},
+									},
+								},
+							},
+						},
+					},
+				},
+			),
+		},
 		{
 			name: "MovesSubtree",
 			inputCollections: func(t *testing.T) []data.Collection {
@@ -1919,6 +1963,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeMultipleSubdirecto
 			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		test.inputCollections(t),
+		test.inputExcludes,
 		progress,
 	)
 	require.NoError(t, err)
@@ -2079,6 +2124,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSkipsDeletedSubtre
 			mockIncrementalBase("", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		collections,
+		nil,
 		progress,
 	)
 	require.NoError(t, err)
@@ -2325,6 +2371,7 @@ func (suite *HierarchyBuilderUnitSuite) TestBuildDirectoryTreeSelectsCorrectSubt
 			mockIncrementalBase("id2", testTenant, testUser, path.ExchangeService, path.EmailCategory),
 		},
 		collections,
+		nil,
 		progress,
 	)
 	require.NoError(t, err)
@@ -129,7 +129,11 @@ func (w Wrapper) BackupCollections(
 	ctx, end := D.Span(ctx, "kopia:backupCollections")
 	defer end()
 
-	if len(collections) == 0 {
+	// TODO(ashmrtn): Make this a parameter when actually enabling the global
+	// exclude set.
+	var globalExcludeSet map[string]struct{}
+
+	if len(collections) == 0 && len(globalExcludeSet) == 0 {
 		return &BackupStats{}, &details.Builder{}, nil, nil
 	}
 
@@ -147,7 +151,14 @@ func (w Wrapper) BackupCollections(
 		base = previousSnapshots
 	}
 
-	dirTree, err := inflateDirTree(ctx, w.c, base, collections, progress)
+	dirTree, err := inflateDirTree(
+		ctx,
+		w.c,
+		base,
+		collections,
+		globalExcludeSet,
+		progress,
+	)
 	if err != nil {
 		return nil, nil, nil, errors.Wrap(err, "building kopia directories")
 	}
@@ -4,6 +4,7 @@ import (
 	"context"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/google/uuid"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
@@ -119,6 +120,14 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 
 	op.Results.BackupID = model.StableID(uuid.NewString())
 
+	ctx = clues.AddAll(
+		ctx,
+		"tenant_id", tenantID, // TODO: pii
+		"resource_owner", op.ResourceOwner, // TODO: pii
+		"backup_id", op.Results.BackupID,
+		"service", op.Selectors.Service,
+		"incremental", uib)
+
 	op.bus.Event(
 		ctx,
 		events.BackupStart,
@@ -174,6 +183,8 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 		return opStats.readErr
 	}
 
+	ctx = clues.Add(ctx, "collections", len(cs))
+
 	opStats.k, backupDetails, toMerge, err = consumeBackupDataCollections(
 		ctx,
 		op.kopia,
@@ -208,13 +219,11 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 	opStats.gc = gc.AwaitStatus()
 
 	if opStats.gc.ErrorCount > 0 {
-		opStats.writeErr = multierror.Append(nil, opStats.writeErr, errors.Errorf(
-			"%v errors reported while fetching item data",
-			opStats.gc.ErrorCount,
-		)).ErrorOrNil()
+		merr := multierror.Append(opStats.readErr, errors.Wrap(opStats.gc.Err, "retrieving data"))
+		opStats.readErr = merr.ErrorOrNil()
 
 		// Need to exit before we set started to true else we'll report no errors.
-		return opStats.writeErr
+		return opStats.readErr
 	}
 
 	// should always be 1, since backups are 1:1 with resourceOwners.
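clues here attaches structured key-value context to the ctx as the operation progresses, so downstream logging and error reporting can carry tenant, backup, and collection metadata without re-plumbing arguments. A minimal sketch limited to the two calls this diff uses (the keys and values are placeholders):

package main

import (
	"context"

	"github.com/alcionai/clues"
)

func main() {
	ctx := context.Background()

	// AddAll attaches many key-value pairs at once; Add appends more
	// as they become known. Mirrors the BackupOperation usage.
	ctx = clues.AddAll(
		ctx,
		"tenant_id", "t-123",
		"backup_id", "b-456")

	ctx = clues.Add(ctx, "collections", 3)

	_ = ctx // hand the enriched ctx to the logger / downstream calls
}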
@@ -655,7 +655,7 @@ func (suite *BackupOpIntegrationSuite) TestBackup_Run_exchangeIncrementals() {
 	m365, err := acct.M365Config()
 	require.NoError(t, err)
 
-	gc, err := connector.NewGraphConnector(ctx, acct, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
 	require.NoError(t, err)
 
 	ac, err := api.NewClient(m365)
@@ -7,6 +7,7 @@ import (
 	"github.com/pkg/errors"
 
 	"github.com/alcionai/corso/src/internal/connector"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/internal/events"
 	"github.com/alcionai/corso/src/internal/kopia"
 	"github.com/alcionai/corso/src/internal/observe"
@@ -107,7 +108,7 @@ func connectToM365(
 		resource = connector.Sites
 	}
 
-	gc, err := connector.NewGraphConnector(ctx, acct, resource)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, resource)
 	if err != nil {
 		return nil, err
 	}
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/alcionai/clues"
 	"github.com/google/uuid"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/pkg/errors"
@@ -129,6 +130,12 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 
 	detailsStore := streamstore.New(op.kopia, op.account.ID(), op.Selectors.PathService())
 
+	ctx = clues.AddAll(
+		ctx,
+		"tenant_id", op.account.ID(), // TODO: pii
+		"backup_id", op.BackupID,
+		"service", op.Selectors.Service)
+
 	bup, deets, err := getBackupAndDetailsFromID(
 		ctx,
 		op.BackupID,
@@ -142,6 +149,8 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 		return nil, err
 	}
 
+	ctx = clues.Add(ctx, "resource_owner", bup.Selector.DiscreteOwner)
+
 	op.bus.Event(
 		ctx,
 		events.RestoreStart,
@@ -159,6 +168,8 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 		return nil, err
 	}
 
+	ctx = clues.Add(ctx, "details_paths", len(paths))
+
 	observe.Message(ctx, fmt.Sprintf("Discovered %d items in backup %s to restore", len(paths), op.BackupID))
 
 	kopiaComplete, closer := observe.MessageWithCompletion(ctx, "Enumerating items in repository")
@@ -174,6 +185,8 @@ func (op *RestoreOperation) Run(ctx context.Context) (restoreDetails *details.De
 	}
 	kopiaComplete <- struct{}{}
 
+	ctx = clues.Add(ctx, "collections", len(dcs))
+
 	opStats.cs = dcs
 	opStats.resourceCount = len(data.ResourceOwnerSet(dcs))
 
src/pkg/fault/example_fault_test.go (new file, 291 lines)
@@ -0,0 +1,291 @@
+package fault_test
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/alcionai/corso/src/pkg/fault"
+)
+
+// ---------------------------------------------------------------------------
+// mock helpers
+// ---------------------------------------------------------------------------
+
+var (
+	ctrl  any
+	items = []string{}
+)
+
+type mockController struct {
+	errors any
+}
+
+func connectClient() error              { return nil }
+func dependencyCall() error             { return nil }
+func getIthItem(i string) error         { return nil }
+func getData() ([]string, error)        { return nil, nil }
+func storeData([]string, *fault.Errors) {}
+
+type mockOper struct {
+	Errors *fault.Errors
+}
+
+func newOperation() mockOper          { return mockOper{fault.New(true)} }
+func (m mockOper) Run() *fault.Errors { return m.Errors }
+
+// ---------------------------------------------------------------------------
+// examples
+// ---------------------------------------------------------------------------
+
+// ExampleNewErrors highlights assumptions and best practices
+// for generating Errors structs.
+func Example_new() {
+	// Errors should only be generated during the construction of
+	// another controller, such as a new Backup or Restore Operations.
+	// Configurations like failFast are set during construction.
+	//
+	// Generating new fault.Errors structs outside of an operation
+	// controller is a smell, and should be avoided. If you need
+	// to aggregate errors, you should accept an interface and pass
+	// an Errors instance into it.
+	ctrl = mockController{
+		errors: fault.New(false),
+	}
+}
+
+// ExampleErrorsFail describes the assumptions and best practices
+// for setting the Failure error.
+func Example_errors_Fail() {
+	errs := fault.New(false)
+
+	// Fail() should be used to record any error that highlights a
+	// non-recoverable failure in a process.
+	//
+	// Fail() should only get called in the last step before returning
+	// a fault.Errors from a controller. In all other cases, you
+	// should simply return an error and expect the upstream controller
+	// to call Fail() for you.
+	if err := connectClient(); err != nil {
+		// normally, you'd want to
+		// return errs.Fail(err)
+		errs.Fail(err)
+	}
+
+	// Only the topmost handler of the error should set the Fail() err.
+	// This will normally be the operation controller itself.
+	// IE: Fail() is not Wrap(). In lower levels, errors should get
+	// wrapped and returned like normal, and only handled by errors
+	// at the end.
+	lowLevelCall := func() error {
+		if err := dependencyCall(); err != nil {
+			// wrap here, deeper into the stack
+			return errors.Wrap(err, "dependency")
+		}
+
+		return nil
+	}
+
+	if err := lowLevelCall(); err != nil {
+		// fail here, at the top of the stack
+		errs.Fail(err)
+	}
+}
+
+// ExampleErrorsAdd describes the assumptions and best practices
+// for aggregating iterable or recoverable errors.
+func Example_errors_Add() {
+	errs := fault.New(false)
+
+	// Add() should be used to record any error in a recoverable
+	// part of processing.
+	//
+	// Add() should only get called in the last step in handling an
+	// error within a loop or stream that does not otherwise return
+	// an error. In all other cases, you should simply return an error
+	// and expect the upstream point of iteration to call Add() for you.
+	for _, i := range items {
+		if err := getIthItem(i); err != nil {
+			errs.Add(err)
+		}
+	}
+
+	// In case of failFast behavior, iteration should exit as soon
+	// as an error occurs. Errors does not expose the failFast flag
+	// directly. Instead, iterators should check the value of Err().
+	// If it is non-nil, then the loop should break.
+	for _, i := range items {
+		if errs.Err() != nil {
+			break
+		}
+
+		errs.Add(getIthItem(i))
+	}
+
+	// Only the topmost handler of the error should Add() the err.
+	// This will normally be the iteration loop itself.
+	// IE: Add() is not Wrap(). In lower levels, errors should get
+	// wrapped and returned like normal, and only added to the
+	// errors at the end.
+	clientBasedGetter := func(s string) error {
+		if err := dependencyCall(); err != nil {
+			// wrap here, deeper into the stack
+			return errors.Wrap(err, "dependency")
+		}
+
+		return nil
+	}
+
+	for _, i := range items {
+		if err := clientBasedGetter(i); err != nil {
+			// add here, within the iteration loop
+			errs.Add(err)
+		}
+	}
+}
+
+// ExampleErrorsErr describes retrieving the non-recoverable error.
+func Example_errors_Err() {
+	errs := fault.New(false)
+	errs.Fail(errors.New("catastrophe"))
+
+	// Err() gets the primary failure error.
+	err := errs.Err()
+	fmt.Println(err)
+
+	// if multiple Failures occur, each one after the first gets
+	// added to the Errs slice.
+	errs.Fail(errors.New("another catastrophe"))
+	errSl := errs.Errs()
+
+	for _, e := range errSl {
+		fmt.Println(e)
+	}
+
+	// If Err() is nil, then you can assume the operation completed.
+	// A complete operation is not necessarily an error-free operation.
+	//
+	// Even if Err() is nil, Errs() can be non-empty.
+	// Make sure you check both.
+
+	errs = fault.New(true)
+
+	// If failFast is set to true, then the first error Add()ed gets
+	// promoted to the Err() position.
+
+	errs.Add(errors.New("not catastrophic, but still becomes the Err()"))
+	err = errs.Err()
+	fmt.Println(err)
+
+	// Output: catastrophe
+	// another catastrophe
+	// not catastrophic, but still becomes the Err()
+}
+
+// ExampleErrorsErrs describes retrieving individual errors.
+func Example_errors_Errs() {
+	errs := fault.New(false)
+	errs.Add(errors.New("not catastrophic"))
+	errs.Add(errors.New("something unwanted"))
+
+	// Errs() gets the slice errors that were recorded, but were
+	// considered recoverable.
+	errSl := errs.Errs()
+	for _, err := range errSl {
+		fmt.Println(err)
+	}
+
+	// Errs() only needs to be investigated by the end user at the
+	// conclusion of an operation. Checking Errs() within lower-
+	// layer code is a smell. Funcs should return an error if they
+	// need upstream handlers to recognize failure states.
+	//
+	// If Errs() is nil, then you can assume that no recoverable or
+	// iteration-based errors occurred. But that does not necessarily
+	// mean the operation was able to complete.
+	//
+	// Even if Errs() contains zero items, Err() can be non-nil.
+	// Make sure you check both.
+
+	// Output: not catastrophic
+	// something unwanted
+}
+
+// ExampleErrorsE2e showcases a more complex integration.
+func Example_errors_e2e() {
+	oper := newOperation()
+
+	// imagine that we're a user, calling into corso SDK.
+	// (fake funcs used here to minimize example bloat)
+	//
+	// The operation is our controller, we expect it to
+	// generate a new fault.Errors when constructed, and
+	// to return that struct when we call Run()
+	errs := oper.Run()
+
+	// Let's investigate what went on inside. Since we're at
+	// the top of our controller, and returning a fault.Errors,
+	// all the error handlers set the Fail() case.
+	/* Run() */
+	func() *fault.Errors {
+		if err := connectClient(); err != nil {
+			// Fail() here; we're top level in the controller
+			// and this is a non-recoverable issue
			return oper.Errors.Fail(err)
+		}
+
+		data, err := getData()
+		if err != nil {
+			return oper.Errors.Fail(err)
+		}
+
+		// storeData will aggregate iterated errors into
+		// oper.Errors.
+		storeData(data, oper.Errors)
+
+		// return oper.Errors here, in part to ensure it's
+		// non-nil, and because we don't know if we've
+		// aggregated any iterated errors yet.
+		return oper.Errors
+	}()
+
+	// What about the lower level handling? storeData didn't
+	// return an error, so what's happening there?
+	/* storeData */
+	func(data []any, errs *fault.Errors) {
+		// this is downstream in our code somewhere
+		storer := func(a any) error {
+			if err := dependencyCall(); err != nil {
+				// we're not passing in or calling fault.Errors here,
+				// because this isn't the iteration handler, it's just
+				// a regular error.
+				return errors.Wrap(err, "dependency")
+			}
+
+			return nil
+		}
+
+		for _, d := range data {
+			if errs.Err() != nil {
+				break
+			}
+
+			if err := storer(d); err != nil {
+				// Since we're at the top of the iteration, we need
+				// to add each error to the fault.Errors struct.
+				errs.Add(err)
+			}
+		}
+	}(nil, nil)
+
+	// then at the end of the oper.Run, we investigate the results.
+	if errs.Err() != nil {
+		// handle the primary error
+		fmt.Println("err occurred", errs.Err())
+	}
+
+	for _, err := range errs.Errs() {
+		// handle each recoverable error
+		fmt.Println("recoverable err occurred", err)
+	}
+}
127
src/pkg/fault/fault.go
Normal file
127
src/pkg/fault/fault.go
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
package fault

import (
	"sync"

	"golang.org/x/exp/slices"
)

type Errors struct {
	mu *sync.Mutex

	// err identifies non-recoverable errors. This includes
	// non-start cases (ex: cannot connect to client), hard-
	// stop issues (ex: credentials expired) or conscious exit
	// cases (ex: iteration error + failFast config).
	err error

	// errs is the accumulation of recoverable or iterated
	// errors. Eg: if a process is retrieving N items, and
	// 1 of the items fails to be retrieved, but the rest of
	// them succeed, we'd expect to see 1 error added to this
	// slice.
	errs []error

	// if failFast is true, the first errs addition will
	// get promoted to the err value. This signifies a
	// non-recoverable processing state, causing any running
	// processes to exit.
	failFast bool
}

// ErrorsData provides the errors data alone, without sync
// controls, allowing the data to be persisted.
type ErrorsData struct {
	Err      error   `json:"err"`
	Errs     []error `json:"errs"`
	FailFast bool    `json:"failFast"`
}

// New constructs a new error with default values in place.
func New(failFast bool) *Errors {
	return &Errors{
		mu:       &sync.Mutex{},
		errs:     []error{},
		failFast: failFast,
	}
}

// Err returns the primary error. If not nil, this
// indicates the operation exited prior to completion.
func (e *Errors) Err() error {
	return e.err
}

// Errs returns the slice of recoverable and
// iterated errors.
func (e *Errors) Errs() []error {
	return e.errs
}

// Data returns the plain set of error data
// without any sync properties.
func (e *Errors) Data() ErrorsData {
	return ErrorsData{
		Err:      e.err,
		Errs:     slices.Clone(e.errs),
		FailFast: e.failFast,
	}
}

// TODO: introduce Failer interface

// Fail sets the non-recoverable error (ie: errors.err)
// in the errors struct. If a non-recoverable error is
// already present, the error gets added to the errs slice.
func (e *Errors) Fail(err error) *Errors {
	if err == nil {
		return e
	}

	e.mu.Lock()
	defer e.mu.Unlock()

	return e.setErr(err)
}

// setErr handles setting errors.err. Sync locking gets
// handled upstream of this call.
func (e *Errors) setErr(err error) *Errors {
	if e.err != nil {
		return e.addErr(err)
	}

	e.err = err

	return e
}

// TODO: introduce Adder interface

// Add appends the error to the slice of recoverable and
// iterated errors (ie: errors.errs). If failFast is true,
// the first Added error will get copied to errors.err,
// causing the errors struct to identify as non-recoverably
// failed.
func (e *Errors) Add(err error) *Errors {
	if err == nil {
		return e
	}

	e.mu.Lock()
	defer e.mu.Unlock()

	return e.addErr(err)
}

// addErr handles adding errors to errors.errs. Sync locking
// gets handled upstream of this call.
func (e *Errors) addErr(err error) *Errors {
	if e.err == nil && e.failFast {
		e.setErr(err)
	}

	e.errs = append(e.errs, err)

	return e
}
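For orientation, here is a minimal sketch of how the new fault.Errors API composes end to end. The import path matches the file above; getItem and its failure mode are hypothetical stand-ins for any per-item operation that can fail without aborting the whole run.

package main

import (
	"errors"
	"fmt"

	"github.com/alcionai/corso/src/pkg/fault"
)

// getItem is a hypothetical stand-in for a per-item operation
// that may fail on some items while the rest succeed.
func getItem(id int) error {
	if id == 2 {
		return errors.New("item 2 unavailable")
	}

	return nil
}

func main() {
	// failFast=false: Add()ed errors accumulate in Errs()
	// without promoting to the primary Err().
	errs := fault.New(false)

	for id := 0; id < 4; id++ {
		if err := getItem(id); err != nil {
			// recoverable: recorded, processing continues
			errs.Add(err)
		}
	}

	if errs.Err() != nil {
		fmt.Println("non-recoverable err occurred", errs.Err())
	}

	for _, err := range errs.Errs() {
		fmt.Println("recoverable err occurred", err)
	}
}

With fault.New(true) instead, the first Add would also promote the error to Err(), signaling callers to stop early.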
202	src/pkg/fault/fault_test.go	Normal file
@ -0,0 +1,202 @@
package fault_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"

	"github.com/alcionai/corso/src/pkg/fault"
)

type FaultErrorsUnitSuite struct {
	suite.Suite
}

func TestFaultErrorsUnitSuite(t *testing.T) {
	suite.Run(t, new(FaultErrorsUnitSuite))
}

func (suite *FaultErrorsUnitSuite) TestNew() {
	t := suite.T()

	n := fault.New(false)
	assert.NotNil(t, n)

	n = fault.New(true)
	assert.NotNil(t, n)
}

func (suite *FaultErrorsUnitSuite) TestErr() {
	table := []struct {
		name     string
		failFast bool
		fail     error
		add      error
		expect   assert.ErrorAssertionFunc
	}{
		{
			name:   "nil",
			expect: assert.NoError,
		},
		{
			name:     "nil, failFast",
			failFast: true,
			expect:   assert.NoError,
		},
		{
			name:   "failed",
			fail:   assert.AnError,
			expect: assert.Error,
		},
		{
			name:     "failed, failFast",
			fail:     assert.AnError,
			failFast: true,
			expect:   assert.Error,
		},
		{
			name:   "added",
			add:    assert.AnError,
			expect: assert.NoError,
		},
		{
			name:     "added, failFast",
			add:      assert.AnError,
			failFast: true,
			expect:   assert.Error,
		},
	}
	for _, test := range table {
		suite.T().Run(test.name, func(t *testing.T) {
			n := fault.New(test.failFast)
			require.NotNil(t, n)

			e := n.Fail(test.fail)
			require.NotNil(t, e)

			e = n.Add(test.add)
			require.NotNil(t, e)

			test.expect(t, n.Err())
		})
	}
}

func (suite *FaultErrorsUnitSuite) TestFail() {
	t := suite.T()

	n := fault.New(false)
	require.NotNil(t, n)

	n.Fail(assert.AnError)
	assert.Error(t, n.Err())
	assert.Empty(t, n.Errs())

	n.Fail(assert.AnError)
	assert.Error(t, n.Err())
	assert.NotEmpty(t, n.Errs())
}

func (suite *FaultErrorsUnitSuite) TestErrs() {
	table := []struct {
		name     string
		failFast bool
		fail     error
		add      error
		expect   assert.ValueAssertionFunc
	}{
		{
			name:   "nil",
			expect: assert.Empty,
		},
		{
			name:     "nil, failFast",
			failFast: true,
			expect:   assert.Empty,
		},
		{
			name:   "failed",
			fail:   assert.AnError,
			expect: assert.Empty,
		},
		{
			name:     "failed, failFast",
			fail:     assert.AnError,
			failFast: true,
			expect:   assert.Empty,
		},
		{
			name:   "added",
			add:    assert.AnError,
			expect: assert.NotEmpty,
		},
		{
			name:     "added, failFast",
			add:      assert.AnError,
			failFast: true,
			expect:   assert.NotEmpty,
		},
	}
	for _, test := range table {
		suite.T().Run(test.name, func(t *testing.T) {
			n := fault.New(test.failFast)
			require.NotNil(t, n)

			e := n.Fail(test.fail)
			require.NotNil(t, e)

			e = n.Add(test.add)
			require.NotNil(t, e)

			test.expect(t, n.Errs())
		})
	}
}

func (suite *FaultErrorsUnitSuite) TestAdd() {
	t := suite.T()

	n := fault.New(true)
	require.NotNil(t, n)

	n.Add(assert.AnError)
	assert.Error(t, n.Err())
	assert.Len(t, n.Errs(), 1)

	n.Add(assert.AnError)
	assert.Error(t, n.Err())
	assert.Len(t, n.Errs(), 2)
}

func (suite *FaultErrorsUnitSuite) TestData() {
	t := suite.T()

	// not fail-fast
	n := fault.New(false)
	require.NotNil(t, n)

	n.Fail(errors.New("fail"))
	n.Add(errors.New("1"))
	n.Add(errors.New("2"))

	d := n.Data()
	assert.Equal(t, n.Err(), d.Err)
	assert.ElementsMatch(t, n.Errs(), d.Errs)
	assert.False(t, d.FailFast)

	// fail-fast
	n = fault.New(true)
	require.NotNil(t, n)

	n.Fail(errors.New("fail"))
	n.Add(errors.New("1"))
	n.Add(errors.New("2"))

	d = n.Data()
	assert.Equal(t, n.Err(), d.Err)
	assert.ElementsMatch(t, n.Errs(), d.Errs)
	assert.True(t, d.FailFast)
}
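The suite runs with stock Go tooling. Assuming the src/ directory is the module root, as the import paths suggest, something like:

	go test ./pkg/fault/... -run TestFaultErrorsUnitSuite -v

The -run filter and -v flag are optional; a plain go test ./pkg/fault/... exercises the same cases.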
@ -1,6 +1,8 @@
 package path
 
 import (
+	"strings"
+
 	"github.com/pkg/errors"
 )
 
@ -30,18 +32,20 @@ const (
 )
 
 func toServiceType(service string) ServiceType {
-	switch service {
-	case ExchangeService.String():
+	s := strings.ToLower(service)
+
+	switch s {
+	case strings.ToLower(ExchangeService.String()):
 		return ExchangeService
-	case OneDriveService.String():
+	case strings.ToLower(OneDriveService.String()):
 		return OneDriveService
-	case SharePointService.String():
+	case strings.ToLower(SharePointService.String()):
 		return SharePointService
-	case ExchangeMetadataService.String():
+	case strings.ToLower(ExchangeMetadataService.String()):
 		return ExchangeMetadataService
-	case OneDriveMetadataService.String():
+	case strings.ToLower(OneDriveMetadataService.String()):
 		return OneDriveMetadataService
-	case SharePointMetadataService.String():
+	case strings.ToLower(SharePointMetadataService.String()):
 		return SharePointMetadataService
 	default:
 		return UnknownService
@ -70,22 +74,24 @@ const (
 )
 
 func ToCategoryType(category string) CategoryType {
-	switch category {
-	case EmailCategory.String():
+	cat := strings.ToLower(category)
+
+	switch cat {
+	case strings.ToLower(EmailCategory.String()):
 		return EmailCategory
-	case ContactsCategory.String():
+	case strings.ToLower(ContactsCategory.String()):
 		return ContactsCategory
-	case EventsCategory.String():
+	case strings.ToLower(EventsCategory.String()):
 		return EventsCategory
-	case FilesCategory.String():
+	case strings.ToLower(FilesCategory.String()):
 		return FilesCategory
-	case LibrariesCategory.String():
+	case strings.ToLower(LibrariesCategory.String()):
 		return LibrariesCategory
-	case ListsCategory.String():
+	case strings.ToLower(ListsCategory.String()):
 		return ListsCategory
-	case PagesCategory.String():
+	case strings.ToLower(PagesCategory.String()):
 		return PagesCategory
-	case DetailsCategory.String():
+	case strings.ToLower(DetailsCategory.String()):
 		return DetailsCategory
 	default:
 		return UnknownCategory
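Since toServiceType is unexported, the externally visible effect of this change is on ToCategoryType: any casing of a category name now resolves to the same CategoryType. A minimal sketch, assuming the path package import shown elsewhere in this diff and that EmailCategory's String() form is the lowercase name "email":

package main

import (
	"fmt"

	"github.com/alcionai/corso/src/pkg/path"
)

func main() {
	// Under the assumptions above, all three inputs
	// resolve to the same email category.
	for _, c := range []string{"email", "EMAIL", "Email"} {
		fmt.Println(c, "->", path.ToCategoryType(c))
	}
}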
@ -1,6 +1,7 @@
 package path
 
 import (
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@ -128,3 +129,51 @@ func (suite *ServiceCategoryUnitSuite) TestValidateServiceAndCategory() {
 		})
 	}
 }
+
+func (suite *ServiceCategoryUnitSuite) TestToServiceType() {
+	table := []struct {
+		name     string
+		service  string
+		expected ServiceType
+	}{
+		{
+			name:     "SameCase",
+			service:  ExchangeMetadataService.String(),
+			expected: ExchangeMetadataService,
+		},
+		{
+			name:     "DifferentCase",
+			service:  strings.ToUpper(ExchangeMetadataService.String()),
+			expected: ExchangeMetadataService,
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			assert.Equal(t, test.expected, toServiceType(test.service))
+		})
+	}
+}
+
+func (suite *ServiceCategoryUnitSuite) TestToCategoryType() {
+	table := []struct {
+		name     string
+		category string
+		expected CategoryType
+	}{
+		{
+			name:     "SameCase",
+			category: EmailCategory.String(),
+			expected: EmailCategory,
+		},
+		{
+			name:     "DifferentCase",
+			category: strings.ToUpper(EmailCategory.String()),
+			expected: EmailCategory,
+		},
+	}
+	for _, test := range table {
+		suite.T().Run(test.name, func(t *testing.T) {
+			assert.Equal(t, test.expected, ToCategoryType(test.category))
+		})
+	}
+}
@ -224,7 +224,7 @@ func (s *sharePoint) AllData() []SharePointScope {
 		scopes,
 		makeScope[SharePointScope](SharePointLibrary, Any()),
 		makeScope[SharePointScope](SharePointList, Any()),
-		makeScope[SharePointScope](SharePointPage, Any()),
+		makeScope[SharePointScope](SharePointPageFolder, Any()),
 	)
 
 	return scopes
@ -323,7 +323,7 @@ func (s *sharePoint) PageItems(pages, items []string, opts ...option) []SharePoi
 	scopes = append(
 		scopes,
 		makeScope[SharePointScope](SharePointPage, items).
-			set(SharePointPage, pages, opts...),
+			set(SharePointPageFolder, pages, opts...),
 	)
 
 	return scopes
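The rename only changes which leaf type keys the folder comparison for page scopes; selector construction itself is unchanged. A short sketch of the consuming side, using the same names the tests below rely on:

odr := NewSharePointRestore([]string{"sid"})
odr.Include(odr.Pages([]string{"folderG/folderH"}))
// Reduce()-style matching now compares page folders via the
// SharePointPageFolder leaf type rather than SharePointPage.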
@ -193,9 +193,13 @@ func (suite *SharePointSelectorSuite) TestToSharePointRestore() {
 
 func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() {
 	var (
-		item  = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", "folderA/folderB", "item")
-		item2 = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", "folderA/folderC", "item2")
-		item3 = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", "folderD/folderE", "item3")
+		pairAC = "folderA/folderC"
+		pairGH = "folderG/folderH"
+		item   = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", "folderA/folderB", "item")
+		item2  = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", pairAC, "item2")
+		item3  = stubRepoRef(path.SharePointService, path.LibrariesCategory, "sid", "folderD/folderE", "item3")
+		item4  = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item4")
+		item5  = stubRepoRef(path.SharePointService, path.PagesCategory, "sid", pairGH, "item5")
 	)
 
 	deets := &details.Details{
@ -225,6 +229,22 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() {
 				},
 			},
 		},
+		{
+			RepoRef: item4,
+			ItemInfo: details.ItemInfo{
+				SharePoint: &details.SharePointInfo{
+					ItemType: details.SharePointItem,
+				},
+			},
+		},
+		{
+			RepoRef: item5,
+			ItemInfo: details.ItemInfo{
+				SharePoint: &details.SharePointInfo{
+					ItemType: details.SharePointItem,
+				},
+			},
+		},
 		},
 	},
 }
@ -247,7 +267,7 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() {
 				odr.Include(odr.AllData())
 				return odr
 			},
-			expect: arr(item, item2, item3),
+			expect: arr(item, item2, item3, item4, item5),
 		},
 		{
 			name: "only match item",
@ -264,11 +284,21 @@ func (suite *SharePointSelectorSuite) TestSharePointRestore_Reduce() {
 			deets: deets,
 			makeSelector: func() *SharePointRestore {
 				odr := NewSharePointRestore([]string{"sid"})
-				odr.Include(odr.Libraries([]string{"folderA/folderB", "folderA/folderC"}))
+				odr.Include(odr.Libraries([]string{"folderA/folderB", pairAC}))
 				return odr
 			},
 			expect: arr(item, item2),
 		},
+		{
+			name:  "pages match folder",
+			deets: deets,
+			makeSelector: func() *SharePointRestore {
+				odr := NewSharePointRestore([]string{"sid"})
+				odr.Include(odr.Pages([]string{pairGH, pairAC}))
+				return odr
+			},
+			expect: arr(item4, item5),
+		},
 	}
 	for _, test := range table {
 		suite.T().Run(test.name, func(t *testing.T) {
@ -8,6 +8,7 @@ import (
 
 	"github.com/alcionai/corso/src/internal/connector"
 	"github.com/alcionai/corso/src/internal/connector/discovery"
+	"github.com/alcionai/corso/src/internal/connector/graph"
 	"github.com/alcionai/corso/src/pkg/account"
 )
 
@ -20,7 +21,7 @@ type User struct {
 // Users returns a list of users in the specified M365 tenant
 // TODO: Implement paging support
 func Users(ctx context.Context, m365Account account.Account) ([]*User, error) {
-	gc, err := connector.NewGraphConnector(ctx, m365Account, connector.Users)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Users)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
@ -76,7 +77,7 @@ func UserPNs(ctx context.Context, m365Account account.Account) ([]string, error)
 
 // SiteURLs returns a list of SharePoint site WebURLs in the specified M365 tenant
 func SiteURLs(ctx context.Context, m365Account account.Account) ([]string, error) {
-	gc, err := connector.NewGraphConnector(ctx, m365Account, connector.Sites)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Sites)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
@ -86,7 +87,7 @@ func SiteURLs(ctx context.Context, m365Account account.Account) ([]string, error
 
 // SiteIDs returns a list of SharePoint site IDs in the specified M365 tenant
 func SiteIDs(ctx context.Context, m365Account account.Account) ([]string, error) {
-	gc, err := connector.NewGraphConnector(ctx, m365Account, connector.Sites)
+	gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), m365Account, connector.Sites)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not initialize M365 graph connection")
 	}
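All three call sites above follow the same updated shape: NewGraphConnector now takes an HTTP client as its second argument. A hedged sketch of the pattern, with the connector and graph packages as imported above and acct standing in for a previously configured account.Account:

gc, err := connector.NewGraphConnector(ctx, graph.LargeItemClient(), acct, connector.Users)
if err != nil {
	return errors.Wrap(err, "could not initialize M365 graph connection")
}
// use gc for discovery, backup, etc.

graph.LargeItemClient() presumably supplies a client tuned for large item transfers; the point of the change is that callers now inject the client explicitly rather than relying on a default built inside the connector.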
@ -47,7 +47,7 @@ const config = {
 	sidebarPath: require.resolve('./sidebars.js'),
 	remarkPlugins: [require('mdx-mermaid')],
 	editUrl:
-		'https://github.com/alcionai/corso/tree/main/docs',
+		'https://github.com/alcionai/corso/tree/main/website',
 },
 blog: {
 	showReadingTime: true,
13	website/package-lock.json	generated
@ -13220,9 +13220,9 @@
 		}
 	},
 	"node_modules/ua-parser-js": {
-		"version": "0.7.32",
-		"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.32.tgz",
-		"integrity": "sha512-f9BESNVhzlhEFf2CHMSj40NWOjYPl1YKYbrvIr/hFTDEmLq7SRbWvm7FcdcpCYT95zrOhC7gZSxjdnnTpBcwVw==",
+		"version": "0.7.33",
+		"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.33.tgz",
+		"integrity": "sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw==",
 		"funding": [
 			{
 				"type": "opencollective",
@ -13233,7 +13233,6 @@
 				"url": "https://paypal.me/faisalman"
 			}
 		],
-		"license": "MIT",
 		"engines": {
 			"node": "*"
 		}
@ -23418,9 +23417,9 @@
 		"peer": true
 	},
 	"ua-parser-js": {
-		"version": "0.7.32",
-		"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.32.tgz",
-		"integrity": "sha512-f9BESNVhzlhEFf2CHMSj40NWOjYPl1YKYbrvIr/hFTDEmLq7SRbWvm7FcdcpCYT95zrOhC7gZSxjdnnTpBcwVw=="
+		"version": "0.7.33",
+		"resolved": "https://registry.npmjs.org/ua-parser-js/-/ua-parser-js-0.7.33.tgz",
+		"integrity": "sha512-s8ax/CeZdK9R/56Sui0WM6y9OFREJarMRHqLB2EwkovemBxNQ+Bqu8GAsUnVcXKgphb++ghr/B2BZx4mahujPw=="
 	},
 	"unherit": {
 		"version": "1.1.3",