Enable some linters in revive (#486)
* Turn on revive linter, ignore only a few things
* Fix lint errors. Ignore shadowing of 'suite' in tests for now. Also move some constants that had the same value to tester.
This commit is contained in:
parent a586512b42
commit 9e2e88f5f3
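The 'suite' shadowing mentioned in the commit message comes from the usual testify pattern, where the receiver of a suite method is named after the imported `suite` package. A minimal sketch of the pattern that revive's import-shadowing rule flags (a hypothetical test file, not code from this commit; the exclude-rule added below for _test.go files silences it for now):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type ExampleSuite struct {
	suite.Suite
}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}

// The receiver name "suite" shadows the imported "suite" package, which
// trips revive's import-shadowing rule; the _test.go exclude-rule below
// keeps existing tests lint-clean without renaming every receiver.
func (suite *ExampleSuite) TestSomething() {
	suite.T().Log("ok")
}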
@@ -6,6 +6,7 @@ linters:
   - gofmt
   - misspell
   - gci
+  - revive

 linters-settings:
   gci:
@@ -14,7 +15,63 @@ linters-settings:
       - default
      - prefix(github.com/alcionai/corso)
    skip-generated: true
+  revive:
+    max-open-files: 2048
+    # Don't know why, but false means ignore generated files.
+    ignore-generated-header: false
+    rules:
+      - name: blank-imports
+      - name: bool-literal-in-expr
+      - name: constant-logical-expr
+      - name: context-as-argument
+      - name: context-keys-type
+      - name: duplicated-imports
+      - name: early-return
+      - name: empty-block
+      - name: errorf
+      - name: error-naming
+      - name: error-return
+      - name: error-strings
+      - name: exported
+      - name: identical-branches
+      - name: if-return
+      - name: import-shadowing
+      - name: increment-decrement
+      - name: indent-error-flow
+      - name: modifies-value-receiver
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: redefines-builtin-id
+      - name: superfluous-else
+      - name: time-equal
+      - name: time-naming
+      - name: unexported-return
+      - name: unreachable-code
+      - name: useless-break
+      - name: var-declaration
+      - name: var-naming

 issues:
   # Limit on number of errors with identical text reported.
   max-same-issues: 50
+  exclude-rules:
+    - linters:
+        - revive
+      text: "exported:.*details.DetailsModel by other packages, and that stutters"
+    - linters:
+        - revive
+      text: "exported:.*details.DetailsEntry by other packages, and that stutters"
+    - linters:
+        - revive
+      text: "exported:.*mock.MockModelStore by other packages, and that stutters"
+    - linters:
+        - revive
+      text: "unexported-return:.*unexported type selectors.exchangeCategory"
+    - linters:
+        - revive
+      text: "unexported-return:.*unexported type.*kopia.conn"
+    - path: _test\.go
+      linters:
+        - revive
+      text: "import-shadowing:.*'suite' shadows"
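The `exported ... stutters` exclusions above keep revive from demanding renames of existing exported types whose names repeat their package name. A rough illustration of the pattern being whitelisted (a simplified, hypothetical declaration, not the repo's actual file):

// Package details describes the items captured by a backup.
package details

// DetailsModel reads as details.DetailsModel from other packages, which is
// the "stutters" complaint raised by revive's exported rule; the exclude-rules
// above accept the existing name instead of forcing a rename.
type DetailsModel struct {
	Entries []DetailsEntry
}

// DetailsEntry is a single entry in a DetailsModel (also whitelisted above).
type DetailsEntry struct {
	ID string
}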
@@ -195,7 +195,7 @@ func exchangeBackupCreateSelectors(all bool, users, data []string) selectors.Sel

 func validateExchangeBackupCreateFlags(all bool, users, data []string) error {
     if len(users) == 0 && !all {
-        return errors.New("requires one or more --user ids, the wildcard --user *, or the --all flag.")
+        return errors.New("requires one or more --user ids, the wildcard --user *, or the --all flag")
     }
     if len(data) > 0 && all {
         return errors.New("--all does a backup on all data, and cannot be reduced with --data")
@@ -450,7 +450,7 @@ func validateExchangeBackupDetailFlags(
         return nil
     }
     if lu == 0 {
-        return errors.New("requires one or more --user ids, the wildcard --user *, or the --all flag.")
+        return errors.New("requires one or more --user ids, the wildcard --user *, or the --all flag")
     }
     if lc > 0 && lcf == 0 {
         return errors.New("one or more --contact-folder ids or the wildcard --contact-folder * must be included to specify a --contact")
@@ -104,6 +104,7 @@ type Printable interface {
     Values() []string
 }

+//revive:disable:redefines-builtin-id
 func print(p Printable) {
     if outputAsJSON || outputAsJSONDebug {
         outputJSON(p, outputAsJSONDebug)
@@ -77,8 +77,8 @@ func NewCollection(

 // getPopulateFunction is a function to set populate function field
 // with exchange-application specific functions
-func getPopulateFunction(optId optionIdentifier) populater {
-    switch optId {
+func getPopulateFunction(optID optionIdentifier) populater {
+    switch optID {
     case messages:
         return PopulateFromCollection
     default:
@@ -101,40 +101,44 @@ func (eoc *Collection) Items() <-chan data.Stream {
 }

 // FullPath returns the Collection's fullPath []string
-func (edc *Collection) FullPath() []string {
-    return append([]string{}, edc.fullPath...)
+func (eoc *Collection) FullPath() []string {
+    return append([]string{}, eoc.fullPath...)
 }

 // PopulateFromCollection async call to fill DataCollection via channel implementation
 func PopulateFromCollection(
     ctx context.Context,
     service graph.Service,
-    edc *Collection,
+    eoc *Collection,
     statusChannel chan<- *support.ConnectorOperationStatus,
 ) {
     var errs error
     var attemptedItems, success int
     objectWriter := kw.NewJsonSerializationWriter()

-    for _, task := range edc.jobs {
-        response, err := service.Client().UsersById(edc.user).MessagesById(task).Get()
+    for _, task := range eoc.jobs {
+        response, err := service.Client().UsersById(eoc.user).MessagesById(task).Get()
         if err != nil {
-            details := support.ConnectorStackErrorTrace(err)
-            errs = support.WrapAndAppend(edc.user, errors.Wrapf(err, "unable to retrieve item %s; details %s", task, details), errs)
+            errDetails := support.ConnectorStackErrorTrace(err)
+            errs = support.WrapAndAppend(
+                eoc.user,
+                errors.Wrapf(err, "unable to retrieve item %s; details %s", task, errDetails),
+                errs,
+            )
             continue
         }
-        err = messageToDataCollection(service.Client(), ctx, objectWriter, edc.data, response, edc.user)
+        err = messageToDataCollection(ctx, service.Client(), objectWriter, eoc.data, response, eoc.user)
         success++
         if err != nil {
-            errs = support.WrapAndAppendf(edc.user, err, errs)
+            errs = support.WrapAndAppendf(eoc.user, err, errs)
             success--
         }
         if errs != nil && service.ErrPolicy() {
             break
         }
     }
-    close(edc.data)
-    attemptedItems += len(edc.jobs)
+    close(eoc.data)
+    attemptedItems += len(eoc.jobs)

     status := support.CreateStatus(ctx, support.Backup, attemptedItems, success, 1, errs)
     logger.Ctx(ctx).Debug(status.String())
@@ -142,8 +146,8 @@ func PopulateFromCollection(
 }

 func messageToDataCollection(
-    client *msgraphsdk.GraphServiceClient,
     ctx context.Context,
+    client *msgraphsdk.GraphServiceClient,
     objectWriter *kw.JsonSerializationWriter,
     dataChannel chan<- data.Stream,
     message models.Messageable,
@@ -218,10 +222,10 @@ func (od *Stream) Info() details.ItemInfo {
 }

 // NewStream constructor for exchange.Stream object
-func NewStream(identifier string, bytes []byte, detail details.ExchangeInfo) Stream {
+func NewStream(identifier string, dataBytes []byte, detail details.ExchangeInfo) Stream {
     return Stream{
         id:      identifier,
-        message: bytes,
+        message: dataBytes,
         info:    &detail,
     }

@@ -136,7 +136,7 @@ func GetAllMailFolders(gs graph.Service, user, nameContains string) ([]MailFolde
 // @returns a *string if the folder exists. If the folder does not exist returns nil, error-> folder not found
 func GetMailFolderID(service graph.Service, folderName, user string) (*string, error) {
     var errs error
-    var folderId *string
+    var folderID *string
     options, err := optionsForMailFolders([]string{"displayName"})
     if err != nil {
         return nil, err
@@ -159,7 +159,7 @@ func GetMailFolderID(service graph.Service, folderName, user string) (*string, e
             return true
         }
         if *folder.GetDisplayName() == folderName {
-            folderId = folder.GetId()
+            folderID = folder.GetId()
             return false
         }
         return true
@@ -167,10 +167,10 @@ func GetMailFolderID(service graph.Service, folderName, user string) (*string, e
     iterateError := pageIterator.Iterate(callbackFunc)
     if iterateError != nil {
         errs = support.WrapAndAppend(service.Adapter().GetBaseUrl(), iterateError, errs)
-    } else if folderId == nil {
+    } else if folderID == nil {
         return nil, ErrFolderNotFound
     }
-    return folderId, errs
+    return folderID, errs

 }

@@ -205,10 +205,10 @@ func GetCopyRestoreFolder(service graph.Service, user string) (*string, error) {
             return nil, support.WrapAndAppend(user, err, err)
         }
         return fold.GetId(), nil
-    } else {
-        return nil, err
     }
+
+    return nil, err

     }
     return isFolder, nil
 }
@@ -234,10 +234,10 @@ func RestoreMailMessage(
     }
     // Sets fields from original message from storage
     clone := support.ToMessage(originalMessage)
-    valueId := RestorePropertyTag
+    valueID := RestorePropertyTag
     enableValue := RestoreCanonicalEnableValue
     sv := models.NewSingleValueLegacyExtendedProperty()
-    sv.SetId(&valueId)
+    sv.SetId(&valueID)
     sv.SetValue(&enableValue)
     svlep := []models.SingleValueLegacyExtendedPropertyable{sv}
     clone.SetSingleValueExtendedProperties(svlep)
@@ -68,7 +68,7 @@ func IterateSelectAllMessagesForCollections(
 ) func(any) bool {
     return func(messageItem any) bool {
         // Defines the type of collection being created within the function
-        collection_type := messages
+        collectionType := messages
         user := scope.Get(selectors.ExchangeUser)[0]

         message, ok := messageItem.(models.Messageable)
@@ -87,7 +87,7 @@ func IterateSelectAllMessagesForCollections(
         edc := NewCollection(
             user,
             []string{tenant, user, mailCategory, directory},
-            collection_type,
+            collectionType,
             service,
             statusCh,
         )
@@ -157,7 +157,7 @@ func optionsForMailFolders(moreOps []string) (*msfolder.MailFoldersRequestBuilde
 // buildOptions - Utility Method for verifying if select options are valid for the m365 object type
 // @return is a pair. The first is a string literal of allowable options based on the object type,
 // the second is an error. An error is returned if an unsupported option or optionIdentifier was used
-func buildOptions(options []string, optId optionIdentifier) ([]string, error) {
+func buildOptions(options []string, optID optionIdentifier) ([]string, error) {
     var allowedOptions map[string]int

     fieldsForFolders := map[string]int{
@@ -188,7 +188,7 @@ func buildOptions(options []string, optId optionIdentifier) ([]string, error) {
     }
     returnedOptions := []string{"id"}

-    switch optId {
+    switch optID {
     case folders:
         allowedOptions = fieldsForFolders
     case users:
@@ -203,11 +203,11 @@ func buildOptions(options []string, optId optionIdentifier) ([]string, error) {

     for _, entry := range options {
         _, ok := allowedOptions[entry]
-        if ok {
-            returnedOptions = append(returnedOptions, entry)
-        } else {
+        if !ok {
             return nil, errors.New("unsupported option")
         }
+
+        returnedOptions = append(returnedOptions, entry)
     }
     return returnedOptions, nil
 }
@@ -234,7 +234,7 @@ func (gc *GraphConnector) RestoreMessages(ctx context.Context, dcs []data.Collec
         pathCounter = map[string]bool{}
         attempts, successes int
         errs     error
-        folderId *string
+        folderID *string
     )
     policy := control.Copy // TODO policy to be updated from external source after completion of refactoring

@@ -243,7 +243,7 @@ func (gc *GraphConnector) RestoreMessages(ctx context.Context, dcs []data.Collec
         items := dc.Items()
         pathCounter[strings.Join(dc.FullPath(), "")] = true
         if policy == control.Copy {
-            folderId, errs = exchange.GetCopyRestoreFolder(&gc.graphService, user)
+            folderID, errs = exchange.GetCopyRestoreFolder(&gc.graphService, user)
             if errs != nil {
                 return errs
             }
@@ -254,7 +254,7 @@ func (gc *GraphConnector) RestoreMessages(ctx context.Context, dcs []data.Collec
             select {
             case <-ctx.Done():
                 return support.WrapAndAppend("context cancelled", ctx.Err(), errs)
-            case data, ok := <-items:
+            case itemData, ok := <-items:
                 if !ok {
                     exit = true
                     break
@@ -262,19 +262,19 @@ func (gc *GraphConnector) RestoreMessages(ctx context.Context, dcs []data.Collec
                 attempts++

                 buf := &bytes.Buffer{}
-                _, err := buf.ReadFrom(data.ToReader())
+                _, err := buf.ReadFrom(itemData.ToReader())
                 if err != nil {
-                    errs = support.WrapAndAppend(data.UUID(), err, errs)
+                    errs = support.WrapAndAppend(itemData.UUID(), err, errs)
                     continue
                 }
                 switch policy {
                 case control.Copy:
-                    err = exchange.RestoreMailMessage(ctx, buf.Bytes(), &gc.graphService, control.Copy, *folderId, user)
+                    err = exchange.RestoreMailMessage(ctx, buf.Bytes(), &gc.graphService, control.Copy, *folderID, user)
                     if err != nil {
-                        errs = support.WrapAndAppend(data.UUID(), err, errs)
+                        errs = support.WrapAndAppend(itemData.UUID(), err, errs)
                     }
                 default:
-                    errs = support.WrapAndAppend(data.UUID(), errors.New("restore policy not supported"), errs)
+                    errs = support.WrapAndAppend(itemData.UUID(), errors.New("restore policy not supported"), errs)
                     continue
                 }
                 successes++
@@ -119,9 +119,9 @@ func ConnectorStackErrorTrace(e error) string {
         if inners != nil {
             eMessage = eMessage + "\nConnector Section:"
             client := inners.GetClientRequestId()
-            rId := inners.GetRequestId()
+            rID := inners.GetRequestId()
             eMessage = concatenateStringFromPointers(eMessage,
-                []*string{client, rId})
+                []*string{client, rID})
         }
     }
     return eMessage
@@ -14,14 +14,8 @@ type DataSupportSuite struct {
     suite.Suite
 }

-const (
-    // File needs to be a single message .json
-    // Use: https://developer.microsoft.com/en-us/graph/graph-explorer for details
-    support_file = "CORSO_TEST_SUPPORT_FILE"
-)
-
 func TestDataSupportSuite(t *testing.T) {
-    err := tester.RunOnAny(support_file)
+    err := tester.RunOnAny(tester.CorsoGraphConnectorTestSupportFile)
     if err != nil {
         t.Skipf("Skipping: %v\n", err)
     }
@@ -29,7 +23,7 @@ func TestDataSupportSuite(t *testing.T) {
 }

 func (suite *DataSupportSuite) TestCreateMessageFromBytes() {
-    bytes, err := tester.LoadAFile(os.Getenv(SUPPORT_FILE))
+    bytes, err := tester.LoadAFile(os.Getenv(tester.CorsoGraphConnectorTestSupportFile))
     if err != nil {
         suite.T().Errorf("Failed with %v\n", err)
     }
@@ -14,18 +14,12 @@ type SupportTestSuite struct {
     suite.Suite
 }

-const (
-    // File needs to be a single message .json
-    // Use: https://developer.microsoft.com/en-us/graph/graph-explorer for details
-    SUPPORT_FILE = "CORSO_TEST_SUPPORT_FILE"
-)
-
 func TestSupportTestSuite(t *testing.T) {
-    evs, err := tester.GetRequiredEnvVars(SUPPORT_FILE)
+    evs, err := tester.GetRequiredEnvVars(tester.CorsoGraphConnectorTestSupportFile)
     if err != nil {
         t.Skipf("Env not configured: %v\n", err)
     }
-    _, err = os.Stat(evs[SUPPORT_FILE])
+    _, err = os.Stat(evs[tester.CorsoGraphConnectorTestSupportFile])
     if err != nil {
         t.Skip("Test object not available: Module Skipped")
     }
@@ -33,7 +27,7 @@ func TestSupportTestSuite(t *testing.T) {
 }

 func (suite *SupportTestSuite) TestToMessage() {
-    bytes, err := tester.LoadAFile(os.Getenv(SUPPORT_FILE))
+    bytes, err := tester.LoadAFile(os.Getenv(tester.CorsoGraphConnectorTestSupportFile))
     if err != nil {
         suite.T().Errorf("Failed with %v\n", err)
     }
@@ -78,11 +78,7 @@ func (w *conn) Initialize(ctx context.Context) error {
         return errors.Wrap(err, errConnect.Error())
     }

-    if err := w.open(ctx, cfg.CorsoPassword); err != nil {
-        return err
-    }
-
-    return nil
+    return w.open(ctx, cfg.CorsoPassword)
 }

 func (w *conn) Connect(ctx context.Context) error {
@@ -108,11 +104,7 @@ func (w *conn) Connect(ctx context.Context) error {
         return errors.Wrap(err, errConnect.Error())
     }

-    if err := w.open(ctx, cfg.CorsoPassword); err != nil {
-        return err
-    }
-
-    return nil
+    return w.open(ctx, cfg.CorsoPassword)
 }

 func blobStoreByProvider(ctx context.Context, s storage.Storage) (blob.Storage, error) {
@@ -11,6 +11,7 @@ import (
     "github.com/alcionai/corso/internal/tester"
 )

+//revive:disable:context-as-argument
 func openKopiaRepo(t *testing.T, ctx context.Context) (*conn, error) {
     storage, err := tester.NewPrefixedS3Storage(t)
     if err != nil {
@@ -20,6 +20,7 @@ type fooModel struct {
     Bar string
 }

+//revive:disable:context-as-argument
 func getModelStore(t *testing.T, ctx context.Context) *ModelStore {
     c, err := openKopiaRepo(t, ctx)
     require.NoError(t, err)
@@ -81,7 +81,7 @@ func (w *Wrapper) Close(ctx context.Context) error {
 // DataCollection.
 func getStreamItemFunc(
     collection data.Collection,
-    details *details.Details,
+    snapshotDetails *details.Details,
 ) func(context.Context, func(context.Context, fs.Entry) error) error {
     return func(ctx context.Context, cb func(context.Context, fs.Entry) error) error {
         items := collection.Items()
@@ -105,7 +105,7 @@ func getStreamItemFunc(

             // Populate BackupDetails
             ep := append(collection.FullPath(), e.UUID())
-            details.Add(path.Join(ep...), ei.Info())
+            snapshotDetails.Add(path.Join(ep...), ei.Info())
         }
     }
 }
@@ -114,7 +114,7 @@ func getStreamItemFunc(
 // buildKopiaDirs recursively builds a directory hierarchy from the roots up.
 // Returned directories are either virtualfs.StreamingDirectory or
 // virtualfs.staticDirectory.
-func buildKopiaDirs(dirName string, dir *treeMap, details *details.Details) (fs.Directory, error) {
+func buildKopiaDirs(dirName string, dir *treeMap, snapshotDetails *details.Details) (fs.Directory, error) {
     // Don't support directories that have both a DataCollection and a set of
     // static child directories.
     if dir.collection != nil && len(dir.childDirs) > 0 {
@@ -122,7 +122,7 @@ func buildKopiaDirs(dirName string, dir *treeMap, details *details.Details) (fs.
     }

     if dir.collection != nil {
-        return virtualfs.NewStreamingDirectory(dirName, getStreamItemFunc(dir.collection, details)), nil
+        return virtualfs.NewStreamingDirectory(dirName, getStreamItemFunc(dir.collection, snapshotDetails)), nil
     }

     // Need to build the directory tree from the leaves up because intermediate
@@ -130,7 +130,7 @@ func buildKopiaDirs(dirName string, dir *treeMap, details *details.Details) (fs.
     childDirs := []fs.Entry{}

     for childName, childDir := range dir.childDirs {
-        child, err := buildKopiaDirs(childName, childDir, details)
+        child, err := buildKopiaDirs(childName, childDir, snapshotDetails)
         if err != nil {
             return nil, err
         }
@@ -156,29 +156,29 @@ func newTreeMap() *treeMap {
 // ancestor of the streams and uses virtualfs.StaticDirectory for internal nodes
 // in the hierarchy. Leaf nodes are virtualfs.StreamingDirectory with the given
 // DataCollections.
-func inflateDirTree(ctx context.Context, collections []data.Collection, details *details.Details) (fs.Directory, error) {
+func inflateDirTree(ctx context.Context, collections []data.Collection, snapshotDetails *details.Details) (fs.Directory, error) {
     roots := make(map[string]*treeMap)

     for _, s := range collections {
-        path := s.FullPath()
+        itemPath := s.FullPath()

-        if len(path) == 0 {
+        if len(itemPath) == 0 {
             return nil, errors.New("no identifier for collection")
         }

-        dir, ok := roots[path[0]]
+        dir, ok := roots[itemPath[0]]
         if !ok {
             dir = newTreeMap()
-            roots[path[0]] = dir
+            roots[itemPath[0]] = dir
         }

         // Single DataCollection with no ancestors.
-        if len(path) == 1 {
+        if len(itemPath) == 1 {
             dir.collection = s
             continue
         }

-        for _, p := range path[1 : len(path)-1] {
+        for _, p := range itemPath[1 : len(itemPath)-1] {
             newDir, ok := dir.childDirs[p]
             if !ok {
                 newDir = newTreeMap()
@@ -197,16 +197,16 @@ func inflateDirTree(ctx context.Context, collections []data.Collection, details
         // as treeMap objects and `dir` is the parent directory of this
         // DataCollection.

-        end := len(path) - 1
+        end := len(itemPath) - 1

         // Make sure this entry doesn't already exist.
-        if _, ok := dir.childDirs[path[end]]; ok {
+        if _, ok := dir.childDirs[itemPath[end]]; ok {
             return nil, errors.New(errUnsupportedDir.Error())
         }

         sd := newTreeMap()
         sd.collection = s
-        dir.childDirs[path[end]] = sd
+        dir.childDirs[itemPath[end]] = sd
     }

     if len(roots) > 1 {
@@ -215,7 +215,7 @@ func inflateDirTree(ctx context.Context, collections []data.Collection, details

     var res fs.Directory
     for dirName, dir := range roots {
-        tmp, err := buildKopiaDirs(dirName, dir, details)
+        tmp, err := buildKopiaDirs(dirName, dir, snapshotDetails)
         if err != nil {
             return nil, err
         }
@@ -234,25 +234,25 @@ func (w Wrapper) BackupCollections(
         return nil, nil, errNotConnected
     }

-    details := &details.Details{}
+    snapshotDetails := &details.Details{}

-    dirTree, err := inflateDirTree(ctx, collections, details)
+    dirTree, err := inflateDirTree(ctx, collections, snapshotDetails)
     if err != nil {
         return nil, nil, errors.Wrap(err, "building kopia directories")
     }

-    stats, err := w.makeSnapshotWithRoot(ctx, dirTree, details)
+    stats, err := w.makeSnapshotWithRoot(ctx, dirTree, snapshotDetails)
     if err != nil {
         return nil, nil, err
     }

-    return stats, details, nil
+    return stats, snapshotDetails, nil
 }

 func (w Wrapper) makeSnapshotWithRoot(
     ctx context.Context,
     root fs.Directory,
-    details *details.Details,
+    snapshotDetails *details.Details,
 ) (*BackupStats, error) {
     var man *snapshot.Manifest

@@ -321,12 +321,12 @@ func (w Wrapper) getEntry(
         return nil, errors.New("no restore path given")
     }

-    manifest, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
+    man, err := snapshot.LoadSnapshot(ctx, w.c, manifest.ID(snapshotID))
     if err != nil {
         return nil, errors.Wrap(err, "getting snapshot handle")
     }

-    rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, manifest)
+    rootDirEntry, err := snapshotfs.SnapshotRoot(w.c, man)
     if err != nil {
         return nil, errors.Wrap(err, "getting root directory")
     }
@@ -563,8 +563,8 @@ func (w Wrapper) RestoreMultipleItems(
         dcs  = []data.Collection{}
         errs *multierror.Error
     )
-    for _, path := range paths {
-        dc, err := w.RestoreSingleItem(ctx, snapshotID, path)
+    for _, itemPath := range paths {
+        dc, err := w.RestoreSingleItem(ctx, snapshotID, itemPath)
         if err != nil {
             errs = multierror.Append(errs, err)
         } else {
@@ -117,7 +117,7 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree() {
         user2: 42,
     }

-    details := &details.Details{}
+    snapshotDetails := &details.Details{}

     collections := []data.Collection{
         mockconnector.NewMockExchangeCollection(
@@ -138,7 +138,7 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree() {
     // - user2
     //   - emails
     //     - 42 separate files
-    dirTree, err := inflateDirTree(ctx, collections, details)
+    dirTree, err := inflateDirTree(ctx, collections, snapshotDetails)
     require.NoError(suite.T(), err)
     assert.Equal(suite.T(), dirTree.Name(), tenant)

@@ -169,7 +169,7 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree() {
         totalFileCount += c
     }

-    assert.Len(suite.T(), details.Entries, totalFileCount)
+    assert.Len(suite.T(), snapshotDetails.Entries, totalFileCount)
 }

 func (suite *KopiaUnitSuite) TestBuildDirectoryTree_NoAncestorDirs() {
@@ -180,7 +180,7 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree_NoAncestorDirs() {

     expectedFileCount := 42

-    details := &details.Details{}
+    snapshotDetails := &details.Details{}
     collections := []data.Collection{
         mockconnector.NewMockExchangeCollection(
             []string{emails},
@@ -191,7 +191,7 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree_NoAncestorDirs() {
     // Returned directory structure should look like:
     // - emails
     //   - 42 separate files
-    dirTree, err := inflateDirTree(ctx, collections, details)
+    dirTree, err := inflateDirTree(ctx, collections, snapshotDetails)
     require.NoError(suite.T(), err)
     assert.Equal(suite.T(), dirTree.Name(), emails)

@@ -259,8 +259,8 @@ func (suite *KopiaUnitSuite) TestBuildDirectoryTree_Fails() {
     ctx := context.Background()

     suite.T().Run(test.name, func(t *testing.T) {
-        details := &details.Details{}
-        _, err := inflateDirTree(ctx, test.layout, details)
+        snapshotDetails := &details.Details{}
+        _, err := inflateDirTree(ctx, test.layout, snapshotDetails)
         assert.Error(t, err)
     })
 }
@@ -80,15 +80,15 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {

     // persist operation results to the model store on exit
     var (
-        stats   backupStats
-        details *details.Details
+        opStats       backupStats
+        backupDetails *details.Details
     )
     defer func() {
-        op.persistResults(time.Now(), &stats)
+        op.persistResults(time.Now(), &opStats)

-        err = op.createBackupModels(ctx, stats.k.SnapshotID, details)
+        err = op.createBackupModels(ctx, opStats.k.SnapshotID, backupDetails)
         if err != nil {
-            stats.writeErr = err
+            opStats.writeErr = err
             // todo: ^ we're not persisting this yet, except for the error shown to the user.
         }
     }()
@@ -96,24 +96,24 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
     // retrieve data from the producer
     gc, err := connector.NewGraphConnector(op.account)
     if err != nil {
-        stats.readErr = err
+        opStats.readErr = err
         return errors.Wrap(err, "connecting to graph api")
     }

     var cs []data.Collection
     cs, err = gc.ExchangeDataCollection(ctx, op.Selectors)
     if err != nil {
-        stats.readErr = err
+        opStats.readErr = err
         return errors.Wrap(err, "retrieving service data")
     }

     // hand the results to the consumer
-    stats.k, details, err = op.kopia.BackupCollections(ctx, cs)
+    opStats.k, backupDetails, err = op.kopia.BackupCollections(ctx, cs)
     if err != nil {
-        stats.writeErr = err
+        opStats.writeErr = err
         return errors.Wrap(err, "backing up service data")
     }
-    stats.gc = gc.AwaitStatus()
+    opStats.gc = gc.AwaitStatus()

     return err
 }
@@ -122,21 +122,21 @@ func (op *BackupOperation) Run(ctx context.Context) (err error) {
 // later stored in the manifest using createBackupModels.
 func (op *BackupOperation) persistResults(
     started time.Time,
-    stats *backupStats,
+    opStats *backupStats,
 ) {
     op.Status = Completed
-    if stats.k.TotalFileCount == 0 && (stats.readErr != nil || stats.writeErr != nil) {
+    if opStats.k.TotalFileCount == 0 && (opStats.readErr != nil || opStats.writeErr != nil) {
         op.Status = Failed
     }

-    op.Results.ReadErrors = stats.readErr
-    op.Results.WriteErrors = stats.writeErr
+    op.Results.ReadErrors = opStats.readErr
+    op.Results.WriteErrors = opStats.writeErr

-    if stats.gc != nil {
-        op.Results.ItemsRead = stats.gc.Successful
+    if opStats.gc != nil {
+        op.Results.ItemsRead = opStats.gc.Successful
     }
-    if stats.k != nil {
-        op.Results.ItemsWritten = stats.k.TotalFileCount
+    if opStats.k != nil {
+        op.Results.ItemsWritten = opStats.k.TotalFileCount
     }

     op.Results.StartedAt = started
@@ -144,14 +144,14 @@ func (op *BackupOperation) persistResults(
 }

 // stores the operation details, results, and selectors in the backup manifest.
-func (op *BackupOperation) createBackupModels(ctx context.Context, snapID string, details *details.Details) error {
-    err := op.store.Put(ctx, model.BackupDetailsSchema, &details.DetailsModel)
+func (op *BackupOperation) createBackupModels(ctx context.Context, snapID string, backupDetails *details.Details) error {
+    err := op.store.Put(ctx, model.BackupDetailsSchema, &backupDetails.DetailsModel)
     if err != nil {
         return errors.Wrap(err, "creating backupdetails model")
     }

     b := backup.New(
-        snapID, string(details.ModelStoreID), op.Status.String(),
+        snapID, string(backupDetails.ModelStoreID), op.Status.String(),
         op.Selectors,
         op.Results.ReadWrites,
         op.Results.StartAndEndTime,
@@ -81,19 +81,19 @@ func (op *RestoreOperation) Run(ctx context.Context) error {
     // TODO: persist initial state of restoreOperation in modelstore

     // persist operation results to the model store on exit
-    stats := restoreStats{}
-    defer op.persistResults(time.Now(), &stats)
+    opStats := restoreStats{}
+    defer op.persistResults(time.Now(), &opStats)

     // retrieve the restore point details
     d, b, err := op.store.GetDetailsFromBackupID(ctx, op.BackupID)
     if err != nil {
-        stats.readErr = errors.Wrap(err, "getting backup details for restore")
-        return stats.readErr
+        opStats.readErr = errors.Wrap(err, "getting backup details for restore")
+        return opStats.readErr
     }

     er, err := op.Selectors.ToExchangeRestore()
     if err != nil {
-        stats.readErr = err
+        opStats.readErr = err
         return err
     }

@@ -111,23 +111,23 @@ func (op *RestoreOperation) Run(ctx context.Context) error {
     }
     dcs, err := op.kopia.RestoreMultipleItems(ctx, b.SnapshotID, paths)
     if err != nil {
-        stats.readErr = errors.Wrap(err, "retrieving service data")
-        return stats.readErr
+        opStats.readErr = errors.Wrap(err, "retrieving service data")
+        return opStats.readErr
     }
-    stats.cs = dcs
+    opStats.cs = dcs

     // restore those collections using graph
     gc, err := connector.NewGraphConnector(op.account)
     if err != nil {
-        stats.writeErr = errors.Wrap(err, "connecting to graph api")
-        return stats.writeErr
+        opStats.writeErr = errors.Wrap(err, "connecting to graph api")
+        return opStats.writeErr
     }

     if err := gc.RestoreMessages(ctx, dcs); err != nil {
-        stats.writeErr = errors.Wrap(err, "restoring service data")
-        return stats.writeErr
+        opStats.writeErr = errors.Wrap(err, "restoring service data")
+        return opStats.writeErr
     }
-    stats.gc = gc.AwaitStatus()
+    opStats.gc = gc.AwaitStatus()

     return nil
 }
@@ -135,20 +135,20 @@ func (op *RestoreOperation) Run(ctx context.Context) error {
 // writes the restoreOperation outcome to the modelStore.
 func (op *RestoreOperation) persistResults(
     started time.Time,
-    stats *restoreStats,
+    opStats *restoreStats,
 ) {
     op.Status = Completed
-    if (stats.readErr != nil || stats.writeErr != nil) &&
-        (stats.gc == nil || stats.gc.Successful == 0) {
+    if (opStats.readErr != nil || opStats.writeErr != nil) &&
+        (opStats.gc == nil || opStats.gc.Successful == 0) {
         op.Status = Failed
     }
-    op.Results.ReadErrors = stats.readErr
-    op.Results.WriteErrors = stats.writeErr
+    op.Results.ReadErrors = opStats.readErr
+    op.Results.WriteErrors = opStats.writeErr

-    op.Results.ItemsRead = len(stats.cs) // TODO: file count, not collection count
+    op.Results.ItemsRead = len(opStats.cs) // TODO: file count, not collection count

-    if stats.gc != nil {
-        op.Results.ItemsWritten = stats.gc.Successful
+    if opStats.gc != nil {
+        op.Results.ItemsWritten = opStats.gc.Successful
     }

     op.Results.StartedAt = started
@@ -33,7 +33,6 @@
 // 8.
 // input path: `this/is/a/path\/`
 // elements of path: `this`, `is`, `a`, `path/`
-
 package path

 import (
@@ -17,8 +17,8 @@ func TestEnvvarsSuite(t *testing.T) {
 }

 func (suite *EnvvarsTestSuite) TestRunOnAny() {
-    env_variable := "TEST_ENVVARS_SUITE"
-    os.Setenv(env_variable, "1")
+    envVariable := "TEST_ENVVARS_SUITE"
+    os.Setenv(envVariable, "1")
     table := []struct {
         name     string
         param    string
@@ -26,7 +26,7 @@ func (suite *EnvvarsTestSuite) TestRunOnAny() {
     }{
         {
             name:     "Valid Environment",
-            param:    env_variable,
+            param:    envVariable,
             function: assert.NoError,
         },
         {
@@ -41,5 +41,5 @@ func (suite *EnvvarsTestSuite) TestRunOnAny() {
             test.function(suite.T(), result)
         })
     }
-    os.Unsetenv(env_variable)
+    os.Unsetenv(envVariable)
 }
@@ -19,6 +19,10 @@ const (
     CorsoRepositoryTests = "CORSO_REPOSITORY_TESTS"
 )

+// File needs to be a single message .json
+// Use: https://developer.microsoft.com/en-us/graph/graph-explorer for details
+const CorsoGraphConnectorTestSupportFile = "CORSO_TEST_SUPPORT_FILE"
+
 // RunOnAny takes in a list of env variable names and returns
 // an error if all of them are zero valued. Implication being:
 // if any of those env vars are truthy, you should run the
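With the support-file constant consolidated into the tester package, individual test files gate on it through tester.RunOnAny rather than declaring their own copies, as the test hunks earlier in this diff show. A compact sketch of that gating pattern (hypothetical test file and package name, mirroring the usage visible in the diff):

package connector_test

import (
	"testing"

	"github.com/alcionai/corso/internal/tester"
)

func TestNeedsSupportFile(t *testing.T) {
	// Skip unless CORSO_TEST_SUPPORT_FILE is set; the constant now lives in
	// the tester package instead of being re-declared in each test file.
	if err := tester.RunOnAny(tester.CorsoGraphConnectorTestSupportFile); err != nil {
		t.Skipf("Skipping: %v\n", err)
	}
	// ...test body...
}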
@@ -42,9 +42,9 @@ type Repository struct {
 func Initialize(
     ctx context.Context,
     acct account.Account,
-    storage storage.Storage,
+    s storage.Storage,
 ) (*Repository, error) {
-    kopiaRef := kopia.NewConn(storage)
+    kopiaRef := kopia.NewConn(s)
     if err := kopiaRef.Initialize(ctx); err != nil {
         return nil, err
     }
@@ -66,7 +66,7 @@ func Initialize(
         ID:      uuid.New(),
         Version: "v1",
         Account: acct,
-        Storage: storage,
+        Storage: s,
         dataLayer:  w,
         modelStore: ms,
     }
@@ -81,9 +81,9 @@ func Initialize(
 func Connect(
     ctx context.Context,
     acct account.Account,
-    storage storage.Storage,
+    s storage.Storage,
 ) (*Repository, error) {
-    kopiaRef := kopia.NewConn(storage)
+    kopiaRef := kopia.NewConn(s)
     if err := kopiaRef.Connect(ctx); err != nil {
         return nil, err
     }
@@ -105,7 +105,7 @@ func Connect(
     r := Repository{
         Version: "v1",
         Account: acct,
-        Storage: storage,
+        Storage: s,
         dataLayer:  w,
         modelStore: ms,
     }

@@ -652,14 +652,14 @@ func exchangeIDPath(cat exchangeCategory, path []string) map[exchangeCategory]st

 // Reduce reduces the entries in a backupDetails struct to only
 // those that match the inclusions, filters, and exclusions in the selector.
-func (s *ExchangeRestore) Reduce(deets *details.Details) *details.Details {
+func (sr *ExchangeRestore) Reduce(deets *details.Details) *details.Details {
     if deets == nil {
         return nil
     }

-    entExcs := exchangeScopesByCategory(s.Excludes)
-    entFilt := exchangeScopesByCategory(s.Filters)
-    entIncs := exchangeScopesByCategory(s.Includes)
+    entExcs := exchangeScopesByCategory(sr.Excludes)
+    entFilt := exchangeScopesByCategory(sr.Filters)
+    entIncs := exchangeScopesByCategory(sr.Includes)

     ents := []details.DetailsEntry{}

@@ -27,6 +27,6 @@ type Wrapper struct {
     Storer
 }

-func NewKopiaStore(kMS *kopia.ModelStore) *Wrapper {
-    return &Wrapper{kMS}
+func NewKopiaStore(kms *kopia.ModelStore) *Wrapper {
+    return &Wrapper{kms}
 }