diff --git a/internal/grpc/services/storageprovider/storageprovider.go b/internal/grpc/services/storageprovider/storageprovider.go
index f7812783cf6..0132e92d8b0 100644
--- a/internal/grpc/services/storageprovider/storageprovider.go
+++ b/internal/grpc/services/storageprovider/storageprovider.go
@@ -338,6 +338,15 @@ func (s *service) InitiateFileUpload(ctx context.Context, req *provider.Initiate
 }, nil
 }
+ // FIXME: This is a hack to transport more metadata to the storage.FS InitiateUpload implementation
+ // we should use a request object that can carry
+ // * if-match
+ // * if-unmodified-since
+ // * uploadLength from the tus Upload-Length header
+ // * checksum from the tus Upload-Checksum header
+ // * mtime from the X-OC-Mtime header
+ // * expires from the s.conf.UploadExpiration ... should that not be part of the driver?
+ // * providerID
 metadata := map[string]string{}
 ifMatch := req.GetIfMatch()
 if ifMatch != "" {
diff --git a/internal/http/services/owncloud/ocdav/tus.go b/internal/http/services/owncloud/ocdav/tus.go
index 84a3515eddb..fb7329eac9f 100644
--- a/internal/http/services/owncloud/ocdav/tus.go
+++ b/internal/http/services/owncloud/ocdav/tus.go
@@ -88,6 +88,10 @@ func (s *svc) handleSpacesTusPost(w http.ResponseWriter, r *http.Request, spaceI
 sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger()
+ // use filename to build a storage space reference
+ // but what if the upload happens directly to the resourceid ... and the filename is empty?
+ // currently there is always a validator that requires a non-empty filename ...
+ // hm -> bug: clients currently cannot POST to an existing resource with a resource id only
 ref, err := spacelookup.MakeStorageSpaceReference(spaceID, path.Join(r.URL.Path, meta["filename"]))
 if err != nil {
 w.WriteHeader(http.StatusBadRequest)
diff --git a/pkg/rhttp/datatx/manager/tus/filter.go b/pkg/rhttp/datatx/manager/tus/filter.go
new file mode 100644
index 00000000000..ca4eeaaeaa4
--- /dev/null
+++ b/pkg/rhttp/datatx/manager/tus/filter.go
@@ -0,0 +1,50 @@
+package tus
+
+import (
+ "net/http"
+ "strings"
+
+ tusd "github.com/tus/tusd/pkg/handler"
+)
+
+// FilterResponseWriter wraps an http.ResponseWriter and filters the
+// Upload-Metadata response header: only tus.-prefixed entries are passed
+// on to the client, with the prefix stripped.
+type FilterResponseWriter struct {
+ w http.ResponseWriter
+}
+
+// TusPrefix marks upload metadata that is exposed to tus clients,
+// CS3Prefix marks internal CS3 metadata that must not leave the server.
+const TusPrefix = "tus."
+const CS3Prefix = "cs3."
+
+// NewFilterResponseWriter wraps w so that internal metadata is filtered out of responses.
+func NewFilterResponseWriter(w http.ResponseWriter) *FilterResponseWriter {
+ return &FilterResponseWriter{
+ w: w,
+ }
+}
+
+func (fw *FilterResponseWriter) Header() http.Header {
+ return fw.w.Header()
+}
+
+func (fw *FilterResponseWriter) Write(b []byte) (int, error) {
+ return fw.w.Write(b)
+}
+
+// WriteHeader drops all cs3.-prefixed entries from the Upload-Metadata header
+// and strips the tus. prefix from the remaining ones, then writes the status code.
+func (fw *FilterResponseWriter) WriteHeader(statusCode int) {
+ metadata := tusd.ParseMetadataHeader(fw.w.Header().Get("Upload-Metadata"))
+ tusMetadata := map[string]string{}
+ for k, v := range metadata {
+ if strings.HasPrefix(k, TusPrefix) {
+ tusMetadata[strings.TrimPrefix(k, TusPrefix)] = v
+ }
+ }
+
+ fw.w.Header().Set("Upload-Metadata", tusd.SerializeMetadataHeader(tusMetadata))
+ fw.w.WriteHeader(statusCode)
+}
diff --git a/pkg/rhttp/datatx/manager/tus/tus.go b/pkg/rhttp/datatx/manager/tus/tus.go
index 052a1ecb884..b1a50fe4bac 100644
--- a/pkg/rhttp/datatx/manager/tus/tus.go
+++ b/pkg/rhttp/datatx/manager/tus/tus.go
@@ -137,20 +137,23 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 ev := <-handler.CompleteUploads
 info := ev.Upload
 spaceOwner := &userv1beta1.UserId{
- OpaqueId: info.MetaData["_SpaceOwnerOrManager"],
+ OpaqueId: info.MetaData[CS3Prefix+"SpaceOwnerOrManager"],
 }
 executant := &userv1beta1.UserId{
- Type: userv1beta1.UserType(userv1beta1.UserType_value[info.MetaData["_ExecutantType"]]),
- Idp: info.MetaData["_ExecutantIdp"],
- OpaqueId: info.MetaData["_ExecutantId"],
+ Type: userv1beta1.UserType(userv1beta1.UserType_value[info.MetaData[CS3Prefix+"ExecutantType"]]),
+ Idp: info.MetaData[CS3Prefix+"ExecutantIdp"],
+ OpaqueId: info.MetaData[CS3Prefix+"ExecutantId"],
 }
 ref := &provider.Reference{
 ResourceId: &provider.ResourceId{
- StorageId: info.MetaData["providerID"],
- SpaceId: info.MetaData["_SpaceRoot"],
- OpaqueId: info.MetaData["_SpaceRoot"], // TODO shouldn't this be the node id?
+ StorageId: info.MetaData[CS3Prefix+"providerID"],
+ SpaceId: info.MetaData[CS3Prefix+"SpaceRoot"],
+ OpaqueId: info.MetaData[CS3Prefix+"SpaceRoot"],
 },
- Path: utils.MakeRelativePath(filepath.Join(info.MetaData["dir"], info.MetaData["filename"])),
+ // FIXME this seems wrong, path is not really relative to space root
+ // actually it is: InitiateUpload calls fs.lu.Path to get the path relative to the root...
+ // hm is that robust? what if the file is moved? shouldn't we store the parent id, then?
+ Path: utils.MakeRelativePath(filepath.Join(info.MetaData[CS3Prefix+"dir"], info.MetaData[CS3Prefix+"filename"])),
 }
 datatx.InvalidateCache(executant, ref, m.statCache)
 if m.publisher != nil {
@@ -162,6 +165,9 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 }()
 h := handler.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // filter metadata headers
+ w = NewFilterResponseWriter(w)
+
 method := r.Method
 // https://github.com/tus/tus-resumable-upload-protocol/blob/master/protocol.md#x-http-method-override
 if r.Header.Get("X-HTTP-Method-Override") != "" {
@@ -231,14 +237,29 @@ func setHeaders(datastore tusd.DataStore, w http.ResponseWriter, r *http.Request
 appctx.GetLogger(ctx).Error().Err(err).Msg("could not get upload info for upload")
 return
 }
- expires := info.MetaData["expires"]
+ expires := info.MetaData[CS3Prefix+"expires"]
+ // fallback for outdated storageproviders that implement a tus datastore
+ if expires == "" {
+ expires = info.MetaData["expires"]
+ }
 if expires != "" {
 w.Header().Set(net.HeaderTusUploadExpires, expires)
 }
 resourceid := provider.ResourceId{
- StorageId: info.MetaData["providerID"],
- SpaceId: info.MetaData["_SpaceRoot"],
- OpaqueId: info.MetaData["_NodeId"],
+ StorageId: info.MetaData[CS3Prefix+"providerID"],
+ SpaceId: info.MetaData[CS3Prefix+"SpaceRoot"],
+ OpaqueId: info.MetaData[CS3Prefix+"NodeId"],
+ }
+ // fallback for outdated storageproviders that implement a tus datastore
+ if resourceid.StorageId == "" {
+ resourceid.StorageId = info.MetaData["providerID"]
+ }
+ if resourceid.SpaceId == "" {
+ resourceid.SpaceId = info.MetaData["_SpaceRoot"]
+ }
+ if resourceid.OpaqueId == "" {
+ resourceid.OpaqueId = info.MetaData["_NodeId"]
+ }
+ w.Header().Set(net.HeaderOCFileID, storagespace.FormatResourceID(resourceid))
 }
diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go
index edf1acf13ee..398c4f6d227 100644
--- a/pkg/storage/utils/decomposedfs/decomposedfs.go
+++ b/pkg/storage/utils/decomposedfs/decomposedfs.go
@@ -46,6 +46,7 @@ import (
 "github.com/cs3org/reva/v2/pkg/events"
 "github.com/cs3org/reva/v2/pkg/logger"
 "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool"
+ "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus"
 "github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download"
 "github.com/cs3org/reva/v2/pkg/storage"
 "github.com/cs3org/reva/v2/pkg/storage/cache"
@@ -284,7 +285,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
 keepUpload bool
 )
- n, err := node.ReadNode(ctx, fs.lu, info.MetaData["_SpaceRoot"], info.MetaData["_NodeId"], false, nil, true)
+ n, err := node.ReadNode(ctx, fs.lu, info.MetaData[tus.CS3Prefix+"SpaceRoot"], info.MetaData[tus.CS3Prefix+"NodeId"], false, nil, true)
 if err != nil {
 log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read node")
 continue
 }
@@ -318,7 +319,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
 now := time.Now()
 if failed {
- sizeDiff, err := strconv.ParseInt(info.MetaData["_sizeDiff"], 10, 64)
+ sizeDiff, err := strconv.ParseInt(info.MetaData[tus.CS3Prefix+"sizeDiff"], 10, 64)
 if err != nil {
 log.Error().Err(err).Str("uploadID", ev.UploadID).Interface("info", info).Msg("could not parse sizediff")
 continue
 }
@@ -348,20 +349,23 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) {
 Failed: failed,
 ExecutingUser: &user.User{
 Id: &user.UserId{
- Type: user.UserType(user.UserType_value[info.MetaData["_ExecutantType"]]),
- Idp: info.MetaData["_ExecutantIdp"],
- 
OpaqueId: info.MetaData["_ExecutantId"], + Type: user.UserType(user.UserType_value[info.MetaData[tus.CS3Prefix+"ExecutantType"]]), + Idp: info.MetaData[tus.CS3Prefix+"ExecutantIdp"], + OpaqueId: info.MetaData[tus.CS3Prefix+"ExecutantId"], }, - Username: info.MetaData["_ExecutantUserName"], + Username: info.MetaData[tus.CS3Prefix+"ExecutantUserName"], }, Filename: ev.Filename, FileRef: &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: info.MetaData["providerID"], - SpaceId: info.MetaData["_SpaceRoot"], - OpaqueId: info.MetaData["_SpaceRoot"], + StorageId: info.MetaData[tus.CS3Prefix+"providerID"], + SpaceId: info.MetaData[tus.CS3Prefix+"SpaceRoot"], + OpaqueId: info.MetaData[tus.CS3Prefix+"SpaceRoot"], }, - Path: utils.MakeRelativePath(filepath.Join(info.MetaData["dir"], info.MetaData["filename"])), + // FIXME this seems wrong, path is not really relative to space root + // actually it is: InitiateUpload calls fs.lu.Path to get the path relative to the root... + // hm is that robust? what if the file is moved? shouldn't we store the parent id, then? + Path: utils.MakeRelativePath(filepath.Join(info.MetaData[tus.CS3Prefix+"dir"], info.MetaData[tus.CS3Prefix+"filename"])), }, Timestamp: utils.TimeToTS(now), SpaceOwner: n.SpaceOwnerOrManager(ctx), @@ -381,7 +385,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) { continue // NOTE: since we can't get the upload, we can't restart postprocessing } - n, err := node.ReadNode(ctx, fs.lu, info.MetaData["_SpaceRoot"], info.MetaData["_NodeId"], false, nil, true) + n, err := node.ReadNode(ctx, fs.lu, info.MetaData[tus.CS3Prefix+"SpaceRoot"], info.MetaData[tus.CS3Prefix+"NodeId"], false, nil, true) if err != nil { log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("could not read node") continue @@ -398,7 +402,7 @@ func (fs *Decomposedfs) Postprocessing(ch <-chan events.Event) { SpaceOwner: n.SpaceOwnerOrManager(ctx), ExecutingUser: &user.User{Id: &user.UserId{OpaqueId: "postprocessing-restart"}}, // send nil instead? 
ResourceID: &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID}, - Filename: info.MetaData["_NodeName"], + Filename: info.MetaData[tus.CS3Prefix+"NodeName"], Filesize: uint64(info.Size), }); err != nil { log.Error().Err(err).Str("uploadID", ev.UploadID).Msg("Failed to publish BytesReceived event") diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index c07d7c12e3a..29a0b802d4f 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -28,7 +28,6 @@ import ( "hash/adler32" "io" iofs "io/fs" - "net/http" "net/url" "os" "path/filepath" @@ -50,6 +49,7 @@ import ( ctxpkg "github.com/cs3org/reva/v2/pkg/ctx" "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/events" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus" "github.com/cs3org/reva/v2/pkg/storage" "github.com/cs3org/reva/v2/pkg/storage/utils/chunking" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" @@ -82,8 +82,8 @@ func (fs *Decomposedfs) PreFinishResponseCallback(hook tusd.HookEvent) error { info := hook.Upload // set lockID to context - if info.MetaData["lockid"] != "" { - ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData["lockid"]) + if info.MetaData[tus.CS3Prefix+"lockid"] != "" { + ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData[tus.CS3Prefix+"lockid"]) } log := appctx.GetLogger(ctx) @@ -126,9 +126,9 @@ func (fs *Decomposedfs) PreFinishResponseCallback(hook tusd.HookEvent) error { // compare if they match the sent checksum // TODO the tus checksum extension would do this on every chunk, but I currently don't see an easy way to pass in the requested checksum. for now we do it in FinishUpload which is also called for chunked uploads - if info.MetaData["checksum"] != "" { + if info.MetaData[tus.CS3Prefix+"checksum"] != "" { var err error - parts := strings.SplitN(info.MetaData["checksum"], " ", 2) + parts := strings.SplitN(info.MetaData[tus.CS3Prefix+"checksum"], " ", 2) if len(parts) != 2 { return errtypes.BadRequest("invalid checksum format. 
must be '[algorithm] [checksum]'") } @@ -165,11 +165,11 @@ func (fs *Decomposedfs) PreFinishResponseCallback(hook tusd.HookEvent) error { user := &userv1beta1.User{ Id: &userv1beta1.UserId{ - Type: user.UserType(user.UserType_value[info.MetaData["_ExecutantType"]]), - Idp: info.MetaData["_ExecutantIdp"], - OpaqueId: info.MetaData["_ExecutantId"], + Type: user.UserType(user.UserType_value[info.MetaData[tus.CS3Prefix+"ExecutantType"]]), + Idp: info.MetaData[tus.CS3Prefix+"ExecutantIdp"], + OpaqueId: info.MetaData[tus.CS3Prefix+"ExecutantId"], }, - Username: info.MetaData["_ExecutantUserName"], + Username: info.MetaData[tus.CS3Prefix+"ExecutantUserName"], } s, err := fs.downloadURL(ctx, info.ID) if err != nil { @@ -182,7 +182,7 @@ func (fs *Decomposedfs) PreFinishResponseCallback(hook tusd.HookEvent) error { SpaceOwner: n.SpaceOwnerOrManager(ctx), ExecutingUser: user, ResourceID: &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID}, - Filename: info.MetaData["_NodeName"], + Filename: info.MetaData[tus.CS3Prefix+"NodeName"], Filesize: uint64(info.Size), }); err != nil { return err @@ -199,7 +199,7 @@ func (fs *Decomposedfs) PreFinishResponseCallback(hook tusd.HookEvent) error { } } - sizeDiff, err := strconv.ParseInt(info.MetaData["_sizeDiff"], 10, 64) + sizeDiff, err := strconv.ParseInt(info.MetaData[tus.CS3Prefix+"sizeDiff"], 10, 64) if err != nil { log.Error().Err(err).Msg("failed to parse size diff in upload info") return err @@ -295,12 +295,12 @@ func CreateNodeForUpload(ctx context.Context, lu *lookup.Lookup, info tusd.FileI var err error - spaceID := info.MetaData["_SpaceRoot"] + spaceID := info.MetaData[tus.CS3Prefix+"SpaceRoot"] n := node.New( spaceID, - info.MetaData["_NodeId"], - info.MetaData["_NodeParentId"], - info.MetaData["_NodeName"], + info.MetaData[tus.CS3Prefix+"NodeId"], + info.MetaData[tus.CS3Prefix+"NodeParentId"], + info.MetaData[tus.CS3Prefix+"NodeName"], info.Size, info.ID, provider.ResourceType_RESOURCE_TYPE_FILE, @@ -318,7 +318,7 @@ func CreateNodeForUpload(ctx context.Context, lu *lookup.Lookup, info tusd.FileI } var f *lockedfile.File - switch info.MetaData["_NodeExists"] { + switch info.MetaData[tus.CS3Prefix+"NodeExists"] { case "false": f, err = initNewNode(ctx, lu, info, n, uint64(info.Size)) if f != nil { @@ -343,9 +343,9 @@ func CreateNodeForUpload(ctx context.Context, lu *lookup.Lookup, info tusd.FileI } mtime := time.Now() - if info.MetaData["mtime"] != "" { + if info.MetaData[tus.TusPrefix+"mtime"] != "" { // overwrite mtime if requested - mtime, err = utils.MTimeToTime(info.MetaData["mtime"]) + mtime, err = utils.MTimeToTime(info.MetaData[tus.TusPrefix+"mtime"]) if err != nil { return nil, err } @@ -367,10 +367,10 @@ func CreateNodeForUpload(ctx context.Context, lu *lookup.Lookup, info tusd.FileI } // add etag to metadata - info.MetaData["etag"], _ = node.CalculateEtag(n, mtime) + info.MetaData[tus.CS3Prefix+"etag"], _ = node.CalculateEtag(n, mtime) // update nodeid for later - info.MetaData["_NodeId"] = n.ID + info.MetaData[tus.CS3Prefix+"NodeId"] = n.ID //if err := upload.writeInfo(); err != nil { // return nil, err //} @@ -418,7 +418,7 @@ func initNewNode(ctx context.Context, lu *lookup.Lookup, info tusd.FileInfo, n * log.Info().Msg("initNewNode: symlink created") // on a new file the sizeDiff is the fileSize - info.MetaData["_sizeDiff"] = strconv.FormatUint(fsize, 10) + info.MetaData[tus.CS3Prefix+"sizeDiff"] = strconv.FormatUint(fsize, 10) return f, nil } @@ -447,7 +447,7 @@ func updateExistingNode(ctx context.Context, lu *lookup.Lookup, info 
tusd.FileIn // When the if-match header was set we need to check if the // etag still matches before finishing the upload. - if ifMatch, ok := info.MetaData["if-match"]; ok { + if ifMatch, ok := info.MetaData[tus.CS3Prefix+"if-match"]; ok { if ifMatch != oldNodeEtag { return f, errtypes.Aborted("etag mismatch") } @@ -455,7 +455,7 @@ func updateExistingNode(ctx context.Context, lu *lookup.Lookup, info tusd.FileIn // When the if-none-match header was set we need to check if any of the // etags matches before finishing the upload. - if ifNoneMatch, ok := info.MetaData["if-none-match"]; ok { + if ifNoneMatch, ok := info.MetaData[tus.CS3Prefix+"if-none-match"]; ok { if ifNoneMatch == "*" { return f, errtypes.Aborted("etag mismatch, resource exists") } @@ -468,7 +468,7 @@ func updateExistingNode(ctx context.Context, lu *lookup.Lookup, info tusd.FileIn // When the if-unmodified-since header was set we need to check if the // etag still matches before finishing the upload. - if ifUnmodifiedSince, ok := info.MetaData["if-unmodified-since"]; ok { + if ifUnmodifiedSince, ok := info.MetaData[tus.CS3Prefix+"if-unmodified-since"]; ok { if err != nil { return f, errtypes.InternalError(fmt.Sprintf("failed to read mtime of node: %s", err)) } @@ -482,16 +482,16 @@ func updateExistingNode(ctx context.Context, lu *lookup.Lookup, info tusd.FileIn } } - info.MetaData["_versionsPath"] = lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) - info.MetaData["_sizeDiff"] = strconv.FormatInt(int64(fsize)-old.Blobsize, 10) + info.MetaData[tus.CS3Prefix+"versionsPath"] = lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) + info.MetaData[tus.CS3Prefix+"sizeDiff"] = strconv.FormatInt(int64(fsize)-old.Blobsize, 10) // create version node - if _, err := os.Create(info.MetaData["_versionsPath"]); err != nil { + if _, err := os.Create(info.MetaData[tus.CS3Prefix+"versionsPath"]); err != nil { return f, err } // copy blob metadata to version node - if err := lu.CopyMetadataWithSourceLock(ctx, targetPath, info.MetaData["_versionsPath"], func(attributeName string, value []byte) (newValue []byte, copy bool) { + if err := lu.CopyMetadataWithSourceLock(ctx, targetPath, info.MetaData[tus.CS3Prefix+"versionsPath"], func(attributeName string, value []byte) (newValue []byte, copy bool) { return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || attributeName == prefixes.TypeAttr || attributeName == prefixes.BlobIDAttr || @@ -502,7 +502,7 @@ func updateExistingNode(ctx context.Context, lu *lookup.Lookup, info tusd.FileIn } // keep mtime from previous version - if err := os.Chtimes(info.MetaData["_versionsPath"], oldNodeMtime, oldNodeMtime); err != nil { + if err := os.Chtimes(info.MetaData[tus.CS3Prefix+"versionsPath"], oldNodeMtime, oldNodeMtime); err != nil { return f, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err)) } @@ -521,7 +521,7 @@ func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r i uploadInfo, _ := up.GetInfo(ctx) - p := uploadInfo.MetaData["_NodeName"] + p := uploadInfo.MetaData[tus.CS3Prefix+"NodeName"] if chunking.IsChunked(p) { // check chunking v1 var assembledFile string p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) @@ -532,7 +532,7 @@ func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r i uploadInfo.StopUpload() return provider.ResourceInfo{}, errtypes.PartialContent(ref.String()) } - 
uploadInfo.MetaData["_NodeName"] = p + uploadInfo.MetaData[tus.CS3Prefix+"NodeName"] = p fd, err := os.Open(assembledFile) if err != nil { return provider.ResourceInfo{}, errors.Wrap(err, "Decomposedfs: error opening assembled file") @@ -560,19 +560,21 @@ func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r i if uff != nil { uploadRef := &provider.Reference{ ResourceId: &provider.ResourceId{ - StorageId: uploadInfo.MetaData["providerID"], - SpaceId: uploadInfo.MetaData["_SpaceRoot"], - OpaqueId: uploadInfo.MetaData["_SpaceRoot"], + StorageId: uploadInfo.MetaData[tus.CS3Prefix+"providerID"], + SpaceId: uploadInfo.MetaData[tus.CS3Prefix+"SpaceRoot"], + OpaqueId: uploadInfo.MetaData[tus.CS3Prefix+"SpaceRoot"], }, - // FIXME this sems wrong, path is not really relative to space root - Path: utils.MakeRelativePath(filepath.Join(uploadInfo.MetaData["dir"], uploadInfo.MetaData["filename"])), + // FIXME this seems wrong, path is not really relative to space root + // actually it is: InitiateUpload calls fs.lu.Path to get the path relative to the root... + // hm is that robust? what if the file is moved? shouldn't we store the parent id, then? + Path: utils.MakeRelativePath(filepath.Join(uploadInfo.MetaData[tus.CS3Prefix+"dir"], uploadInfo.MetaData[tus.CS3Prefix+"filename"])), } excutant, ok := ctxpkg.ContextGetUser(ctx) if !ok { return provider.ResourceInfo{}, errtypes.PreconditionFailed("error getting user from context") } spaceOwner := &userpb.UserId{ - OpaqueId: uploadInfo.MetaData["_SpaceOwnerOrManager"], + OpaqueId: uploadInfo.MetaData[tus.CS3Prefix+"SpaceOwnerOrManager"], } uff(spaceOwner, excutant.Id, uploadRef) } @@ -580,14 +582,14 @@ func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r i ri := provider.ResourceInfo{ // fill with at least fileid, mtime and etag Id: &provider.ResourceId{ - StorageId: uploadInfo.MetaData["providerID"], - SpaceId: uploadInfo.MetaData["_SpaceRoot"], - OpaqueId: uploadInfo.MetaData["_NodeId"], + StorageId: uploadInfo.MetaData[tus.CS3Prefix+"providerID"], + SpaceId: uploadInfo.MetaData[tus.CS3Prefix+"SpaceRoot"], + OpaqueId: uploadInfo.MetaData[tus.CS3Prefix+"NodeId"], }, - Etag: uploadInfo.MetaData["etag"], + Etag: uploadInfo.MetaData[tus.CS3Prefix+"etag"], } - if mtime, err := utils.MTimeToTS(uploadInfo.MetaData["mtime"]); err == nil { + if mtime, err := utils.MTimeToTS(uploadInfo.MetaData[tus.TusPrefix+"mtime"]); err == nil { ri.Mtime = &mtime } @@ -597,7 +599,8 @@ func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r i // InitiateUpload returns upload ids corresponding to different protocols it supports // TODO read optional content for small files in this request // TODO InitiateUpload (and Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
-func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { +// FIXME metadata is actually used to carry all kinds of headers +func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, headers map[string]string) (map[string]string, error) { log := appctx.GetLogger(ctx) n, err := fs.lu.NodeFromResource(ctx, ref) @@ -621,63 +624,58 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere info := tusd.FileInfo{ MetaData: tusd.MetaData{ - "filename": filepath.Base(relative), - "dir": filepath.Dir(relative), - "lockid": lockID, - // TODO metadata with _ should not be exposed? - "_SpaceRoot": n.SpaceRoot.ID, - "_SpaceOwnerOrManager": n.SpaceOwnerOrManager(ctx).GetOpaqueId(), + tus.CS3Prefix + "filename": filepath.Base(relative), + tus.CS3Prefix + "dir": filepath.Dir(relative), + tus.CS3Prefix + "lockid": lockID, + tus.CS3Prefix + "SpaceRoot": n.SpaceRoot.ID, + tus.CS3Prefix + "SpaceOwnerOrManager": n.SpaceOwnerOrManager(ctx).GetOpaqueId(), }, Size: uploadLength, // Store } - header := http.Header{} - - if metadata != nil { - info.MetaData["providerID"] = metadata["providerID"] - if mtime, ok := metadata["mtime"]; ok { + if headers != nil { + info.MetaData[tus.CS3Prefix+"providerID"] = headers["providerID"] + // mtime has been set via tus metadata, expose it as tus metadata + if mtime, ok := headers["mtime"]; ok { if mtime != "null" { - info.MetaData["mtime"] = mtime + info.MetaData[tus.TusPrefix+"mtime"] = mtime } } - if expiration, ok := metadata["expires"]; ok { + // expires has been set by the storageprovider, do not expose + if expiration, ok := headers["expires"]; ok { if expiration != "null" { - info.MetaData["expires"] = expiration + info.MetaData[tus.CS3Prefix+"expires"] = expiration } } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - if checksum, ok := metadata["checksum"]; ok { + // checksum is sent as tus Upload-Checksum header and should not magically become a metadata property + if checksum, ok := headers["checksum"]; ok { parts := strings.SplitN(checksum, " ", 2) if len(parts) != 2 { return nil, errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") } switch parts[0] { case "sha1", "md5", "adler32": - info.MetaData["checksum"] = checksum + info.MetaData[tus.CS3Prefix+"checksum"] = checksum default: return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) } } - // only check preconditions if they are not empty // TODO or is this a bad request? - if metadata["if-match"] != "" { - header.Set("if-match", metadata["if-match"]) - info.MetaData["if-match"] = metadata["if-match"] // TODO drop + // only check preconditions if they are not empty + // do not expose as metadata + if headers["if-match"] != "" { + info.MetaData[tus.CS3Prefix+"if-match"] = headers["if-match"] // TODO drop? 
} - if metadata["if-none-match"] != "" { - header.Set("if-none-match", metadata["if-none-match"]) - info.MetaData["if-none-match"] = metadata["if-none-match"] + if headers["if-none-match"] != "" { + info.MetaData[tus.CS3Prefix+"if-none-match"] = headers["if-none-match"] } - if metadata["if-unmodified-since"] != "" { - header.Set("if-unmodified-since", metadata["if-unmodified-since"]) - info.MetaData["if-unmodified-since"] = metadata["if-unmodified-since"] + if headers["if-unmodified-since"] != "" { + info.MetaData[tus.CS3Prefix+"if-unmodified-since"] = headers["if-unmodified-since"] } } - log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("Decomposedfs: resolved filename") + log.Debug().Interface("info", info).Interface("node", n).Interface("headers", headers).Msg("Decomposedfs: resolved filename") _, err = node.CheckQuota(ctx, n.SpaceRoot, n.Exists, uint64(n.Blobsize), uint64(info.Size)) if err != nil { @@ -724,8 +722,8 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere } // check lock - if info.MetaData["lockid"] != "" { - ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData["lockid"]) + if info.MetaData[tus.CS3Prefix+"lockid"] != "" { + ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData[tus.CS3Prefix+"lockid"]) } if err := n.CheckLock(ctx); err != nil { return nil, err @@ -733,25 +731,25 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere usr := ctxpkg.ContextMustGetUser(ctx) - info.MetaData["_NodeParentId"] = n.ParentID - info.MetaData["_NodeName"] = n.Name + info.MetaData[tus.CS3Prefix+"NodeParentId"] = n.ParentID + info.MetaData[tus.CS3Prefix+"NodeName"] = n.Name - info.MetaData["_ExecutantIdp"] = usr.Id.Idp - info.MetaData["_ExecutantId"] = usr.Id.OpaqueId - info.MetaData["_ExecutantType"] = utils.UserTypeToString(usr.Id.Type) - info.MetaData["_ExecutantUserName"] = usr.Username + info.MetaData[tus.CS3Prefix+"ExecutantIdp"] = usr.Id.Idp + info.MetaData[tus.CS3Prefix+"ExecutantId"] = usr.Id.OpaqueId + info.MetaData[tus.CS3Prefix+"ExecutantType"] = utils.UserTypeToString(usr.Id.Type) + info.MetaData[tus.CS3Prefix+"ExecutantUserName"] = usr.Username - info.MetaData["_LogLevel"] = log.GetLevel().String() + info.MetaData[tus.CS3Prefix+"LogLevel"] = log.GetLevel().String() if n.Exists { - info.MetaData["_NodeId"] = n.ID - info.MetaData["_NodeExists"] = "true" + info.MetaData[tus.CS3Prefix+"NodeId"] = n.ID + info.MetaData[tus.CS3Prefix+"NodeExists"] = "true" } else { // fill future node info - info.MetaData["_NodeId"] = uuid.New().String() - info.MetaData["_NodeExists"] = "false" + info.MetaData[tus.CS3Prefix+"NodeId"] = uuid.New().String() + info.MetaData[tus.CS3Prefix+"NodeExists"] = "false" } - if info.MetaData["if-none-match"] == "*" && info.MetaData["_NodeExists"] == "true" { + if info.MetaData[tus.CS3Prefix+"if-none-match"] == "*" && n.Exists { return nil, errtypes.Aborted(fmt.Sprintf("parent %s already has a child %s", n.ID, n.Name)) } @@ -803,13 +801,13 @@ func (fs *Decomposedfs) PurgeExpiredUploads(purgedChan chan<- tusd.FileInfo) err } for _, info := range infos { - expires, err := strconv.Atoi(info.MetaData["expires"]) + expires, err := strconv.Atoi(info.MetaData[tus.CS3Prefix+"expires"]) if err != nil { continue } if int64(expires) < time.Now().Unix() { purgedChan <- info - err = os.Remove(info.Storage["BinPath"]) + err = os.Remove(info.Storage["BinPath"]) // FIXME if err != nil { return err } @@ -845,7 +843,7 @@ func (fs *Decomposedfs) AsConcatableUpload(up tusd.Upload) 
tusd.ConcatableUpload func (fs *Decomposedfs) uploadInfos(ctx context.Context) ([]tusd.FileInfo, error) { infos := []tusd.FileInfo{} - infoFiles, err := filepath.Glob(filepath.Join(fs.o.Root, "uploads", "*.info")) + infoFiles, err := filepath.Glob(filepath.Join(fs.o.Root, "uploads", "*.info")) // FIXME if err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/upload/processing.go b/pkg/storage/utils/decomposedfs/upload/processing.go index 8ea21104ada..d4b719d991f 100644 --- a/pkg/storage/utils/decomposedfs/upload/processing.go +++ b/pkg/storage/utils/decomposedfs/upload/processing.go @@ -36,6 +36,7 @@ import ( "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/logger" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus" "github.com/cs3org/reva/v2/pkg/storage/utils/chunking" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" @@ -62,19 +63,19 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p log := appctx.GetLogger(ctx) log.Debug().Interface("info", info).Msg("Decomposedfs: NewUpload") - if info.MetaData["filename"] == "" { + if info.MetaData[tus.CS3Prefix+"filename"] == "" { return nil, errors.New("Decomposedfs: missing filename in metadata") } - if info.MetaData["dir"] == "" { + if info.MetaData[tus.CS3Prefix+"dir"] == "" { return nil, errors.New("Decomposedfs: missing dir in metadata") } - n, err := lu.NodeFromSpaceID(ctx, info.MetaData["_SpaceRoot"]) + n, err := lu.NodeFromSpaceID(ctx, info.MetaData[tus.CS3Prefix+"SpaceRoot"]) if err != nil { return nil, errors.Wrap(err, "Decomposedfs: error getting space root node") } - n, err = lookupNode(ctx, n, filepath.Join(info.MetaData["dir"], info.MetaData["filename"]), lu) + n, err = lookupNode(ctx, n, filepath.Join(info.MetaData[tus.CS3Prefix+"dir"], info.MetaData[tus.CS3Prefix+"filename"]), lu) if err != nil { return nil, errors.Wrap(err, "Decomposedfs: error walking path") } @@ -121,8 +122,8 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p } // check lock - if info.MetaData["lockid"] != "" { - ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData["lockid"]) + if info.MetaData[tus.CS3Prefix+"lockid"] != "" { + ctx = ctxpkg.ContextSetLockID(ctx, info.MetaData[tus.CS3Prefix+"lockid"]) } if err := n.CheckLock(ctx); err != nil { return nil, err @@ -138,7 +139,7 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p ok bool ) if info.MetaData != nil { - if spaceRoot, ok = info.MetaData["_SpaceRoot"]; !ok { + if spaceRoot, ok = info.MetaData[tus.CS3Prefix+"SpaceRoot"]; !ok { spaceRoot = n.SpaceRoot.ID } } else { @@ -149,26 +150,26 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p "Type": "OCISStore", "BinPath": binPath, - "_NodeId": n.ID, - "_NodeExists": "true", - "_NodeParentId": n.ParentID, - "_NodeName": n.Name, - "_SpaceRoot": spaceRoot, - "_SpaceOwnerOrManager": info.Storage["SpaceOwnerOrManager"], + tus.CS3Prefix + "NodeId": n.ID, + tus.CS3Prefix + "NodeExists": "true", + tus.CS3Prefix + "NodeParentId": n.ParentID, + tus.CS3Prefix + "NodeName": n.Name, + tus.CS3Prefix + "SpaceRoot": spaceRoot, + tus.CS3Prefix + "SpaceOwnerOrManager": info.Storage["SpaceOwnerOrManager"], - "_ExecutantIdp": usr.Id.Idp, - "_ExecutantId": usr.Id.OpaqueId, - "_ExecutantType": utils.UserTypeToString(usr.Id.Type), - "_ExecutantUserName": usr.Username, + tus.CS3Prefix + 
"ExecutantIdp": usr.Id.Idp, + tus.CS3Prefix + "ExecutantId": usr.Id.OpaqueId, + tus.CS3Prefix + "ExecutantType": utils.UserTypeToString(usr.Id.Type), + tus.CS3Prefix + "ExecutantUserName": usr.Username, - "_LogLevel": log.GetLevel().String(), + tus.CS3Prefix + "LogLevel": log.GetLevel().String(), } if !n.Exists { // fill future node info - info.MetaData["_NodeId"] = uuid.New().String() - info.MetaData["_NodeExists"] = "false" + info.MetaData[tus.CS3Prefix+"NodeId"] = uuid.New().String() + info.MetaData[tus.CS3Prefix+"NodeExists"] = "false" } - if info.MetaData["if-none-match"] == "*" && info.MetaData["NodeExists"] == "true" { + if info.MetaData[tus.CS3Prefix+"if-none-match"] == "*" && n.Exists { return nil, errtypes.Aborted(fmt.Sprintf("parent %s already has a child %s", n.ID, n.Name)) } // Create binary file in the upload folder with no content @@ -253,12 +254,12 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, } fsize := fi.Size() - spaceID := upload.Info.MetaData["_SpaceRoot"] + spaceID := upload.Info.MetaData[tus.CS3Prefix+"SpaceRoot"] n := node.New( spaceID, - upload.Info.MetaData["_NodeId"], - upload.Info.MetaData["_NodeParentId"], - upload.Info.MetaData["_NodeName"], + upload.Info.MetaData[tus.CS3Prefix+"NodeId"], + upload.Info.MetaData[tus.CS3Prefix+"NodeParentId"], + upload.Info.MetaData[tus.CS3Prefix+"NodeName"], fsize, upload.Info.ID, provider.ResourceType_RESOURCE_TYPE_FILE, @@ -276,7 +277,7 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, } var f *lockedfile.File - switch upload.Info.MetaData["_NodeExists"] { + switch upload.Info.MetaData[tus.CS3Prefix+"NodeExists"] { case "false": f, err = initNewNode(upload, n, uint64(fsize)) if f != nil { @@ -328,7 +329,7 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, upload.Info.MetaData["etag"], _ = node.CalculateEtag(n, mtime) // update nodeid for later - upload.Info.MetaData["_NodeId"] = n.ID + upload.Info.MetaData[tus.CS3Prefix+"NodeId"] = n.ID if err := upload.writeInfo(); err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/upload/upload.go b/pkg/storage/utils/decomposedfs/upload/upload.go index 6721483a138..2ec2006dd09 100644 --- a/pkg/storage/utils/decomposedfs/upload/upload.go +++ b/pkg/storage/utils/decomposedfs/upload/upload.go @@ -39,6 +39,7 @@ import ( ctxpkg "github.com/cs3org/reva/v2/pkg/ctx" "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/events" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" @@ -194,8 +195,8 @@ func (upload *Upload) FinishUpload(_ context.Context) error { ctx, span := tracer.Start(upload.Ctx, "FinishUpload") defer span.End() // set lockID to context - if upload.Info.MetaData["lockid"] != "" { - upload.Ctx = ctxpkg.ContextSetLockID(upload.Ctx, upload.Info.MetaData["lockid"]) + if upload.Info.MetaData[tus.CS3Prefix+"lockid"] != "" { + upload.Ctx = ctxpkg.ContextSetLockID(upload.Ctx, upload.Info.MetaData[tus.CS3Prefix+"lockid"]) } log := appctx.GetLogger(upload.Ctx) @@ -280,7 +281,7 @@ func (upload *Upload) FinishUpload(_ context.Context) error { SpaceOwner: n.SpaceOwnerOrManager(upload.Ctx), ExecutingUser: u, ResourceID: &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID}, - Filename: upload.Info.MetaData["_NodeName"], + Filename: 
upload.Info.MetaData[tus.CS3Prefix+"NodeName"], Filesize: uint64(upload.Info.Size), }); err != nil { return err @@ -356,7 +357,7 @@ func (upload *Upload) Finalize() (err error) { n := upload.Node if n == nil { var err error - n, err = node.ReadNode(ctx, upload.lu, upload.Info.MetaData["_SpaceRoot"], upload.Info.MetaData["_NodeId"], false, nil, false) + n, err = node.ReadNode(ctx, upload.lu, upload.Info.MetaData[tus.CS3Prefix+"SpaceRoot"], upload.Info.MetaData[tus.CS3Prefix+"NodeId"], false, nil, false) if err != nil { return err }
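
A quick way to see what the new FilterResponseWriter does is to drive it with a recorded response. The following is a minimal, self-contained sketch, not part of the change set; it only assumes the tusd helpers already used above and Go's httptest package, and all metadata values are made-up examples:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	tus "github.com/cs3org/reva/v2/pkg/rhttp/datatx/manager/tus"
	tusd "github.com/tus/tusd/pkg/handler"
)

func main() {
	rec := httptest.NewRecorder()
	var w http.ResponseWriter = tus.NewFilterResponseWriter(rec)

	// A tusd handler would normally write this header: cs3.* carries
	// internal node state, tus.* carries client-facing metadata.
	w.Header().Set("Upload-Metadata", tusd.SerializeMetadataHeader(map[string]string{
		tus.CS3Prefix + "NodeId": "some-internal-node-id", // example value
		tus.TusPrefix + "mtime":  "1681307400",            // example value
	}))
	w.WriteHeader(http.StatusCreated)

	// WriteHeader re-serialized the header with only the tus.* entries,
	// prefixes stripped: prints map[mtime:1681307400]
	fmt.Println(tusd.ParseMetadataHeader(rec.Header().Get("Upload-Metadata")))
}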
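
The legacy-key fallbacks in setHeaders could also be expressed once instead of per field. A hypothetical helper, not part of this patch, assuming it lives next to filter.go in the tus package and that outdated providers wrote the old keys removed above (providerID, _SpaceRoot, _NodeId):

// metaWithFallback reads a cs3.-prefixed key and falls back to the legacy
// key written by outdated storageproviders that bring their own tus datastore.
func metaWithFallback(meta tusd.MetaData, key, legacyKey string) string {
	if v := meta[CS3Prefix+key]; v != "" {
		return v
	}
	return meta[legacyKey]
}

// usage in setHeaders:
//	resourceid := provider.ResourceId{
//		StorageId: metaWithFallback(info.MetaData, "providerID", "providerID"),
//		SpaceId:   metaWithFallback(info.MetaData, "SpaceRoot", "_SpaceRoot"),
//		OpaqueId:  metaWithFallback(info.MetaData, "NodeId", "_NodeId"),
//	}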