diff --git a/apps/backend/internal/blob/fs_store.go b/apps/backend/internal/blob/fs_store.go index 78c9ce8..1803766 100644 --- a/apps/backend/internal/blob/fs_store.go +++ b/apps/backend/internal/blob/fs_store.go @@ -3,6 +3,7 @@ package blob import ( "context" "io" + "log/slog" "os" "path/filepath" @@ -23,6 +24,14 @@ func NewFSStore(config FSStoreConfig) *FSStore { return &FSStore{config: config} } +func (s *FSStore) SupportsDirectUpload() bool { + return false +} + +func (s *FSStore) SupportsDirectDownload() bool { + return false +} + func (s *FSStore) Initialize(ctx context.Context) error { return os.MkdirAll(s.config.Root, 0755) } @@ -30,6 +39,8 @@ func (s *FSStore) Initialize(ctx context.Context) error { func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error { path := filepath.Join(s.config.Root, string(key)) + slog.Info("fs store: putting file", "path", path) + err := os.MkdirAll(filepath.Dir(path), 0755) if err != nil { return err @@ -55,6 +66,9 @@ func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error { func (s *FSStore) Read(ctx context.Context, key Key) (io.ReadCloser, error) { path := filepath.Join(s.config.Root, string(key)) + + slog.Info("fs store: reading file", "path", path) + f, err := os.Open(path) if err != nil { if os.IsNotExist(err) { @@ -68,6 +82,8 @@ func (s *FSStore) Read(ctx context.Context, key Key) (io.ReadCloser, error) { func (s *FSStore) ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error) { path := filepath.Join(s.config.Root, string(key)) + slog.Info("fs store: reading range", "path", path, "offset", offset, "length", length) + f, err := os.Open(path) if err != nil { if os.IsNotExist(err) { @@ -86,6 +102,9 @@ func (s *FSStore) ReadRange(ctx context.Context, key Key, offset, length int64) func (s *FSStore) ReadSize(ctx context.Context, key Key) (int64, error) { path := filepath.Join(s.config.Root, string(key)) + + slog.Info("fs store: reading size", "path", path) 
+ fi, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { @@ -97,7 +116,9 @@ func (s *FSStore) ReadSize(ctx context.Context, key Key) (int64, error) { } func (s *FSStore) Delete(ctx context.Context, key Key) error { - err := os.Remove(filepath.Join(s.config.Root, string(key))) + path := filepath.Join(s.config.Root, string(key)) + slog.Info("fs store: deleting file", "path", path) + err := os.Remove(path) // no op if file does not exist // swallow error if file does not exist if err != nil && !os.IsNotExist(err) { @@ -145,10 +166,10 @@ func (s *FSStore) Move(ctx context.Context, srcKey, dstKey Key) error { return nil } -func (s *FSStore) SupportsDirectUpload() bool { - return false -} - func (s *FSStore) GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error) { return "", nil } + +func (s *FSStore) GenerateDownloadURL(ctx context.Context, key Key, opts DownloadURLOptions) (string, error) { + return "", nil +} diff --git a/apps/backend/internal/blob/store.go b/apps/backend/internal/blob/store.go index 0538f3f..ab761ac 100644 --- a/apps/backend/internal/blob/store.go +++ b/apps/backend/internal/blob/store.go @@ -10,11 +10,21 @@ type UploadURLOptions struct { Duration time.Duration } +type DownloadURLOptions struct { + Duration time.Duration +} + type UpdateOptions struct { ContentType string } type Store interface { + // SupportsDirectUpload returns true if the store allows files to be uploaded directly to the blob store. + SupportsDirectUpload() bool + + // SupportsDirectDownload returns true if the store allows files to be downloaded directly from the blob store. 
+ SupportsDirectDownload() bool + Initialize(ctx context.Context) error Put(ctx context.Context, key Key, reader io.Reader) error Update(ctx context.Context, key Key, opts UpdateOptions) error @@ -25,9 +35,9 @@ type Store interface { ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error) ReadSize(ctx context.Context, key Key) (int64, error) - // SupportsDirectUpload returns true if the store allows files to be uploaded directly to the blob store. - SupportsDirectUpload() bool - // GenerateUploadURL generates a URL that can be used to upload a file directly to the blob store. If unsupported, returns an empty string with no error. GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error) + + // GenerateDownloadURL generates a URL that can be used to download a file directly from the blob store. If unsupported, returns an empty string with no error. + GenerateDownloadURL(ctx context.Context, key Key, opts DownloadURLOptions) (string, error) } diff --git a/apps/backend/internal/catalog/doc.go b/apps/backend/internal/catalog/doc.go new file mode 100644 index 0000000..bb2331a --- /dev/null +++ b/apps/backend/internal/catalog/doc.go @@ -0,0 +1,2 @@ +// Package catalog handles file and directory browsing. 
+package catalog diff --git a/apps/backend/internal/catalog/file.go b/apps/backend/internal/catalog/file.go new file mode 100644 index 0000000..0390d40 --- /dev/null +++ b/apps/backend/internal/catalog/file.go @@ -0,0 +1,13 @@ +package catalog + +import "time" + +type FileInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Size int64 `json:"size"` + MimeType string `json:"mimeType"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + DeletedAt *time.Time `json:"deletedAt,omitempty"` +} diff --git a/apps/backend/internal/catalog/http.go b/apps/backend/internal/catalog/http.go new file mode 100644 index 0000000..a944679 --- /dev/null +++ b/apps/backend/internal/catalog/http.go @@ -0,0 +1,184 @@ +package catalog + +import ( + "errors" + + "github.com/get-drexa/drexa/internal/account" + "github.com/get-drexa/drexa/internal/httperr" + "github.com/get-drexa/drexa/internal/virtualfs" + "github.com/gofiber/fiber/v2" + "github.com/uptrace/bun" +) + +type HTTPHandler struct { + vfs *virtualfs.VirtualFS + db *bun.DB +} + +type patchFileRequest struct { + Name string `json:"name"` +} + +func NewHTTPHandler(vfs *virtualfs.VirtualFS, db *bun.DB) *HTTPHandler { + return &HTTPHandler{vfs: vfs, db: db} +} + +func (h *HTTPHandler) RegisterRoutes(api fiber.Router) { + g := api.Group("/files/:fileID") + g.Use(h.currentFileMiddleware) + g.Get("/", h.fetchFile) + g.Get("/content", h.downloadFile) + g.Patch("/", h.patchFile) + g.Delete("/", h.deleteFile) +} + +func mustCurrentFileNode(c *fiber.Ctx) *virtualfs.Node { + return c.Locals("file").(*virtualfs.Node) +} + +func (h *HTTPHandler) currentFileMiddleware(c *fiber.Ctx) error { + account := account.CurrentAccount(c) + if account == nil { + return c.SendStatus(fiber.StatusUnauthorized) + } + + fileID := c.Params("fileID") + node, err := h.vfs.FindNodeByPublicID(c.Context(), h.db, account.ID, fileID) + if err != nil { + if errors.Is(err, virtualfs.ErrNodeNotFound) { + return 
c.SendStatus(fiber.StatusNotFound) + } + return httperr.Internal(err) + } + + c.Locals("file", node) + + return c.Next() +} + +func (h *HTTPHandler) fetchFile(c *fiber.Ctx) error { + node := mustCurrentFileNode(c) + i := FileInfo{ + ID: node.PublicID, + Name: node.Name, + Size: node.Size, + MimeType: node.MimeType, + CreatedAt: node.CreatedAt, + UpdatedAt: node.UpdatedAt, + } + if node.DeletedAt != nil { + i.DeletedAt = node.DeletedAt + } + + return c.JSON(i) +} + +func (h *HTTPHandler) downloadFile(c *fiber.Ctx) error { + node := mustCurrentFileNode(c) + + content, err := h.vfs.ReadFile(c.Context(), h.db, node) + if err != nil { + if errors.Is(err, virtualfs.ErrUnsupportedOperation) { + return c.SendStatus(fiber.StatusNotFound) + } + return httperr.Internal(err) + } + + if content.URL != "" { + return c.Redirect(content.URL, fiber.StatusTemporaryRedirect) + } + + if content.Reader != nil { + if node.MimeType != "" { + c.Set("Content-Type", node.MimeType) + } + if content.Size > 0 { + return c.SendStream(content.Reader, int(content.Size)) + } + return c.SendStream(content.Reader) + } + + return httperr.Internal(errors.New("vfs returned neither a reader nor a URL")) +} + +func (h *HTTPHandler) patchFile(c *fiber.Ctx) error { + node := mustCurrentFileNode(c) + + patch := new(patchFileRequest) + if err := c.BodyParser(patch); err != nil { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"}) + } + + tx, err := h.db.BeginTx(c.Context(), nil) + if err != nil { + return httperr.Internal(err) + } + defer tx.Rollback() + + if patch.Name != "" { + err := h.vfs.RenameNode(c.Context(), tx, node, patch.Name) + if err != nil { + if errors.Is(err, virtualfs.ErrNodeNotFound) { + return c.SendStatus(fiber.StatusNotFound) + } + return httperr.Internal(err) + } + } + + err = tx.Commit() + if err != nil { + return httperr.Internal(err) + } + + return c.JSON(FileInfo{ + ID: node.PublicID, + Name: 
node.Name, + Size: node.Size, + MimeType: node.MimeType, + CreatedAt: node.CreatedAt, + UpdatedAt: node.UpdatedAt, + DeletedAt: node.DeletedAt, + }) +} + +func (h *HTTPHandler) deleteFile(c *fiber.Ctx) error { + node := mustCurrentFileNode(c) + + tx, err := h.db.BeginTx(c.Context(), nil) + if err != nil { + return httperr.Internal(err) + } + defer tx.Rollback() + + shouldTrash := c.Query("trash") == "true" + if shouldTrash { + err = h.vfs.SoftDeleteNode(c.Context(), tx, node) + if err != nil { + return httperr.Internal(err) + } + + err = tx.Commit() + if err != nil { + return httperr.Internal(err) + } + + return c.JSON(FileInfo{ + ID: node.PublicID, + Name: node.Name, + Size: node.Size, + MimeType: node.MimeType, + CreatedAt: node.CreatedAt, + UpdatedAt: node.UpdatedAt, + DeletedAt: node.DeletedAt, + }) + } else { + err = h.vfs.PermanentlyDeleteNode(c.Context(), tx, node) + if err != nil { + return httperr.Internal(err) + } + + err = tx.Commit() + if err != nil { + return httperr.Internal(err) + } + + return c.SendStatus(fiber.StatusNoContent) + } +} diff --git a/apps/backend/internal/drexa/server.go b/apps/backend/internal/drexa/server.go index d6e4dc6..f386a8d 100644 --- a/apps/backend/internal/drexa/server.go +++ b/apps/backend/internal/drexa/server.go @@ -7,6 +7,7 @@ import ( "github.com/get-drexa/drexa/internal/account" "github.com/get-drexa/drexa/internal/auth" "github.com/get-drexa/drexa/internal/blob" + "github.com/get-drexa/drexa/internal/catalog" "github.com/get-drexa/drexa/internal/database" "github.com/get-drexa/drexa/internal/httperr" "github.com/get-drexa/drexa/internal/upload" @@ -54,7 +55,7 @@ func NewServer(c Config) (*Server, error) { return nil, fmt.Errorf("unknown storage backend: %s", c.Storage.Backend) } - err := blobStore.Initialize(context.Background()) + err := blobStore.Initialize(context.TODO()) if err != nil { return nil, fmt.Errorf("failed to initialize blob store: %w", err) } @@ -70,7 +71,7 @@ func NewServer(c Config) (*Server, error) { return nil, fmt.Errorf("unknown storage mode: %s", c.Storage.Mode) } - vfs, 
err := virtualfs.NewVirtualFS(blobStore, keyResolver) + vfs, err := virtualfs.New(blobStore, keyResolver) if err != nil { return nil, fmt.Errorf("failed to create virtual file system: %w", err) } @@ -82,16 +83,16 @@ func NewServer(c Config) (*Server, error) { SecretKey: c.JWT.SecretKey, }) uploadService := upload.NewService(vfs, blobStore) - accountService := account.NewService(userService) + accountService := account.NewService(userService, vfs) authMiddleware := auth.NewBearerAuthMiddleware(authService, db) api := app.Group("/api") - - accRouter := account.NewHTTPHandler(accountService, authService, db, authMiddleware).RegisterRoutes(api) - auth.NewHTTPHandler(authService, db).RegisterRoutes(api) - upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accRouter) + + accountRouter := account.NewHTTPHandler(accountService, authService, db, authMiddleware).RegisterRoutes(api) + auth.NewHTTPHandler(authService, db).RegisterRoutes(api) + upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accountRouter) + catalog.NewHTTPHandler(vfs, db).RegisterRoutes(accountRouter) s := &Server{ config: c, diff --git a/apps/backend/internal/virtualfs/file_content.go b/apps/backend/internal/virtualfs/file_content.go new file mode 100644 index 0000000..79bf646 --- /dev/null +++ b/apps/backend/internal/virtualfs/file_content.go @@ -0,0 +1,34 @@ +package virtualfs + +import ( + "io" + + "github.com/get-drexa/drexa/internal/blob" +) + +type FileContent struct { + Size int64 + Reader io.ReadCloser + BlobKey blob.Key + URL string +} + +func EmptyFileContent() FileContent { + return FileContent{} +} + +func FileContentFromReader(reader io.Reader) FileContent { + return FileContent{Reader: io.NopCloser(reader)} +} + +func FileContentFromReaderWithSize(reader io.Reader, size int64) FileContent { + return FileContent{Reader: io.NopCloser(reader), Size: size} +} + +func FileContentFromBlobKey(blobKey blob.Key) FileContent { + return FileContent{BlobKey: blobKey} +} + +func FileContentFromURL(url string) FileContent { + return FileContent{URL: url} 
+} diff --git a/apps/backend/internal/virtualfs/flat_key_resolver.go b/apps/backend/internal/virtualfs/flat_key_resolver.go index cccea2b..87e9deb 100644 --- a/apps/backend/internal/virtualfs/flat_key_resolver.go +++ b/apps/backend/internal/virtualfs/flat_key_resolver.go @@ -5,6 +5,7 @@ import ( "github.com/get-drexa/drexa/internal/blob" "github.com/google/uuid" + "github.com/uptrace/bun" ) type FlatKeyResolver struct{} @@ -19,7 +20,7 @@ func (r *FlatKeyResolver) ShouldPersistKey() bool { return true } -func (r *FlatKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) { +func (r *FlatKeyResolver) Resolve(ctx context.Context, db bun.IDB, node *Node) (blob.Key, error) { if node.BlobKey == "" { id, err := uuid.NewV7() if err != nil { diff --git a/apps/backend/internal/virtualfs/hierarchical_key_resolver.go b/apps/backend/internal/virtualfs/hierarchical_key_resolver.go index eb55552..720dfec 100644 --- a/apps/backend/internal/virtualfs/hierarchical_key_resolver.go +++ b/apps/backend/internal/virtualfs/hierarchical_key_resolver.go @@ -22,8 +22,8 @@ func (r *HierarchicalKeyResolver) ShouldPersistKey() bool { return false } -func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) { - path, err := buildNodeAbsolutePath(ctx, r.db, node.ID) +func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, db bun.IDB, node *Node) (blob.Key, error) { + path, err := buildNodeAbsolutePath(ctx, db, node) if err != nil { return "", err } @@ -32,7 +32,7 @@ func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob } func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) { - path, err := buildNodeAbsolutePath(ctx, r.db, node.ID) + path, err := buildNodeAbsolutePath(ctx, r.db, node) if err != nil { return nil, err } diff --git a/apps/backend/internal/virtualfs/key_resolver.go b/apps/backend/internal/virtualfs/key_resolver.go index 
82bc734..7b2def4 100644 --- a/apps/backend/internal/virtualfs/key_resolver.go +++ b/apps/backend/internal/virtualfs/key_resolver.go @@ -4,6 +4,7 @@ import ( "context" "github.com/get-drexa/drexa/internal/blob" + "github.com/uptrace/bun" ) type BlobKeyResolver interface { @@ -11,7 +12,7 @@ type BlobKeyResolver interface { // Flat keys (e.g. UUIDs) return true - key is generated once and stored. // Hierarchical keys return false - key is derived from path each time. ShouldPersistKey() bool - Resolve(ctx context.Context, node *Node) (blob.Key, error) + Resolve(ctx context.Context, db bun.IDB, node *Node) (blob.Key, error) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) } diff --git a/apps/backend/internal/virtualfs/node.go b/apps/backend/internal/virtualfs/node.go index ccd81d8..534ec52 100644 --- a/apps/backend/internal/virtualfs/node.go +++ b/apps/backend/internal/virtualfs/node.go @@ -37,9 +37,9 @@ type Node struct { Size int64 `bun:"size"` MimeType string `bun:"mime_type,nullzero"` - CreatedAt time.Time `bun:"created_at,notnull,nullzero"` - UpdatedAt time.Time `bun:"updated_at,notnull,nullzero"` - DeletedAt time.Time `bun:"deleted_at,nullzero"` + CreatedAt time.Time `bun:"created_at,notnull,nullzero"` + UpdatedAt time.Time `bun:"updated_at,notnull,nullzero"` + DeletedAt *time.Time `bun:"deleted_at,nullzero"` } func newNodeID() (uuid.UUID, error) { @@ -49,5 +49,5 @@ func newNodeID() (uuid.UUID, error) { // IsAccessible returns true if the node can be accessed. // If the node is not ready or if it is soft deleted, it cannot be accessed. 
func (n *Node) IsAccessible() bool { - return n.DeletedAt.IsZero() && n.Status == NodeStatusReady + return n.DeletedAt == nil && n.Status == NodeStatusReady } diff --git a/apps/backend/internal/virtualfs/path.go b/apps/backend/internal/virtualfs/path.go index 19e5799..0fe86d8 100644 --- a/apps/backend/internal/virtualfs/path.go +++ b/apps/backend/internal/virtualfs/path.go @@ -6,7 +6,6 @@ import ( "errors" "strings" - "github.com/google/uuid" "github.com/uptrace/bun" ) @@ -19,7 +18,6 @@ const absolutePathQuery = `WITH RECURSIVE path AS ( SELECT n.id, n.parent_id, n.name, p.depth + 1 FROM vfs_nodes n JOIN path p ON n.id = p.parent_id - WHERE n.deleted_at IS NULL ) SELECT name FROM path WHERE EXISTS (SELECT 1 FROM path WHERE parent_id IS NULL) @@ -29,9 +27,9 @@ func JoinPath(parts ...string) string { return strings.Join(parts, "/") } -func buildNodeAbsolutePath(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) { +func buildNodeAbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) { var path []string - err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path) + err := db.NewRaw(absolutePathQuery, node.ID).Scan(ctx, &path) if err != nil { if errors.Is(err, sql.ErrNoRows) { return "", ErrNodeNotFound diff --git a/apps/backend/internal/virtualfs/vfs.go b/apps/backend/internal/virtualfs/vfs.go index b7a3a3b..a651173 100644 --- a/apps/backend/internal/virtualfs/vfs.go +++ b/apps/backend/internal/virtualfs/vfs.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "errors" "io" + "time" "github.com/gabriel-vasile/mimetype" "github.com/get-drexa/drexa/internal/blob" @@ -36,22 +37,9 @@ type CreateFileOptions struct { Name string } -type FileContent struct { - reader io.Reader - blobKey blob.Key -} - const RootDirectoryName = "root" -func FileContentFromReader(reader io.Reader) FileContent { - return FileContent{reader: reader} -} - -func FileContentFromBlobKey(blobKey blob.Key) FileContent { - return FileContent{blobKey: blobKey} -} - -func 
NewVirtualFS(blobStore blob.Store, keyResolver BlobKeyResolver) (*VirtualFS, error) { +func New(blobStore blob.Store, keyResolver BlobKeyResolver) (*VirtualFS, error) { sqid, err := sqids.New() if err != nil { return nil, err @@ -86,7 +74,6 @@ func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accoun Where("account_id = ?", accountID). Where("public_id = ?", publicID). Where("status = ?", NodeStatusReady). - Where("deleted_at IS NULL"). Scan(ctx) if err != nil { if errors.Is(err, sql.ErrNoRows) { @@ -141,7 +128,7 @@ func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, accountID uuid } if vfs.keyResolver.ShouldPersistKey() { - node.BlobKey, err = vfs.keyResolver.Resolve(ctx, &node) + node.BlobKey, err = vfs.keyResolver.Resolve(ctx, db, &node) if err != nil { return nil, err } @@ -159,31 +146,31 @@ func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, accountID uuid } func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, content FileContent) error { - if content.reader == nil && content.blobKey.IsNil() { + if content.Reader == nil && content.BlobKey.IsNil() { return blob.ErrInvalidFileContent } - if !node.DeletedAt.IsZero() { + if node.DeletedAt != nil { return ErrNodeNotFound } setCols := make([]string, 0, 4) - if content.reader != nil { - key, err := vfs.keyResolver.Resolve(ctx, node) + if content.Reader != nil { + key, err := vfs.keyResolver.Resolve(ctx, db, node) if err != nil { return err } buf := make([]byte, 3072) - n, err := io.ReadFull(content.reader, buf) + n, err := io.ReadFull(content.Reader, buf) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return err } buf = buf[:n] mt := mimetype.Detect(buf) - cr := ioext.NewCountingReader(io.MultiReader(bytes.NewReader(buf), content.reader)) + cr := ioext.NewCountingReader(io.MultiReader(bytes.NewReader(buf), content.Reader)) err = vfs.blobStore.Put(ctx, key, cr) if err != nil { @@ -201,9 +188,9 @@ func (vfs *VirtualFS) 
WriteFile(ctx context.Context, db bun.IDB, node *Node, con setCols = append(setCols, "mime_type", "size", "status") } else { - node.BlobKey = content.blobKey + node.BlobKey = content.BlobKey - b, err := vfs.blobStore.ReadRange(ctx, content.blobKey, 0, 3072) + b, err := vfs.blobStore.ReadRange(ctx, content.BlobKey, 0, 3072) if err != nil { return err } @@ -219,7 +206,7 @@ func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, con node.MimeType = mt.String() node.Status = NodeStatusReady - s, err := vfs.blobStore.ReadSize(ctx, content.blobKey) + s, err := vfs.blobStore.ReadSize(ctx, content.BlobKey) if err != nil { return err } @@ -239,6 +226,34 @@ func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, con return nil } +func (vfs *VirtualFS) ReadFile(ctx context.Context, db bun.IDB, node *Node) (FileContent, error) { + if node.Kind != NodeKindFile { + return EmptyFileContent(), ErrUnsupportedOperation + } + + key, err := vfs.keyResolver.Resolve(ctx, db, node) + if err != nil { + return EmptyFileContent(), err + } + + if vfs.blobStore.SupportsDirectDownload() { + url, err := vfs.blobStore.GenerateDownloadURL(ctx, key, blob.DownloadURLOptions{ + Duration: 1 * time.Hour, + }) + if err != nil { + return EmptyFileContent(), err + } + return FileContentFromURL(url), nil + } + + reader, err := vfs.blobStore.Read(ctx, key) + if err != nil { + return EmptyFileContent(), err + } + + return FileContentFromReaderWithSize(reader, node.Size), nil +} + func (vfs *VirtualFS) CreateDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID, parentID uuid.UUID, name string) (*Node, error) { pid, err := vfs.generatePublicID() if err != nil { @@ -319,7 +334,12 @@ func (vfs *VirtualFS) RenameNode(ctx context.Context, db bun.IDB, node *Node, na return ErrNodeNotFound } - _, err := db.NewUpdate().Model(node). + oldKey, err := vfs.keyResolver.Resolve(ctx, db, node) + if err != nil { + return err + } + + _, err = db.NewUpdate().Model(node). 
WherePK(). Where("status = ?", NodeStatusReady). Where("deleted_at IS NULL"). @@ -332,6 +352,30 @@ func (vfs *VirtualFS) RenameNode(ctx context.Context, db bun.IDB, node *Node, na } return err } + + newKey, err := vfs.keyResolver.Resolve(ctx, db, node) + if err != nil { + return err + } + + if oldKey != newKey { + err = vfs.blobStore.Move(ctx, oldKey, newKey) + if err != nil { + return err + } + + if vfs.keyResolver.ShouldPersistKey() { + node.BlobKey = newKey + _, err = db.NewUpdate().Model(node). + WherePK(). + Set("blob_key = ?", newKey). + Exec(ctx) + if err != nil { + return err + } + } + } + return nil } @@ -340,7 +384,7 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, pare return ErrNodeNotFound } - oldKey, err := vfs.keyResolver.Resolve(ctx, node) + oldKey, err := vfs.keyResolver.Resolve(ctx, db, node) if err != nil { return err } @@ -362,7 +406,7 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, pare return err } - newKey, err := vfs.keyResolver.Resolve(ctx, node) + newKey, err := vfs.keyResolver.Resolve(ctx, db, node) if err != nil { return err } @@ -390,13 +434,10 @@ func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) if !node.IsAccessible() { return "", ErrNodeNotFound } - return buildNodeAbsolutePath(ctx, db, node.ID) + return buildNodeAbsolutePath(ctx, db, node) } func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error { - if !node.IsAccessible() { - return ErrNodeNotFound - } switch node.Kind { case NodeKindFile: return vfs.permanentlyDeleteFileNode(ctx, db, node) @@ -408,7 +449,12 @@ func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, nod } func (vfs *VirtualFS) permanentlyDeleteFileNode(ctx context.Context, db bun.IDB, node *Node) error { - err := vfs.blobStore.Delete(ctx, node.BlobKey) + key, err := vfs.keyResolver.Resolve(ctx, db, node) + if err != nil { + return err + } + + err = 
vfs.blobStore.Delete(ctx, key) if err != nil { return err }