feat: support bulk file move in same dir

This commit is contained in:
2025-12-13 19:24:54 +00:00
parent 085bbd4ffe
commit 918b85dfd5
7 changed files with 227 additions and 28 deletions

View File

@@ -2,6 +2,8 @@ package catalog
import (
"errors"
"slices"
"strings"
"time"
"github.com/get-drexa/drexa/internal/account"
@@ -30,6 +32,10 @@ type createDirectoryRequest struct {
Name string `json:"name"`
}
// postDirectoryContentRequest is the request body for
// POST /directories/:directoryID/content.
type postDirectoryContentRequest struct {
	// Items lists the public IDs of the nodes to move into the directory.
	Items []string `json:"items"`
}
func (h *HTTPHandler) currentDirectoryMiddleware(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
@@ -54,6 +60,10 @@ func mustCurrentDirectoryNode(c *fiber.Ctx) *virtualfs.Node {
return c.Locals("directory").(*virtualfs.Node)
}
// includeParam parses the comma-separated "include" query parameter into a
// list of include flags. It returns nil when the parameter is absent or
// empty; the previous strings.Split on "" produced a one-element slice
// containing the empty string, which callers using slices.Contains never
// matched but which was still a misleading value.
func includeParam(c *fiber.Ctx) []string {
	q := c.Query("include")
	if q == "" {
		return nil
	}
	return strings.Split(q, ",")
}
func (h *HTTPHandler) createDirectory(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
@@ -91,19 +101,30 @@ func (h *HTTPHandler) createDirectory(c *fiber.Ctx) error {
return httperr.Internal(err)
}
err = tx.Commit()
if err != nil {
return httperr.Internal(err)
}
return c.JSON(DirectoryInfo{
i := DirectoryInfo{
Kind: DirItemKindDirectory,
ID: node.PublicID,
Name: node.Name,
CreatedAt: node.CreatedAt,
UpdatedAt: node.UpdatedAt,
DeletedAt: node.DeletedAt,
})
}
include := includeParam(c)
if slices.Contains(include, "path") {
p, err := h.vfs.RealPath(c.Context(), tx, node)
if err != nil {
return httperr.Internal(err)
}
i.Path = p
}
err = tx.Commit()
if err != nil {
return httperr.Internal(err)
}
return c.JSON(i)
}
func (h *HTTPHandler) fetchDirectory(c *fiber.Ctx) error {
@@ -118,8 +139,8 @@ func (h *HTTPHandler) fetchDirectory(c *fiber.Ctx) error {
DeletedAt: node.DeletedAt,
}
include := c.Query("include")
if include == "path" {
include := includeParam(c)
if slices.Contains(include, "path") {
p, err := h.vfs.RealPath(c.Context(), h.db, node)
if err != nil {
return httperr.Internal(err)
@@ -237,3 +258,54 @@ func (h *HTTPHandler) deleteDirectory(c *fiber.Ctx) error {
return c.SendStatus(fiber.StatusNoContent)
}
// moveItemsToDirectory handles POST /directories/:directoryID/content.
// It moves every node named in the request body into the directory resolved
// by currentDirectoryMiddleware, all within a single transaction.
func (h *HTTPHandler) moveItemsToDirectory(c *fiber.Ctx) error {
	owner := account.CurrentAccount(c)
	if owner == nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}
	dest := mustCurrentDirectoryNode(c)

	var body postDirectoryContentRequest
	if err := c.BodyParser(&body); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
	}
	// An empty batch is a successful no-op; skip the transaction entirely.
	if len(body.Items) == 0 {
		return c.SendStatus(fiber.StatusNoContent)
	}

	tx, err := h.db.BeginTx(c.Context(), nil)
	if err != nil {
		return httperr.Internal(err)
	}
	defer tx.Rollback()

	items, err := h.vfs.FindNodesByPublicID(c.Context(), tx, owner.ID, body.Items)
	if err != nil {
		return httperr.Internal(err)
	}
	// Every requested public ID must resolve to an accessible node.
	if len(items) != len(body.Items) {
		return c.Status(fiber.StatusNotFound).JSON(fiber.Map{"error": "One or more items not found"})
	}

	// Move all nodes to the target directory, mapping domain errors to HTTP.
	switch err := h.vfs.MoveNodesInSameDirectory(c.Context(), tx, items, dest.ID); {
	case errors.Is(err, virtualfs.ErrUnsupportedOperation):
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "All items must be in the same directory"})
	case errors.Is(err, virtualfs.ErrNodeConflict):
		return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "Name conflict in target directory"})
	case err != nil:
		return httperr.Internal(err)
	}

	if err := tx.Commit(); err != nil {
		return httperr.Internal(err)
	}
	return c.SendStatus(fiber.StatusNoContent)
}

View File

@@ -36,6 +36,7 @@ func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
dg := api.Group("/directories/:directoryID")
dg.Use(h.currentDirectoryMiddleware)
dg.Get("/", h.fetchDirectory)
dg.Post("/content", h.moveItemsToDirectory)
dg.Get("/content", h.listDirectory)
dg.Patch("/", h.patchDirectory)
dg.Delete("/", h.deleteDirectory)

View File

@@ -34,3 +34,14 @@ func (r *FlatKeyResolver) Resolve(ctx context.Context, db bun.IDB, node *Node) (
// ResolveDeletionKeys returns a deletion plan that removes exactly the
// provided keys; flat key storage has no path prefix structure to expand.
func (r *FlatKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
	plan := &DeletionPlan{Keys: allKeys}
	return plan, nil
}
// ResolveBulkMoveOps returns nil for flat key storage since blob keys are UUIDs
// and don't change when nodes are moved to a different parent. It still
// validates that all nodes share the same current parent so that the flat and
// hierarchical resolvers enforce the same contract, returning
// ErrUnsupportedOperation when the parents differ.
func (r *FlatKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error) {
	// Guard the nodes[0]/nodes[1:] accesses below: slicing an empty slice
	// past its capacity panics, so an empty batch must be a no-op (the
	// hierarchical resolver has the same guard).
	if len(nodes) == 0 {
		return nil, nil
	}
	parentID := nodes[0].ParentID
	for _, node := range nodes[1:] {
		if node.ParentID != parentID {
			return nil, ErrUnsupportedOperation
		}
	}
	return nil, nil
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -38,3 +39,41 @@ func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node
}
return &DeletionPlan{Prefix: blob.Key(path)}, nil
}
// ResolveBulkMoveOps computes the blob move operations required when nodes are
// relocated under a new parent. Because every node must share the same current
// parent, the old and new parent paths are each resolved exactly once (two
// recursive queries total) instead of resolving a full path per node.
// Returns ErrUnsupportedOperation when the nodes do not all share one parent.
func (r *HierarchicalKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error) {
	if len(nodes) == 0 {
		return nil, nil
	}

	first := nodes[0]
	for _, n := range nodes[1:] {
		if n.ParentID != first.ParentID {
			return nil, ErrUnsupportedOperation
		}
	}

	// Resolve both parent directories' paths up front.
	srcPath, err := buildPathFromNodeID(ctx, db, first.ParentID)
	if err != nil {
		return nil, err
	}
	dstPath, err := buildPathFromNodeID(ctx, db, newParentID)
	if err != nil {
		return nil, err
	}

	// Derive each node's old and new key from the precomputed parent paths.
	// NOTE(review): keys use nodes[0].AccountID for the whole batch — assumes
	// all nodes belong to one account; confirm callers guarantee this.
	ops := make([]BlobMoveOp, 0, len(nodes))
	for _, n := range nodes {
		ops = append(ops, BlobMoveOp{
			OldKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, srcPath, n.Name)),
			NewKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, dstPath, n.Name)),
		})
	}
	return ops, nil
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -14,9 +15,20 @@ type BlobKeyResolver interface {
ShouldPersistKey() bool
Resolve(ctx context.Context, db bun.IDB, node *Node) (blob.Key, error)
ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error)
// ResolveBulkMoveOps returns blob move operations for nodes being moved to a new parent.
// Returns ErrUnsupportedOperation if nodes don't all share the same parent.
// Returns nil, nil if no blob moves are needed (e.g., flat key storage where keys are UUIDs).
ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error)
}
// DeletionPlan describes which blobs to remove when a node is deleted.
// Resolvers populate one of the two fields: hierarchical key storage sets
// Prefix (delete everything beneath a path), while flat key storage sets
// the explicit Keys to delete.
type DeletionPlan struct {
	// Prefix, when set, selects every blob key beneath it for deletion.
	Prefix blob.Key
	// Keys, when set, lists the exact blob keys to delete.
	Keys []blob.Key
}
// BlobMoveOp represents a blob move operation from OldKey to NewKey.
// Ops with OldKey == NewKey are treated as no-ops by the executor.
type BlobMoveOp struct {
	// OldKey is the blob's current storage key.
	OldKey blob.Key
	// NewKey is the storage key the blob should be moved to.
	NewKey blob.Key
}

View File

@@ -6,6 +6,7 @@ import (
"errors"
"strings"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -51,8 +52,12 @@ func JoinPath(parts ...string) string {
}
// buildNodeAbsolutePathString returns the absolute path string for node,
// delegating to buildPathFromNodeID with the node's primary ID.
func buildNodeAbsolutePathString(ctx context.Context, db bun.IDB, node *Node) (string, error) {
	return buildPathFromNodeID(ctx, db, node.ID)
}
func buildPathFromNodeID(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) {
var path []string
err := db.NewRaw(absolutePathQuery, node.ID).Scan(ctx, &path)
err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return "", ErrNodeNotFound

View File

@@ -69,19 +69,32 @@ func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, accountID, fileI
}
func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicID string) (*Node, error) {
var node Node
err := db.NewSelect().Model(&node).
nodes, err := vfs.FindNodesByPublicID(ctx, db, accountID, []string{publicID})
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, ErrNodeNotFound
}
return nodes[0], nil
}
// FindNodesByPublicID fetches all ready nodes owned by accountID whose public
// IDs appear in publicIDs. Returns nil for an empty ID list. Missing IDs are
// not an error: the result may be shorter than publicIDs, and callers compare
// lengths to detect not-found items. Duplicate entries in publicIDs match a
// single row each, so they also surface as a length mismatch.
func (vfs *VirtualFS) FindNodesByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicIDs []string) ([]*Node, error) {
	if len(publicIDs) == 0 {
		return nil, nil
	}
	var nodes []*Node
	err := db.NewSelect().Model(&nodes).
		Where("account_id = ?", accountID).
		Where("public_id IN (?)", bun.In(publicIDs)).
		Where("status = ?", NodeStatusReady).
		Scan(ctx)
	if err != nil {
		// Kept for safety; a multi-row scan into a slice normally returns an
		// empty slice rather than sql.ErrNoRows.
		if errors.Is(err, sql.ErrNoRows) {
			return nil, ErrNodeNotFound
		}
		return nil, err
	}
	return nodes, nil
}
func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node) ([]*Node, error) {
@@ -430,11 +443,64 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, pare
return nil
}
func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) {
if !node.IsAccessible() {
return "", ErrNodeNotFound
// MoveNodesInSameDirectory moves multiple nodes to a new parent directory in a
// single operation. All nodes MUST have the same current parent directory;
// this constraint enables an optimization where parent paths are computed only
// once (2 recursive queries total) rather than computing full paths for each
// node individually (N queries).
//
// Returns ErrNodeNotFound if any node is not accessible,
// ErrUnsupportedOperation (via the key resolver) if the nodes do not all share
// one parent, and ErrNodeConflict on a name collision in the target directory.
func (vfs *VirtualFS) MoveNodesInSameDirectory(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) error {
	if len(nodes) == 0 {
		return nil
	}

	// Validate all nodes are accessible and collect their IDs for the update.
	nodeIDs := make([]uuid.UUID, len(nodes))
	for i, node := range nodes {
		if !node.IsAccessible() {
			return ErrNodeNotFound
		}
		nodeIDs[i] = node.ID
	}

	// Ask the key resolver which blobs must be relocated (nil for flat keys).
	// This call also enforces the same-parent constraint.
	moveOps, err := vfs.keyResolver.ResolveBulkMoveOps(ctx, db, nodes, newParentID)
	if err != nil {
		return err
	}

	_, err = db.NewUpdate().
		Model((*Node)(nil)).
		Where("id IN (?)", bun.In(nodeIDs)).
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
		Set("parent_id = ?", newParentID).
		Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return ErrNodeConflict
		}
		return err
	}
	// NOTE(review): the affected-row count is not checked, so nodes filtered
	// out by the status/deleted_at guards are silently skipped — confirm this
	// is intended.

	// Relocate the backing blobs after the rows are updated.
	// NOTE(review): if a blob move fails partway through, earlier blob moves
	// are not rolled back even though the caller's DB transaction is — verify
	// the blob store tolerates this or add compensation.
	for _, op := range moveOps {
		if op.OldKey != op.NewKey {
			if err := vfs.blobStore.Move(ctx, op.OldKey, op.NewKey); err != nil {
				return err
			}
		}
	}

	// Keep the in-memory nodes consistent with the database.
	for _, node := range nodes {
		node.ParentID = newParentID
	}
	return nil
}
// RealPath resolves the node's absolute path within the virtual filesystem.
// Returns ErrNodeNotFound when the node is not accessible.
// NOTE(review): the helper is spelled buildNoteAbsolutePath ("Note" vs
// "Node") — presumably a typo at its definition site; confirm before renaming.
func (vfs *VirtualFS) RealPath(ctx context.Context, db bun.IDB, node *Node) (Path, error) {
	if !node.IsAccessible() {
		return nil, ErrNodeNotFound
	}
	return buildNoteAbsolutePath(ctx, db, node)
}
func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
@@ -564,10 +630,3 @@ func (vfs *VirtualFS) generatePublicID() (string, error) {
n := binary.BigEndian.Uint64(b[:])
return vfs.sqid.Encode([]uint64{n})
}
func (vfs *VirtualFS) RealPath(ctx context.Context, db bun.IDB, node *Node) (Path, error) {
if !node.IsAccessible() {
return nil, ErrNodeNotFound
}
return buildNoteAbsolutePath(ctx, db, node)
}