feat: support bulk file move in same dir

This commit is contained in:
2025-12-13 19:24:54 +00:00
parent 085bbd4ffe
commit 918b85dfd5
7 changed files with 227 additions and 28 deletions

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -38,3 +39,41 @@ func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node
}
return &DeletionPlan{Prefix: blob.Key(path)}, nil
}
// ResolveBulkMoveOps computes blob move operations for nodes being moved to a new parent.
// It optimizes by resolving the source and destination parent paths only once
// (2 queries total), rather than computing the full path for each node
// individually (N queries).
//
// Every node in the batch must currently share the same parent; a mixed batch
// yields ErrUnsupportedOperation. An empty batch yields a nil, nil result.
func (r *HierarchicalKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error) {
	if len(nodes) == 0 {
		return nil, nil
	}

	// The first node defines the account and the common source parent;
	// reject any batch whose members do not all share that parent.
	first := nodes[0]
	for _, n := range nodes[1:] {
		if n.ParentID != first.ParentID {
			return nil, ErrUnsupportedOperation
		}
	}

	// Resolve both parent paths up front — one query each — so the per-node
	// loop below does no further database work.
	srcParentPath, err := buildPathFromNodeID(ctx, db, first.ParentID)
	if err != nil {
		return nil, err
	}
	dstParentPath, err := buildPathFromNodeID(ctx, db, newParentID)
	if err != nil {
		return nil, err
	}

	// Derive each node's old and new blob keys from the shared parent paths.
	ops := make([]BlobMoveOp, 0, len(nodes))
	for _, n := range nodes {
		ops = append(ops, BlobMoveOp{
			OldKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, srcParentPath, n.Name)),
			NewKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, dstParentPath, n.Name)),
		})
	}
	return ops, nil
}