feat: support bulk file move in same dir

This commit is contained in:
2025-12-13 19:24:54 +00:00
parent 085bbd4ffe
commit 918b85dfd5
7 changed files with 227 additions and 28 deletions

View File

@@ -34,3 +34,14 @@ func (r *FlatKeyResolver) Resolve(ctx context.Context, db bun.IDB, node *Node) (
// ResolveDeletionKeys returns a deletion plan that removes every provided key
// verbatim; flat key storage has no path prefix to collapse deletions under.
func (r *FlatKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
	plan := &DeletionPlan{Keys: allKeys}
	return plan, nil
}
// ResolveBulkMoveOps returns nil for flat key storage since blob keys are UUIDs
// and don't change when nodes are moved to a different parent.
//
// It still enforces the bulk-move precondition that all nodes share the same
// current parent, returning ErrUnsupportedOperation otherwise.
func (r *FlatKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error) {
	// Guard the empty slice: nodes[1:] (and nodes[0] below) would panic on it.
	// This also mirrors HierarchicalKeyResolver.ResolveBulkMoveOps, which
	// returns (nil, nil) for an empty input.
	if len(nodes) == 0 {
		return nil, nil
	}
	for _, node := range nodes[1:] {
		if node.ParentID != nodes[0].ParentID {
			return nil, ErrUnsupportedOperation
		}
	}
	return nil, nil
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -38,3 +39,41 @@ func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node
}
return &DeletionPlan{Prefix: blob.Key(path)}, nil
}
// ResolveBulkMoveOps computes blob move operations for nodes being moved to a new parent.
// This implementation optimizes by computing parent paths only once (2 queries total),
// rather than computing the full path for each node individually (N queries).
func (r *HierarchicalKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error) {
	if len(nodes) == 0 {
		return nil, nil
	}

	first := nodes[0]
	// Every node must currently live under the same parent; otherwise the
	// single precomputed source path below would be wrong for some of them.
	for _, n := range nodes[1:] {
		if n.ParentID != first.ParentID {
			return nil, ErrUnsupportedOperation
		}
	}

	srcParentPath, err := buildPathFromNodeID(ctx, db, first.ParentID)
	if err != nil {
		return nil, err
	}
	dstParentPath, err := buildPathFromNodeID(ctx, db, newParentID)
	if err != nil {
		return nil, err
	}

	// Derive each node's old and new blob keys from the shared parent paths.
	ops := make([]BlobMoveOp, 0, len(nodes))
	for _, n := range nodes {
		ops = append(ops, BlobMoveOp{
			OldKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, srcParentPath, n.Name)),
			NewKey: blob.Key(fmt.Sprintf("%s/%s/%s", first.AccountID, dstParentPath, n.Name)),
		})
	}
	return ops, nil
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -14,9 +15,20 @@ type BlobKeyResolver interface {
ShouldPersistKey() bool
Resolve(ctx context.Context, db bun.IDB, node *Node) (blob.Key, error)
ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error)
// ResolveBulkMoveOps returns blob move operations for nodes being moved to a new parent.
// Returns ErrUnsupportedOperation if nodes don't all share the same parent.
// Returns nil, nil if no blob moves are needed (e.g., flat key storage where keys are UUIDs).
ResolveBulkMoveOps(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) ([]BlobMoveOp, error)
}
// DeletionPlan describes which stored blobs should be removed for a deletion.
// The visible resolvers populate exactly one field each: hierarchical key
// storage returns a Prefix (delete everything under a path), while flat key
// storage returns an explicit Keys list.
type DeletionPlan struct {
// Prefix, when set, means "delete every blob whose key starts with this".
Prefix blob.Key
// Keys, when set, lists the exact blob keys to delete.
Keys []blob.Key
}
// BlobMoveOp represents a blob move operation from OldKey to NewKey.
type BlobMoveOp struct {
// OldKey is the blob's current storage key.
OldKey blob.Key
// NewKey is the storage key the blob should be moved to.
NewKey blob.Key
}

View File

@@ -6,6 +6,7 @@ import (
"errors"
"strings"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
@@ -51,8 +52,12 @@ func JoinPath(parts ...string) string {
}
// buildNodeAbsolutePathString resolves the absolute path string for node by
// delegating to the ID-based lookup.
func buildNodeAbsolutePathString(ctx context.Context, db bun.IDB, node *Node) (string, error) {
	path, err := buildPathFromNodeID(ctx, db, node.ID)
	return path, err
}
func buildPathFromNodeID(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) {
var path []string
err := db.NewRaw(absolutePathQuery, node.ID).Scan(ctx, &path)
err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return "", ErrNodeNotFound

View File

@@ -69,19 +69,32 @@ func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, accountID, fileI
}
// FindNodeByPublicID looks up a single ready node by its public ID within an
// account, delegating to the batch FindNodesByPublicID and returning the first
// (and, presumably, only — TODO confirm public_id uniqueness) match.
// Returns ErrNodeNotFound when no accessible node has that public ID.
// NOTE(review): this span is rendered diff residue — the next two lines
// ("var node Node" and the direct NewSelect) are the removed pre-change
// implementation, superseded by the FindNodesByPublicID delegation below.
func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicID string) (*Node, error) {
var node Node
err := db.NewSelect().Model(&node).
nodes, err := vfs.FindNodesByPublicID(ctx, db, accountID, []string{publicID})
if err != nil {
return nil, err
}
// An empty batch result maps to the single-lookup sentinel error.
if len(nodes) == 0 {
return nil, ErrNodeNotFound
}
return nodes[0], nil
}
// FindNodesByPublicID fetches all ready nodes matching the given public IDs
// within an account. Returns (nil, nil) for an empty input; IDs with no match
// are silently absent from the result (no per-ID not-found error).
// NOTE(review): this span contains rendered diff residue — the
// `public_id = ?` Where line and the `return &node, nil` line are the removed
// pre-change single-node implementation.
func (vfs *VirtualFS) FindNodesByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicIDs []string) ([]*Node, error) {
if len(publicIDs) == 0 {
return nil, nil
}
var nodes []*Node
err := db.NewSelect().Model(&nodes).
Where("account_id = ?", accountID).
Where("public_id = ?", publicID).
Where("public_id IN (?)", bun.In(publicIDs)).
Where("status = ?", NodeStatusReady).
Scan(ctx)
if err != nil {
// NOTE(review): a multi-row Scan into a slice typically yields an empty
// slice rather than sql.ErrNoRows — this branch may be unreachable; verify.
if errors.Is(err, sql.ErrNoRows) {
return nil, ErrNodeNotFound
}
return nil, err
}
return &node, nil
return nodes, nil
}
func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node) ([]*Node, error) {
@@ -430,11 +443,64 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, pare
return nil
}
func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) {
if !node.IsAccessible() {
return "", ErrNodeNotFound
// MoveNodesInSameDirectory moves multiple nodes to a new parent directory in a single operation.
// All nodes MUST have the same current parent directory; this constraint enables an
// optimization where parent paths are computed only once (2 recursive queries total)
// rather than computing full paths for each node individually (N queries).
func (vfs *VirtualFS) MoveNodesInSameDirectory(ctx context.Context, db bun.IDB, nodes []*Node, newParentID uuid.UUID) error {
if len(nodes) == 0 {
return nil
}
return buildNodeAbsolutePathString(ctx, db, node)
// Validate all nodes are accessible
nodeIDs := make([]uuid.UUID, len(nodes))
for i, node := range nodes {
if !node.IsAccessible() {
return ErrNodeNotFound
}
nodeIDs[i] = node.ID
}
// NOTE(review): the stray buildNodeAbsolutePathString line above is rendered
// diff residue from the removed AbsolutePath method, not part of this function.
// Compute blob moves first so a resolver error aborts before any DB write.
moveOps, err := vfs.keyResolver.ResolveBulkMoveOps(ctx, db, nodes, newParentID)
if err != nil {
return err
}
_, err = db.NewUpdate().
Model((*Node)(nil)).
Where("id IN (?)", bun.In(nodeIDs)).
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Set("parent_id = ?", newParentID).
Exec(ctx)
if err != nil {
// A sibling with the same name already exists under the new parent.
if database.IsUniqueViolation(err) {
return ErrNodeConflict
}
return err
}
// NOTE(review): rows-affected is not checked — a node filtered out by the
// status/deleted_at predicates keeps its old parent in the DB, yet its blob
// below is still moved. Confirm callers pre-validate, or check the count.
// NOTE(review): blob moves happen after the DB update with no rollback; a
// mid-loop Move failure leaves DB and blob storage inconsistent — confirm
// this runs inside a transaction/cleanup path at a higher layer.
for _, op := range moveOps {
if op.OldKey != op.NewKey {
err = vfs.blobStore.Move(ctx, op.OldKey, op.NewKey)
if err != nil {
return err
}
}
}
// Keep the in-memory node structs in sync with the persisted parent change.
for _, node := range nodes {
node.ParentID = newParentID
}
return nil
}
// RealPath resolves the absolute Path for node, or returns ErrNodeNotFound
// when the node is not accessible.
func (vfs *VirtualFS) RealPath(ctx context.Context, db bun.IDB, node *Node) (Path, error) {
	if node.IsAccessible() {
		return buildNoteAbsolutePath(ctx, db, node)
	}
	return nil, ErrNodeNotFound
}
func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
@@ -564,10 +630,3 @@ func (vfs *VirtualFS) generatePublicID() (string, error) {
n := binary.BigEndian.Uint64(b[:])
return vfs.sqid.Encode([]uint64{n})
}
// RealPath resolves the absolute Path for node, failing with ErrNodeNotFound
// when the node is not accessible.
// NOTE(review): this copy is diff residue showing RealPath at its old location
// in the file; the same function also appears earlier in this diff.
// NOTE(review): the helper name "buildNoteAbsolutePath" looks like a typo of
// "buildNodeAbsolutePath" — confirm against the helper's definition.
func (vfs *VirtualFS) RealPath(ctx context.Context, db bun.IDB, node *Node) (Path, error) {
if !node.IsAccessible() {
return nil, ErrNodeNotFound
}
return buildNoteAbsolutePath(ctx, db, node)
}