21 Commits

SHA1 Message Date
3e96c42c4a fix: handle missing expected err cases 2025-11-30 20:08:31 +00:00
6c61cbe1fd fix: hierarchical keys should have acc id prefix 2025-11-30 19:49:13 +00:00
ccdeaf0364 fix: some weird upload bugs 2025-11-30 19:39:47 +00:00
a3110b67c3 fix: vfs node primary id not generated properly 2025-11-30 19:20:08 +00:00
1907cd83c8 fix: upload url generation 2025-11-30 19:19:54 +00:00
033ad65d5f feat: improve err logging 2025-11-30 19:19:33 +00:00
987edc0d4a fix: timestamp cols default not working 2025-11-30 17:42:35 +00:00
fb8e91dd47 feat: create root dir on acc registration 2025-11-30 17:37:59 +00:00
0b8aee5d60 refactor: make vfs methods accept bun.IDB 2025-11-30 17:31:24 +00:00
89b62f6d8a feat: introduce account 2025-11-30 17:12:50 +00:00
1c1392a0a1 refactor: replace KeyMode with ShouldPersistKey 2025-11-30 15:02:37 +00:00
6984bb209e refactor: node deletion 2025-11-30 01:16:44 +00:00
629d56b5ab fix: registration endpoint and db auto close issue 2025-11-29 20:51:56 +00:00
5e4e08c255 fix: migration code not working 2025-11-29 20:32:32 +00:00
  - read database config from config file
  - rename migration file to expected file name format
42b805fbd1 feat: add blob store Initialize method 2025-11-29 18:39:21 +00:00
ab4c14bc09 feat: impl config loading 2025-11-29 18:09:41 +00:00
fd3b2d3908 fix: handle NewServer error 2025-11-29 17:28:53 +00:00
39824e45d9 feat: impl upload api endpoints 2025-11-29 17:25:11 +00:00
6aee150a59 build: remove devcontainer name 2025-11-29 17:24:49 +00:00
9ea76d2021 build: per lang code format settings 2025-11-28 22:31:25 +00:00
987f36e1d2 feat: impl upload service 2025-11-28 22:31:00 +00:00
41 changed files with 1709 additions and 452 deletions

View File

@@ -1,5 +1,4 @@
 {
-  "name": "React + Bun + Convex Development",
   "build": {
     "context": ".",
     "dockerfile": "Dockerfile"
@@ -25,12 +24,7 @@
         "golang.go"
       ],
       "settings": {
-        "editor.defaultFormatter": "biomejs.biome",
         "editor.formatOnSave": true,
-        "editor.codeActionsOnSave": {
-          "source.organizeImports.biome": "explicit",
-          "source.fixAll.biome": "explicit"
-        },
         "typescript.preferences.importModuleSpecifier": "relative",
         "typescript.suggest.autoImports": true,
         "emmet.includeLanguages": {
@@ -40,7 +34,63 @@
         "tailwindCSS.experimental.classRegex": [
          ["cva\\(([^)]*)\\)", "[\"'`]([^\"'`]*).*?[\"'`]"],
          ["cx\\(([^)]*)\\)", "(?:'|\"|`)([^']*)(?:'|\"|`)"]
-        ]
+        ],
+        "[javascript]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.organizeImports.biome": "explicit",
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[javascriptreact]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.organizeImports.biome": "explicit",
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[typescript]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.organizeImports.biome": "explicit",
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[typescriptreact]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.organizeImports.biome": "explicit",
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[json]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[jsonc]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "biomejs.biome",
+          "editor.codeActionsOnSave": {
+            "source.fixAll.biome": "explicit"
+          }
+        },
+        "[go]": {
+          "editor.formatOnSave": true,
+          "editor.defaultFormatter": "golang.go",
+          "editor.codeActionsOnSave": {
+            "source.organizeImports": "explicit"
+          }
+        },
+        "go.formatTool": "goimports",
+        "go.lintTool": "golangci-lint",
+        "go.useLanguageServer": true
       }
     }
   },

View File

@@ -1,22 +1,34 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 	"log"
+	"os"
 
 	"github.com/get-drexa/drexa/internal/drexa"
-	"github.com/joho/godotenv"
 )
 
 func main() {
-	_ = godotenv.Load()
+	configPath := flag.String("config", "", "path to config file (required)")
+	flag.Parse()
 
-	config, err := drexa.ServerConfigFromEnv()
+	if *configPath == "" {
+		fmt.Fprintln(os.Stderr, "error: --config is required")
+		flag.Usage()
+		os.Exit(1)
+	}
+
+	config, err := drexa.ConfigFromFile(*configPath)
+	if err != nil {
+		log.Fatalf("failed to load config: %v", err)
+	}
+
+	server, err := drexa.NewServer(*config)
 	if err != nil {
 		log.Fatal(err)
 	}
-	server := drexa.NewServer(*config)
 
-	log.Fatal(server.Listen(fmt.Sprintf(":%d", config.Port)))
+	log.Printf("starting server on :%d", config.Server.Port)
+	log.Fatal(server.Listen(fmt.Sprintf(":%d", config.Server.Port)))
 }

View File

@@ -1,13 +1,37 @@
 package main
 
 import (
+	"context"
+	"flag"
+	"fmt"
 	"log"
+	"os"
 
 	"github.com/get-drexa/drexa/internal/database"
+	"github.com/get-drexa/drexa/internal/drexa"
 )
 
 func main() {
-	if err := database.RunMigrations(); err != nil {
-		log.Fatalf("Failed to run migrations: %v", err)
+	configPath := flag.String("config", "", "path to config file (required)")
+	flag.Parse()
+
+	if *configPath == "" {
+		fmt.Fprintln(os.Stderr, "error: --config is required")
+		flag.Usage()
+		os.Exit(1)
 	}
+
+	config, err := drexa.ConfigFromFile(*configPath)
+	if err != nil {
+		log.Fatalf("failed to load config: %v", err)
+	}
+
+	db := database.NewFromPostgres(config.Database.PostgresURL)
+	defer db.Close()
+
+	log.Println("running migrations...")
+	if err := database.RunMigrations(context.Background(), db); err != nil {
+		log.Fatalf("failed to run migrations: %v", err)
+	}
+	log.Println("migrations completed successfully")
 }

View File

@@ -0,0 +1,30 @@
# Drexa Backend Configuration
# Copy this file to config.yaml and adjust values for your environment.

server:
  port: 8080

database:
  postgres_url: postgres://user:password@localhost:5432/drexa?sslmode=disable

jwt:
  issuer: drexa
  audience: drexa-api
  # Secret key can be provided via (in order of precedence):
  #   1. JWT_SECRET_KEY environment variable (base64 encoded)
  #   2. secret_key_base64 below (base64 encoded)
  #   3. secret_key_path below (file with base64 encoded content)
  # secret_key_base64: "base64encodedkey"
  secret_key_path: /run/secrets/jwt_secret_key

storage:
  # Mode: "flat" (UUID-based keys) or "hierarchical" (path-based keys)
  # Note: S3 backend only supports "flat" mode
  mode: flat
  # Backend: "fs" (filesystem) or "s3" (not yet implemented)
  backend: fs
  # Required when backend is "fs"
  root_path: /var/lib/drexa/blobs
  # Required when backend is "s3"
  # bucket: my-drexa-bucket
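
Every secret-key source above expects base64. A minimal Go one-off for minting a key (a sketch; it assumes a 32-byte HMAC key is acceptable, which the changeset itself does not pin down):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// 32 random bytes, base64-encoded; usable as JWT_SECRET_KEY,
	// secret_key_base64, or the content of the secret_key_path file.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}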

apps/backend/config.yaml Normal file
View File

@@ -0,0 +1,15 @@
server:
  port: 8080

database:
  postgres_url: postgres://drexa:hunter2@helian:5433/drexa?sslmode=disable

jwt:
  issuer: drexa
  audience: drexa-api
  secret_key_base64: "pNeUExoqdakfecZLFL53NJpY4iB9zFot9EuEBItlYKY="

storage:
  mode: hierarchical
  backend: fs
  root_path: ./data

View File

@@ -3,24 +3,26 @@ module github.com/get-drexa/drexa
 go 1.25.4
 
 require (
+	github.com/gabriel-vasile/mimetype v1.4.11
 	github.com/gofiber/fiber/v2 v2.52.9
 	github.com/google/uuid v1.6.0
-	github.com/uptrace/bun v1.2.15
-	golang.org/x/crypto v0.40.0
+	github.com/sqids/sqids-go v0.4.1
+	github.com/uptrace/bun v1.2.16
+	github.com/uptrace/bun/extra/bundebug v1.2.16
+	golang.org/x/crypto v0.45.0
+	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
-	github.com/gabriel-vasile/mimetype v1.4.11 // indirect
-	github.com/joho/godotenv v1.5.1 // indirect
-	github.com/sqids/sqids-go v0.4.1 // indirect
-	go.opentelemetry.io/otel v1.37.0 // indirect
-	go.opentelemetry.io/otel/trace v1.37.0 // indirect
+	github.com/fatih/color v1.18.0 // indirect
+	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/trace v1.38.0 // indirect
 	mellium.im/sasl v0.3.2 // indirect
 )
 
 require (
 	github.com/andybalholm/brotli v1.1.0 // indirect
-	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.3.0
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
@@ -28,14 +30,13 @@ require (
 	github.com/mattn/go-runewidth v0.0.16 // indirect
 	github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
-	github.com/stretchr/testify v1.10.0 // indirect
 	github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
-	github.com/uptrace/bun/dialect/pgdialect v1.2.15
-	github.com/uptrace/bun/driver/pgdriver v1.2.15
+	github.com/uptrace/bun/dialect/pgdialect v1.2.16
+	github.com/uptrace/bun/driver/pgdriver v1.2.16
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasthttp v1.51.0 // indirect
 	github.com/valyala/tcplisten v1.0.0 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-	golang.org/x/sys v0.34.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
 )

View File

@@ -2,26 +2,32 @@ github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1
 github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
 github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
 github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
 github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
 github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
-github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
 github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
@@ -30,16 +36,18 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/sqids/sqids-go v0.4.1 h1:eQKYzmAZbLlRwHeHYPF35QhgxwZHLnlmVj9AkIj/rrw=
 github.com/sqids/sqids-go v0.4.1/go.mod h1:EMwHuPQgSNFS0A49jESTfIQS+066XQTVhukrzEPScl8=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
-github.com/uptrace/bun v1.2.15 h1:Ut68XRBLDgp9qG9QBMa9ELWaZOmzHNdczHQdrOZbEFE=
-github.com/uptrace/bun v1.2.15/go.mod h1:Eghz7NonZMiTX/Z6oKYytJ0oaMEJ/eq3kEV4vSqG038=
-github.com/uptrace/bun/dialect/pgdialect v1.2.15 h1:er+/3giAIqpfrXJw+KP9B7ujyQIi5XkPnFmgjAVL6bA=
-github.com/uptrace/bun/dialect/pgdialect v1.2.15/go.mod h1:QSiz6Qpy9wlGFsfpf7UMSL6mXAL1jDJhFwuOVacCnOQ=
-github.com/uptrace/bun/driver/pgdriver v1.2.15 h1:eZZ60ZtUUE6jjv6VAI1pCMaTgtx3sxmChQzwbvchOOo=
-github.com/uptrace/bun/driver/pgdriver v1.2.15/go.mod h1:s2zz/BAeScal4KLFDI8PURwATN8s9RDBsElEbnPAjv4=
+github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
+github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
+github.com/uptrace/bun/dialect/pgdialect v1.2.16 h1:KFNZ0LxAyczKNfK/IJWMyaleO6eI9/Z5tUv3DE1NVL4=
+github.com/uptrace/bun/dialect/pgdialect v1.2.16/go.mod h1:IJdMeV4sLfh0LDUZl7TIxLI0LipF1vwTK3hBC7p5qLo=
+github.com/uptrace/bun/driver/pgdriver v1.2.16 h1:b1kpXKUxtTSGYow5Vlsb+dKV3z0R7aSAJNfMfKp61ZU=
+github.com/uptrace/bun/driver/pgdriver v1.2.16/go.mod h1:H6lUZ9CBfp1X5Vq62YGSV7q96/v94ja9AYFjKvdoTk0=
+github.com/uptrace/bun/extra/bundebug v1.2.16 h1:3OXAfHTU4ydu2+4j05oB1BxPx6+ypdWIVzTugl/7zl0=
+github.com/uptrace/bun/extra/bundebug v1.2.16/go.mod h1:vk6R/1i67/S2RvUI5AH/m3P5e67mOkfDCmmCsAPUumo=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
@@ -50,15 +58,18 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU
 github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
-golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
-golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 mellium.im/sasl v0.3.2 h1:PT6Xp7ccn9XaXAnJ03FcEjmAn7kK1x7aoXV6F+Vmrl0=

View File

@@ -0,0 +1,23 @@
package account

import (
	"time"

	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Account struct {
	bun.BaseModel `bun:"accounts"`

	ID                uuid.UUID `bun:",pk,type:uuid" json:"id"`
	UserID            uuid.UUID `bun:"user_id,notnull,type:uuid" json:"userId"`
	StorageUsageBytes int64     `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
	StorageQuotaBytes int64     `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`
	CreatedAt         time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
	UpdatedAt         time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}

func newAccountID() (uuid.UUID, error) {
	return uuid.NewV7()
}

View File

@@ -0,0 +1,8 @@
package account

import "errors"

var (
	ErrAccountNotFound      = errors.New("account not found")
	ErrAccountAlreadyExists = errors.New("account already exists")
)

View File

@@ -0,0 +1,132 @@
package account

import (
	"errors"

	"github.com/get-drexa/drexa/internal/auth"
	"github.com/get-drexa/drexa/internal/httperr"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/gofiber/fiber/v2"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type HTTPHandler struct {
	accountService *Service
	authService    *auth.Service
	db             *bun.DB
	authMiddleware fiber.Handler
}

type registerAccountRequest struct {
	Email       string `json:"email"`
	Password    string `json:"password"`
	DisplayName string `json:"displayName"`
}

type registerAccountResponse struct {
	Account      *Account   `json:"account"`
	User         *user.User `json:"user"`
	AccessToken  string     `json:"accessToken"`
	RefreshToken string     `json:"refreshToken"`
}

const currentAccountKey = "currentAccount"

func CurrentAccount(c *fiber.Ctx) *Account {
	return c.Locals(currentAccountKey).(*Account)
}

func NewHTTPHandler(accountService *Service, authService *auth.Service, db *bun.DB, authMiddleware fiber.Handler) *HTTPHandler {
	return &HTTPHandler{accountService: accountService, authService: authService, db: db, authMiddleware: authMiddleware}
}

func (h *HTTPHandler) RegisterRoutes(api fiber.Router) fiber.Router {
	api.Post("/accounts", h.registerAccount)

	account := api.Group("/accounts/:accountID")
	account.Use(h.authMiddleware)
	account.Use(h.accountMiddleware)
	account.Get("/", h.getAccount)

	return account
}

func (h *HTTPHandler) accountMiddleware(c *fiber.Ctx) error {
	user, err := auth.AuthenticatedUser(c)
	if err != nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}

	accountID, err := uuid.Parse(c.Params("accountID"))
	if err != nil {
		return c.SendStatus(fiber.StatusNotFound)
	}

	account, err := h.accountService.AccountByID(c.Context(), h.db, user.ID, accountID)
	if err != nil {
		if errors.Is(err, ErrAccountNotFound) {
			return c.SendStatus(fiber.StatusNotFound)
		}
		return httperr.Internal(err)
	}

	c.Locals(currentAccountKey, account)
	return c.Next()
}

func (h *HTTPHandler) getAccount(c *fiber.Ctx) error {
	account := CurrentAccount(c)
	if account == nil {
		return c.SendStatus(fiber.StatusNotFound)
	}
	return c.JSON(account)
}

func (h *HTTPHandler) registerAccount(c *fiber.Ctx) error {
	req := new(registerAccountRequest)
	if err := c.BodyParser(req); err != nil {
		return c.SendStatus(fiber.StatusBadRequest)
	}

	tx, err := h.db.BeginTx(c.Context(), nil)
	if err != nil {
		return httperr.Internal(err)
	}
	defer tx.Rollback()

	acc, u, err := h.accountService.Register(c.Context(), tx, RegisterOptions{
		Email:       req.Email,
		Password:    req.Password,
		DisplayName: req.DisplayName,
	})
	if err != nil {
		var ae *user.AlreadyExistsError
		if errors.As(err, &ae) {
			return c.SendStatus(fiber.StatusConflict)
		}
		if errors.Is(err, ErrAccountAlreadyExists) {
			return c.SendStatus(fiber.StatusConflict)
		}
		return httperr.Internal(err)
	}

	result, err := h.authService.GenerateTokenForUser(c.Context(), tx, u)
	if err != nil {
		return httperr.Internal(err)
	}

	err = tx.Commit()
	if err != nil {
		return httperr.Internal(err)
	}

	return c.JSON(registerAccountResponse{
		Account:      acc,
		User:         u,
		AccessToken:  result.AccessToken,
		RefreshToken: result.RefreshToken,
	})
}

View File

@@ -0,0 +1,115 @@
package account

import (
	"context"
	"database/sql"
	"errors"

	"github.com/get-drexa/drexa/internal/database"
	"github.com/get-drexa/drexa/internal/password"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Service struct {
	userService user.Service
	vfs         *virtualfs.VirtualFS
}

type RegisterOptions struct {
	Email       string
	Password    string
	DisplayName string
}

type CreateAccountOptions struct {
	OrganizationID uuid.UUID
	QuotaBytes     int64
}

func NewService(userService *user.Service, vfs *virtualfs.VirtualFS) *Service {
	return &Service{
		userService: *userService,
		vfs:         vfs,
	}
}

func (s *Service) Register(ctx context.Context, db bun.IDB, opts RegisterOptions) (*Account, *user.User, error) {
	hashed, err := password.Hash(opts.Password)
	if err != nil {
		return nil, nil, err
	}

	u, err := s.userService.RegisterUser(ctx, db, user.UserRegistrationOptions{
		Email:       opts.Email,
		Password:    hashed,
		DisplayName: opts.DisplayName,
	})
	if err != nil {
		return nil, nil, err
	}

	acc, err := s.CreateAccount(ctx, db, u.ID, CreateAccountOptions{
		// TODO: make quota configurable
		QuotaBytes: 1024 * 1024 * 1024, // 1GB
	})
	if err != nil {
		return nil, nil, err
	}

	_, err = s.vfs.CreateDirectory(ctx, db, acc.ID, uuid.Nil, virtualfs.RootDirectoryName)
	if err != nil {
		return nil, nil, err
	}

	return acc, u, nil
}

func (s *Service) CreateAccount(ctx context.Context, db bun.IDB, userID uuid.UUID, opts CreateAccountOptions) (*Account, error) {
	id, err := newAccountID()
	if err != nil {
		return nil, err
	}

	account := &Account{
		ID:                id,
		UserID:            userID,
		StorageQuotaBytes: opts.QuotaBytes,
	}

	_, err = db.NewInsert().Model(account).Returning("*").Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return nil, ErrAccountAlreadyExists
		}
		return nil, err
	}

	return account, nil
}

func (s *Service) AccountByUserID(ctx context.Context, db bun.IDB, userID uuid.UUID) (*Account, error) {
	var account Account
	err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, ErrAccountNotFound
		}
		return nil, err
	}
	return &account, nil
}

func (s *Service) AccountByID(ctx context.Context, db bun.IDB, userID uuid.UUID, id uuid.UUID) (*Account, error) {
	var account Account
	err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Where("id = ?", id).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, ErrAccountNotFound
		}
		return nil, err
	}
	return &account, nil
}

View File

@@ -3,85 +3,59 @@ package auth
 import (
 	"errors"
 
+	"github.com/get-drexa/drexa/internal/httperr"
 	"github.com/get-drexa/drexa/internal/user"
 	"github.com/gofiber/fiber/v2"
+	"github.com/uptrace/bun"
 )
 
-const authServiceKey = "authService"
-
 type loginRequest struct {
 	Email    string `json:"email"`
 	Password string `json:"password"`
 }
 
-type registerRequest struct {
-	Email       string `json:"email"`
-	Password    string `json:"password"`
-	DisplayName string `json:"displayName"`
-}
-
 type loginResponse struct {
 	User         user.User `json:"user"`
 	AccessToken  string    `json:"accessToken"`
 	RefreshToken string    `json:"refreshToken"`
 }
 
-func RegisterAPIRoutes(api fiber.Router, s *Service) {
-	auth := api.Group("/auth", func(c *fiber.Ctx) error {
-		c.Locals(authServiceKey, s)
-		return c.Next()
-	})
-	auth.Post("/login", login)
-	auth.Post("/register", register)
+type HTTPHandler struct {
+	service *Service
+	db      *bun.DB
 }
 
-func mustAuthService(c *fiber.Ctx) *Service {
-	return c.Locals(authServiceKey).(*Service)
+func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
+	return &HTTPHandler{service: s, db: db}
 }
 
-func login(c *fiber.Ctx) error {
-	s := mustAuthService(c)
+func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
+	auth := api.Group("/auth")
+	auth.Post("/login", h.Login)
+}
+
+func (h *HTTPHandler) Login(c *fiber.Ctx) error {
 	req := new(loginRequest)
 	if err := c.BodyParser(req); err != nil {
 		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
 	}
-	result, err := s.LoginWithEmailAndPassword(c.Context(), req.Email, req.Password)
+	tx, err := h.db.BeginTx(c.Context(), nil)
+	if err != nil {
+		return httperr.Internal(err)
+	}
+	defer tx.Rollback()
+
+	result, err := h.service.AuthenticateWithEmailAndPassword(c.Context(), tx, req.Email, req.Password)
 	if err != nil {
 		if errors.Is(err, ErrInvalidCredentials) {
 			return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"error": "Invalid credentials"})
 		}
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{"error": "Internal server error"})
+		return httperr.Internal(err)
 	}
-	return c.JSON(loginResponse{
-		User:         *result.User,
-		AccessToken:  result.AccessToken,
-		RefreshToken: result.RefreshToken,
-	})
-}
-
-func register(c *fiber.Ctx) error {
-	s := mustAuthService(c)
-	req := new(registerRequest)
-	if err := c.BodyParser(req); err != nil {
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
-	}
-	result, err := s.Register(c.Context(), registerOptions{
-		email:       req.Email,
-		password:    req.Password,
-		displayName: req.DisplayName,
-	})
-	if err != nil {
-		var ae *user.AlreadyExistsError
-		if errors.As(err, &ae) {
-			return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "User already exists"})
-		}
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{"error": "Internal server error"})
+	if err := tx.Commit(); err != nil {
+		return httperr.Internal(err)
 	}
 	return c.JSON(loginResponse{

View File

@@ -2,42 +2,49 @@ package auth
 
 import (
 	"errors"
+	"log/slog"
 	"strings"
 
+	"github.com/get-drexa/drexa/internal/httperr"
 	"github.com/get-drexa/drexa/internal/user"
 	"github.com/gofiber/fiber/v2"
+	"github.com/uptrace/bun"
 )
 
 const authenticatedUserKey = "authenticatedUser"
 
 // NewBearerAuthMiddleware is a middleware that authenticates a request using a bearer token.
 // To obtain the authenticated user in subsequent handlers, see AuthenticatedUser.
-func NewBearerAuthMiddleware(s *Service) fiber.Handler {
+func NewBearerAuthMiddleware(s *Service, db *bun.DB) fiber.Handler {
 	return func(c *fiber.Ctx) error {
 		authHeader := c.Get("Authorization")
 		if authHeader == "" {
+			slog.Info("no auth header")
 			return c.SendStatus(fiber.StatusUnauthorized)
 		}
 
 		parts := strings.Split(authHeader, " ")
 		if len(parts) != 2 || parts[0] != "Bearer" {
+			slog.Info("invalid auth header")
 			return c.SendStatus(fiber.StatusUnauthorized)
 		}
 
 		token := parts[1]
-		u, err := s.AuthenticateWithAccessToken(c.Context(), token)
+		u, err := s.AuthenticateWithAccessToken(c.Context(), db, token)
 		if err != nil {
 			var e *InvalidAccessTokenError
 			if errors.As(err, &e) {
+				slog.Info("invalid access token")
 				return c.SendStatus(fiber.StatusUnauthorized)
 			}
 			var nf *user.NotFoundError
 			if errors.As(err, &nf) {
+				slog.Info("user not found")
 				return c.SendStatus(fiber.StatusUnauthorized)
 			}
-			return c.SendStatus(fiber.StatusInternalServerError)
+			return httperr.Internal(err)
 		}
 
 		c.Locals(authenticatedUserKey, u)

View File

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/hex"
 	"errors"
+	"log/slog"
 
 	"github.com/get-drexa/drexa/internal/password"
 	"github.com/get-drexa/drexa/internal/user"
@@ -11,7 +12,7 @@ import (
 	"github.com/uptrace/bun"
 )
 
-type LoginResult struct {
+type AuthenticationResult struct {
 	User         *user.User
 	AccessToken  string
 	RefreshToken string
@@ -20,27 +21,42 @@ type LoginResult struct {
 var ErrInvalidCredentials = errors.New("invalid credentials")
 
 type Service struct {
-	db          *bun.DB
 	userService *user.Service
 	tokenConfig TokenConfig
 }
 
-type registerOptions struct {
-	displayName string
-	email       string
-	password    string
-}
-
-func NewService(db *bun.DB, userService *user.Service, tokenConfig TokenConfig) *Service {
+func NewService(userService *user.Service, tokenConfig TokenConfig) *Service {
 	return &Service{
-		db:          db,
 		userService: userService,
 		tokenConfig: tokenConfig,
 	}
 }
 
-func (s *Service) LoginWithEmailAndPassword(ctx context.Context, email, plain string) (*LoginResult, error) {
-	u, err := s.userService.UserByEmail(ctx, email)
+func (s *Service) GenerateTokenForUser(ctx context.Context, db bun.IDB, user *user.User) (*AuthenticationResult, error) {
+	at, err := GenerateAccessToken(user, &s.tokenConfig)
+	if err != nil {
+		return nil, err
+	}
+	rt, err := GenerateRefreshToken(user, &s.tokenConfig)
+	if err != nil {
+		return nil, err
+	}
+	_, err = db.NewInsert().Model(rt).Exec(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &AuthenticationResult{
+		User:         user,
+		AccessToken:  at,
+		RefreshToken: hex.EncodeToString(rt.Token),
+	}, nil
+}
+
+func (s *Service) AuthenticateWithEmailAndPassword(ctx context.Context, db bun.IDB, email, plain string) (*AuthenticationResult, error) {
+	u, err := s.userService.UserByEmail(ctx, db, email)
 	if err != nil {
 		var nf *user.NotFoundError
 		if errors.As(err, &nf) {
@@ -64,65 +80,30 @@ func (s *Service) LoginWithEmailAndPassword(ctx context.Context, email, plain st
 		return nil, err
 	}
 
-	_, err = s.db.NewInsert().Model(rt).Exec(ctx)
+	_, err = db.NewInsert().Model(rt).Exec(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	return &LoginResult{
+	return &AuthenticationResult{
 		User:         u,
 		AccessToken:  at,
 		RefreshToken: hex.EncodeToString(rt.Token),
 	}, nil
 }
 
-func (s *Service) Register(ctx context.Context, opts registerOptions) (*LoginResult, error) {
-	hashed, err := password.Hash(opts.password)
-	if err != nil {
-		return nil, err
-	}
-	u, err := s.userService.RegisterUser(ctx, user.UserRegistrationOptions{
-		Email:       opts.email,
-		DisplayName: opts.displayName,
-		Password:    hashed,
-	})
-	if err != nil {
-		return nil, err
-	}
-	at, err := GenerateAccessToken(u, &s.tokenConfig)
-	if err != nil {
-		return nil, err
-	}
-	rt, err := GenerateRefreshToken(u, &s.tokenConfig)
-	if err != nil {
-		return nil, err
-	}
-	_, err = s.db.NewInsert().Model(rt).Exec(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return &LoginResult{
-		User:         u,
-		AccessToken:  at,
-		RefreshToken: hex.EncodeToString(rt.Token),
-	}, nil
-}
-
-func (s *Service) AuthenticateWithAccessToken(ctx context.Context, token string) (*user.User, error) {
+func (s *Service) AuthenticateWithAccessToken(ctx context.Context, db bun.IDB, token string) (*user.User, error) {
 	claims, err := ParseAccessToken(token, &s.tokenConfig)
 	if err != nil {
+		slog.Info("failed to parse access token", "error", err)
 		return nil, err
 	}
 	id, err := uuid.Parse(claims.Subject)
 	if err != nil {
+		slog.Info("failed to parse access token subject", "error", err)
 		return nil, newInvalidAccessTokenError(err)
 	}
-	return s.userService.UserByID(ctx, id)
+	return s.userService.UserByID(ctx, db, id)
 }

View File

@@ -33,7 +33,11 @@ type RefreshToken struct {
 	Token     []byte    `bun:"-"`
 	TokenHash string    `bun:"token_hash,notnull"`
 	ExpiresAt time.Time `bun:"expires_at,notnull"`
-	CreatedAt time.Time `bun:"created_at,notnull"`
+	CreatedAt time.Time `bun:"created_at,notnull,nullzero"`
+}
+
+func newTokenID() (uuid.UUID, error) {
+	return uuid.NewV7()
 }
 
 func GenerateAccessToken(user *user.User, c *TokenConfig) (string, error) {
@@ -63,9 +67,9 @@ func GenerateRefreshToken(user *user.User, c *TokenConfig) (*RefreshToken, error
 		return nil, fmt.Errorf("failed to generate refresh token: %w", err)
 	}
 
-	id, err := uuid.NewV7()
+	id, err := newTokenID()
 	if err != nil {
-		return nil, fmt.Errorf("failed to generate refresh token: %w", err)
+		return nil, fmt.Errorf("failed to generate token ID: %w", err)
 	}
 
 	h := sha256.Sum256(buf)

View File

@@ -3,6 +3,7 @@ package blob
 import "errors"
 
 var (
 	ErrConflict = errors.New("key already used for a different blob")
 	ErrNotFound = errors.New("key not found")
+	ErrInvalidFileContent = errors.New("invalid file content. must provide either a reader or a blob key")
 )

View File

@@ -5,6 +5,8 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+
+	"github.com/get-drexa/drexa/internal/ioext"
 )
 
 var _ Store = &FSStore{}
@@ -21,6 +23,10 @@ func NewFSStore(config FSStoreConfig) *FSStore {
 	return &FSStore{config: config}
 }
 
+func (s *FSStore) Initialize(ctx context.Context) error {
+	return os.MkdirAll(s.config.Root, 0755)
+}
+
 func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error {
 	path := filepath.Join(s.config.Root, string(key))
 
@@ -47,7 +53,7 @@ func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error {
 	return nil
 }
 
-func (s *FSStore) Retrieve(ctx context.Context, key Key) (io.ReadCloser, error) {
+func (s *FSStore) Read(ctx context.Context, key Key) (io.ReadCloser, error) {
 	path := filepath.Join(s.config.Root, string(key))
 	f, err := os.Open(path)
 	if err != nil {
@@ -59,6 +65,37 @@ func (s *FSStore) Retrieve(ctx context.Context, key Key) (io.ReadCloser, error)
 	return f, nil
 }
 
+func (s *FSStore) ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error) {
+	path := filepath.Join(s.config.Root, string(key))
+	f, err := os.Open(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, ErrNotFound
+		}
+		return nil, err
+	}
+	_, err = f.Seek(offset, io.SeekStart)
+	if err != nil {
+		return nil, err
+	}
+	return ioext.NewLimitReadCloser(f, length), nil
+}
+
+func (s *FSStore) ReadSize(ctx context.Context, key Key) (int64, error) {
+	path := filepath.Join(s.config.Root, string(key))
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, ErrNotFound
+		}
+		return 0, err
+	}
+	return fi.Size(), nil
+}
+
 func (s *FSStore) Delete(ctx context.Context, key Key) error {
 	err := os.Remove(filepath.Join(s.config.Root, string(key)))
 	// no op if file does not exist
@@ -69,6 +106,20 @@ func (s *FSStore) Delete(ctx context.Context, key Key) error {
 	return nil
 }
 
+func (s *FSStore) DeletePrefix(ctx context.Context, prefix Key) error {
+	prefixPath := filepath.Join(s.config.Root, string(prefix))
+	err := os.RemoveAll(prefixPath)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+func (s *FSStore) Update(ctx context.Context, key Key, opts UpdateOptions) error {
+	// Update is a no-op for FSStore
+	return nil
+}
+
 func (s *FSStore) Move(ctx context.Context, srcKey, dstKey Key) error {
 	oldPath := filepath.Join(s.config.Root, string(srcKey))
 	newPath := filepath.Join(s.config.Root, string(dstKey))
@@ -93,3 +144,11 @@ func (s *FSStore) Move(ctx context.Context, srcKey, dstKey Key) error {
 	return nil
 }
+
+func (s *FSStore) SupportsDirectUpload() bool {
+	return false
+}
+
+func (s *FSStore) GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error) {
+	return "", nil
+}
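
ReadRange above leans on ioext.NewLimitReadCloser, which is not part of this changeset. A sketch of what such a helper plausibly looks like (hypothetical; the real internal/ioext may differ):

package ioext

import "io"

type limitReadCloser struct {
	io.Reader           // limited view over the underlying reader
	closer    io.Closer // original closer, so the file still gets released
}

// NewLimitReadCloser caps reads at n bytes while preserving Close on the
// wrapped ReadCloser.
func NewLimitReadCloser(rc io.ReadCloser, n int64) io.ReadCloser {
	return &limitReadCloser{Reader: io.LimitReader(rc, n), closer: rc}
}

func (l *limitReadCloser) Close() error {
	return l.closer.Close()
}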

View File

@@ -3,11 +3,31 @@ package blob
 import (
 	"context"
 	"io"
+	"time"
 )
 
+type UploadURLOptions struct {
+	Duration time.Duration
+}
+
+type UpdateOptions struct {
+	ContentType string
+}
+
 type Store interface {
+	Initialize(ctx context.Context) error
 	Put(ctx context.Context, key Key, reader io.Reader) error
-	Retrieve(ctx context.Context, key Key) (io.ReadCloser, error)
+	Update(ctx context.Context, key Key, opts UpdateOptions) error
 	Delete(ctx context.Context, key Key) error
+	DeletePrefix(ctx context.Context, prefix Key) error
 	Move(ctx context.Context, srcKey, dstKey Key) error
+	Read(ctx context.Context, key Key) (io.ReadCloser, error)
+	ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error)
+	ReadSize(ctx context.Context, key Key) (int64, error)
+	// SupportsDirectUpload returns true if the store allows files to be uploaded directly to the blob store.
+	SupportsDirectUpload() bool
+	// GenerateUploadURL generates a URL that can be used to upload a file directly to the blob store. If unsupported, returns an empty string with no error.
+	GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error)
 }
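
A caller-side sketch of the direct-upload contract those last two methods define (illustrative only; directUploadURL and the 15-minute TTL are assumptions, not part of this PR):

package uploadexample

import (
	"context"
	"time"

	"github.com/get-drexa/drexa/internal/blob"
)

// directUploadURL returns a pre-signed URL when the backing store supports
// direct uploads, or an empty string (meaning: proxy the upload through the
// API server) otherwise, matching the Store interface comments above.
func directUploadURL(ctx context.Context, s blob.Store, key blob.Key) (string, error) {
	if !s.SupportsDirectUpload() {
		return "", nil
	}
	return s.GenerateUploadURL(ctx, key, blob.UploadURLOptions{Duration: 15 * time.Minute})
}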

View File

@@ -1,17 +1,28 @@
 package database
 
 import (
+	"context"
 	"embed"
 
+	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 )
 
 //go:embed migrations/*.sql
 var sqlMigrations embed.FS
 
-// RunMigrations discovers and runs all migrations in the migrations directory.
-// Currently, the migrations directory is in internal/db/migrations.
-func RunMigrations() error {
-	m := migrate.NewMigrations()
-	return m.Discover(sqlMigrations)
+// RunMigrations discovers and runs all migrations against the database.
+func RunMigrations(ctx context.Context, db *bun.DB) error {
+	migrations := migrate.NewMigrations()
+	if err := migrations.Discover(sqlMigrations); err != nil {
+		return err
+	}
+
+	migrator := migrate.NewMigrator(db, migrations)
+	if err := migrator.Init(ctx); err != nil {
+		return err
+	}
+
+	_, err := migrator.Migrate(ctx)
+	return err
 }

View File

@@ -1,45 +1,31 @@
--- Enable UUID extension for UUIDv7 support
-CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-
--- UUIDv7 generation function (timestamp-ordered UUIDs)
--- Based on the draft RFC: https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format
-CREATE OR REPLACE FUNCTION uuid_generate_v7()
-RETURNS UUID
-AS $$
-DECLARE
-    unix_ts_ms BIGINT;
-    uuid_bytes BYTEA;
-BEGIN
-    unix_ts_ms = (EXTRACT(EPOCH FROM CLOCK_TIMESTAMP()) * 1000)::BIGINT;
-    uuid_bytes = OVERLAY(gen_random_bytes(16) PLACING
-        SUBSTRING(INT8SEND(unix_ts_ms) FROM 3) FROM 1 FOR 6
-    );
-    -- Set version (7) and variant bits
-    uuid_bytes = SET_BYTE(uuid_bytes, 6, (GET_BYTE(uuid_bytes, 6) & 15) | 112);
-    uuid_bytes = SET_BYTE(uuid_bytes, 8, (GET_BYTE(uuid_bytes, 8) & 63) | 128);
-    RETURN ENCODE(uuid_bytes, 'hex')::UUID;
-END;
-$$ LANGUAGE plpgsql VOLATILE;
-
 -- ============================================================================
 -- Application Tables
 -- ============================================================================
 
 CREATE TABLE IF NOT EXISTS users (
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
+    id UUID PRIMARY KEY,
     display_name TEXT,
     email TEXT NOT NULL UNIQUE,
     password TEXT NOT NULL,
-    storage_usage_bytes BIGINT NOT NULL,
-    storage_quota_bytes BIGINT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
 );
 
 CREATE INDEX idx_users_email ON users(email);
 
+CREATE TABLE IF NOT EXISTS accounts (
+    id UUID PRIMARY KEY,
+    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    storage_usage_bytes BIGINT NOT NULL DEFAULT 0,
+    storage_quota_bytes BIGINT NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX idx_accounts_user_id ON accounts(user_id);
+
 CREATE TABLE IF NOT EXISTS refresh_tokens (
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
+    id UUID PRIMARY KEY,
     user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
     token_hash TEXT NOT NULL UNIQUE,
     expires_at TIMESTAMPTZ NOT NULL,
@@ -52,9 +38,9 @@ CREATE INDEX idx_refresh_tokens_expires_at ON refresh_tokens(expires_at);
 
 -- Virtual filesystem nodes (unified files + directories)
 CREATE TABLE IF NOT EXISTS vfs_nodes (
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
+    id UUID PRIMARY KEY,
     public_id TEXT NOT NULL UNIQUE, -- opaque ID for external API (no timestamp leak)
-    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
     parent_id UUID REFERENCES vfs_nodes(id) ON DELETE CASCADE, -- NULL = root directory
     kind TEXT NOT NULL CHECK (kind IN ('file', 'directory')),
     status TEXT NOT NULL DEFAULT 'ready' CHECK (status IN ('pending', 'ready')),
@@ -69,21 +55,21 @@ CREATE TABLE IF NOT EXISTS vfs_nodes (
     updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
     deleted_at TIMESTAMPTZ, -- soft delete for trash
 
-    -- No duplicate names in same parent (per user, excluding deleted)
-    CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (user_id, parent_id, name, deleted_at)
+    -- No duplicate names in same parent (per account, excluding deleted)
+    CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (account_id, parent_id, name, deleted_at)
 );
 
-CREATE INDEX idx_vfs_nodes_user_id ON vfs_nodes(user_id) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_account_id ON vfs_nodes(account_id) WHERE deleted_at IS NULL;
 CREATE INDEX idx_vfs_nodes_parent_id ON vfs_nodes(parent_id) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_user_parent ON vfs_nodes(user_id, parent_id) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(user_id, kind) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(user_id, deleted_at) WHERE deleted_at IS NOT NULL;
+CREATE INDEX idx_vfs_nodes_account_parent ON vfs_nodes(account_id, parent_id) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(account_id, kind) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(account_id, deleted_at) WHERE deleted_at IS NOT NULL;
 CREATE INDEX idx_vfs_nodes_public_id ON vfs_nodes(public_id);
-CREATE UNIQUE INDEX idx_vfs_nodes_user_root ON vfs_nodes(user_id) WHERE parent_id IS NULL; -- one root per user
+CREATE UNIQUE INDEX idx_vfs_nodes_account_root ON vfs_nodes(account_id) WHERE parent_id IS NULL; -- one root per account
 CREATE INDEX idx_vfs_nodes_pending ON vfs_nodes(created_at) WHERE status = 'pending'; -- for cleanup job
 
 CREATE TABLE IF NOT EXISTS node_shares (
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
+    id UUID PRIMARY KEY,
     node_id UUID NOT NULL REFERENCES vfs_nodes(id) ON DELETE CASCADE,
     share_token TEXT NOT NULL UNIQUE,
     expires_at TIMESTAMPTZ,
@@ -114,4 +100,7 @@ CREATE TRIGGER update_vfs_nodes_updated_at BEFORE UPDATE ON vfs_nodes
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
 
 CREATE TRIGGER update_node_shares_updated_at BEFORE UPDATE ON node_shares
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE TRIGGER update_accounts_updated_at BEFORE UPDATE ON accounts
+    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View File

@@ -2,6 +2,7 @@ package database
 
 import (
 	"database/sql"
+	"time"
 
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/dialect/pgdialect"
@@ -10,6 +11,17 @@ import (
 func NewFromPostgres(url string) *bun.DB {
 	sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(url)))
 
+	// Configure connection pool to prevent "database closed" errors
+	// SetMaxOpenConns sets the maximum number of open connections to the database
+	sqldb.SetMaxOpenConns(25)
+	// SetMaxIdleConns sets the maximum number of connections in the idle connection pool
+	sqldb.SetMaxIdleConns(5)
+	// SetConnMaxLifetime sets the maximum amount of time a connection may be reused
+	sqldb.SetConnMaxLifetime(5 * time.Minute)
+	// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle
+	sqldb.SetConnMaxIdleTime(10 * time.Minute)
+
 	db := bun.NewDB(sqldb, pgdialect.New())
 	return db
 }

View File

@@ -0,0 +1,155 @@
package drexa
import (
"encoding/base64"
"errors"
"fmt"
"os"
"gopkg.in/yaml.v3"
)
type StorageMode string
type StorageBackend string
const (
StorageModeFlat StorageMode = "flat"
StorageModeHierarchical StorageMode = "hierarchical"
)
const (
StorageBackendFS StorageBackend = "fs"
StorageBackendS3 StorageBackend = "s3"
)
type Config struct {
Server ServerConfig `yaml:"server"`
Database DatabaseConfig `yaml:"database"`
JWT JWTConfig `yaml:"jwt"`
Storage StorageConfig `yaml:"storage"`
}
type ServerConfig struct {
Port int `yaml:"port"`
}
type DatabaseConfig struct {
PostgresURL string `yaml:"postgres_url"`
}
type JWTConfig struct {
Issuer string `yaml:"issuer"`
Audience string `yaml:"audience"`
SecretKeyBase64 string `yaml:"secret_key_base64"`
SecretKeyPath string `yaml:"secret_key_path"`
SecretKey []byte `yaml:"-"`
}
type StorageConfig struct {
Mode StorageMode `yaml:"mode"`
Backend StorageBackend `yaml:"backend"`
RootPath string `yaml:"root_path"`
Bucket string `yaml:"bucket"`
}
// ConfigFromFile loads configuration from a YAML file.
// JWT secret key is loaded from JWT_SECRET_KEY env var (base64 encoded),
// falling back to the file path specified in jwt.secret_key_path.
func ConfigFromFile(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("config file not found: %s", path)
}
return nil, err
}
var config Config
if err := yaml.Unmarshal(data, &config); err != nil {
return nil, err
}
// Load JWT secret key (priority: env var > config base64 > config file path)
if envKey := os.Getenv("JWT_SECRET_KEY"); envKey != "" {
key, err := base64.StdEncoding.DecodeString(envKey)
if err != nil {
return nil, errors.New("JWT_SECRET_KEY env var is not valid base64")
}
config.JWT.SecretKey = key
} else if config.JWT.SecretKeyBase64 != "" {
key, err := base64.StdEncoding.DecodeString(config.JWT.SecretKeyBase64)
if err != nil {
return nil, errors.New("jwt.secret_key_base64 is not valid base64")
}
config.JWT.SecretKey = key
} else if config.JWT.SecretKeyPath != "" {
keyData, err := os.ReadFile(config.JWT.SecretKeyPath)
if err != nil {
return nil, err
}
key, err := base64.StdEncoding.DecodeString(string(keyData))
if err != nil {
return nil, errors.New("jwt.secret_key_path file content is not valid base64")
}
config.JWT.SecretKey = key
}
if errs := config.Validate(); len(errs) > 0 {
return nil, NewConfigError(errs...)
}
return &config, nil
}
// Validate checks for required configuration fields.
func (c *Config) Validate() []error {
var errs []error
// Server
if c.Server.Port == 0 {
errs = append(errs, errors.New("server.port is required"))
}
// Database
if c.Database.PostgresURL == "" {
errs = append(errs, errors.New("database.postgres_url is required"))
}
// JWT
if c.JWT.Issuer == "" {
errs = append(errs, errors.New("jwt.issuer is required"))
}
if c.JWT.Audience == "" {
errs = append(errs, errors.New("jwt.audience is required"))
}
if len(c.JWT.SecretKey) == 0 {
errs = append(errs, errors.New("jwt secret key is required (set JWT_SECRET_KEY env var, jwt.secret_key_base64, or jwt.secret_key_path)"))
}
// Storage
if c.Storage.Mode == "" {
errs = append(errs, errors.New("storage.mode is required"))
} else if c.Storage.Mode != StorageModeFlat && c.Storage.Mode != StorageModeHierarchical {
errs = append(errs, errors.New("storage.mode must be 'flat' or 'hierarchical'"))
}
if c.Storage.Backend == "" {
errs = append(errs, errors.New("storage.backend is required"))
} else if c.Storage.Backend != StorageBackendFS && c.Storage.Backend != StorageBackendS3 {
errs = append(errs, errors.New("storage.backend must be 'fs' or 's3'"))
}
if c.Storage.Backend == StorageBackendFS && c.Storage.RootPath == "" {
errs = append(errs, errors.New("storage.root_path is required when backend is 'fs'"))
}
if c.Storage.Backend == StorageBackendS3 {
if c.Storage.Bucket == "" {
errs = append(errs, errors.New("storage.bucket is required when backend is 's3'"))
}
if c.Storage.Mode == StorageModeHierarchical {
errs = append(errs, errors.New("storage.mode must be 'flat' when backend is 's3'"))
}
}
return errs
}
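
For reference, a config file that passes Validate might look like the following; the field names come from the yaml struct tags above, the values are illustrative, and only one of the three JWT key sources is needed:

server:
  port: 8080

database:
  postgres_url: postgres://drexa:drexa@localhost:5432/drexa?sslmode=disable

jwt:
  issuer: drexa
  audience: drexa-api
  # or set the JWT_SECRET_KEY env var / jwt.secret_key_path instead
  secret_key_base64: c2VjcmV0LWtleS1ieXRlcw==

storage:
  mode: hierarchical
  backend: fs
  root_path: /var/lib/drexa/blobs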

View File

@@ -5,17 +5,17 @@ import (
	"strings"
)

-type ServerConfigError struct {
+type ConfigError struct {
	Errors []error
}

-func NewServerConfigError(errs ...error) *ServerConfigError {
-	return &ServerConfigError{Errors: errs}
+func NewConfigError(errs ...error) *ConfigError {
+	return &ConfigError{Errors: errs}
}

-func (e *ServerConfigError) Error() string {
+func (e *ConfigError) Error() string {
	sb := strings.Builder{}
-	sb.WriteString("invalid server config:\n")
+	sb.WriteString("invalid config:\n")
	for _, err := range e.Errors {
		sb.WriteString(fmt.Sprintf(" - %s\n", err.Error()))
	}

View File

@@ -1,86 +1,83 @@
package drexa

import (
-	"encoding/hex"
-	"errors"
+	"context"
	"fmt"
-	"os"
-	"strconv"

+	"github.com/get-drexa/drexa/internal/account"
	"github.com/get-drexa/drexa/internal/auth"
+	"github.com/get-drexa/drexa/internal/blob"
	"github.com/get-drexa/drexa/internal/database"
+	"github.com/get-drexa/drexa/internal/httperr"
+	"github.com/get-drexa/drexa/internal/upload"
	"github.com/get-drexa/drexa/internal/user"
+	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/gofiber/fiber/v2"
+	"github.com/gofiber/fiber/v2/middleware/logger"
+	"github.com/uptrace/bun/extra/bundebug"
)

-type ServerConfig struct {
-	Port         int
-	PostgresURL  string
-	JWTIssuer    string
-	JWTAudience  string
-	JWTSecretKey []byte
-}
-
-func NewServer(c ServerConfig) *fiber.App {
-	app := fiber.New()
-
-	db := database.NewFromPostgres(c.PostgresURL)
-	userService := user.NewService(db)
-	authService := auth.NewService(db, userService, auth.TokenConfig{
-		Issuer:    c.JWTIssuer,
-		Audience:  c.JWTAudience,
-		SecretKey: c.JWTSecretKey,
+func NewServer(c Config) (*fiber.App, error) {
+	app := fiber.New(fiber.Config{
+		ErrorHandler:      httperr.ErrorHandler,
+		StreamRequestBody: true,
	})
+	app.Use(logger.New())
+
+	db := database.NewFromPostgres(c.Database.PostgresURL)
+	db.AddQueryHook(bundebug.NewQueryHook(bundebug.WithVerbose(true)))
+
+	// Initialize blob store based on config
+	var blobStore blob.Store
+	switch c.Storage.Backend {
+	case StorageBackendFS:
+		blobStore = blob.NewFSStore(blob.FSStoreConfig{
+			Root: c.Storage.RootPath,
+		})
+	case StorageBackendS3:
+		return nil, fmt.Errorf("s3 storage backend not yet implemented")
+	default:
+		return nil, fmt.Errorf("unknown storage backend: %s", c.Storage.Backend)
+	}
+	err := blobStore.Initialize(context.Background())
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize blob store: %w", err)
+	}
+
+	// Initialize key resolver based on config
+	var keyResolver virtualfs.BlobKeyResolver
+	switch c.Storage.Mode {
+	case StorageModeFlat:
+		keyResolver = virtualfs.NewFlatKeyResolver()
+	case StorageModeHierarchical:
+		keyResolver = virtualfs.NewHierarchicalKeyResolver(db)
+	default:
+		return nil, fmt.Errorf("unknown storage mode: %s", c.Storage.Mode)
+	}
+
+	vfs, err := virtualfs.NewVirtualFS(blobStore, keyResolver)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create virtual file system: %w", err)
+	}
+
+	userService := user.NewService()
+	authService := auth.NewService(userService, auth.TokenConfig{
+		Issuer:    c.JWT.Issuer,
+		Audience:  c.JWT.Audience,
+		SecretKey: c.JWT.SecretKey,
+	})
+	uploadService := upload.NewService(vfs, blobStore)
+	accountService := account.NewService(userService, vfs)
+
+	authMiddleware := auth.NewBearerAuthMiddleware(authService, db)

	api := app.Group("/api")
-	auth.RegisterAPIRoutes(api, authService)

-	return app
-}
-
-// ServerConfigFromEnv creates a ServerConfig from environment variables.
-func ServerConfigFromEnv() (*ServerConfig, error) {
-	c := ServerConfig{
-		PostgresURL: os.Getenv("POSTGRES_URL"),
-		JWTIssuer:   os.Getenv("JWT_ISSUER"),
-		JWTAudience: os.Getenv("JWT_AUDIENCE"),
-	}
-	errs := []error{}
-	keyHex := os.Getenv("JWT_SECRET_KEY")
-	if keyHex == "" {
-		errs = append(errs, errors.New("JWT_SECRET_KEY is required"))
-	} else {
-		k, err := hex.DecodeString(keyHex)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("failed to decode JWT_SECRET_KEY: %w", err))
-		}
-		c.JWTSecretKey = k
-	}
-	p, err := strconv.Atoi(os.Getenv("PORT"))
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to parse PORT: %w", err))
-	}
-	c.Port = p
-	if c.PostgresURL == "" {
-		errs = append(errs, errors.New("POSTGRES_URL is required"))
-	}
-	if c.JWTIssuer == "" {
-		errs = append(errs, errors.New("JWT_ISSUER is required"))
-	}
-	if c.JWTAudience == "" {
-		errs = append(errs, errors.New("JWT_AUDIENCE is required"))
-	}
-	if len(c.JWTSecretKey) == 0 {
-		errs = append(errs, errors.New("JWT_SECRET_KEY is required"))
-	}
-	if len(errs) > 0 {
-		return nil, NewServerConfigError(errs...)
-	}
-	return &c, nil
-}
+	accRouter := account.NewHTTPHandler(accountService, authService, db, authMiddleware).RegisterRoutes(api)
+	auth.NewHTTPHandler(authService, db).RegisterRoutes(api)
+	upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accRouter)
+
+	return app, nil
+}
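
Wired together, the new NewServer signature makes the entry point roughly the following sketch; the cmd package is not part of this diff, and the module import path and config file name are assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/get-drexa/drexa" // assumed module root package
)

func main() {
	cfg, err := drexa.ConfigFromFile("config.yaml")
	if err != nil {
		log.Fatal(err) // prints the aggregated ConfigError list
	}

	app, err := drexa.NewServer(*cfg)
	if err != nil {
		log.Fatal(err)
	}

	log.Fatal(app.Listen(fmt.Sprintf(":%d", cfg.Server.Port)))
}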

View File

@@ -0,0 +1,42 @@
package httperr
import (
"fmt"
"github.com/gofiber/fiber/v2"
)
// HTTPError represents an HTTP error with a status code and underlying error.
type HTTPError struct {
Code int
Message string
Err error
}
// Error implements the error interface.
func (e *HTTPError) Error() string {
if e.Err != nil {
return fmt.Sprintf("HTTP %d: %s: %v", e.Code, e.Message, e.Err)
}
return fmt.Sprintf("HTTP %d: %s", e.Code, e.Message)
}
// Unwrap returns the underlying error.
func (e *HTTPError) Unwrap() error {
return e.Err
}
// NewHTTPError creates a new HTTPError with the given status code, message, and underlying error.
func NewHTTPError(code int, message string, err error) *HTTPError {
return &HTTPError{
Code: code,
Message: message,
Err: err,
}
}
// Internal creates a new HTTPError with status 500.
func Internal(err error) *HTTPError {
return NewHTTPError(fiber.StatusInternalServerError, "Internal", err)
}
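
Handlers are then expected to return these instead of writing error responses by hand; a hypothetical example (loadWidget and errWidgetNotFound are invented for illustration):

func getWidget(c *fiber.Ctx) error {
	w, err := loadWidget(c.Params("id"))
	if err != nil {
		if errors.Is(err, errWidgetNotFound) {
			return httperr.NewHTTPError(fiber.StatusNotFound, "Widget not found", err)
		}
		// Logged with full details by the global handler; the client only sees "Internal".
		return httperr.Internal(err)
	}
	return c.JSON(w)
}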

View File

@@ -0,0 +1,64 @@
package httperr
import (
"errors"
"log/slog"
"github.com/gofiber/fiber/v2"
)
// ErrorHandler is a global error handler for Fiber that logs errors and returns appropriate responses.
func ErrorHandler(c *fiber.Ctx, err error) error {
// Default status code
code := fiber.StatusInternalServerError
message := "Internal"
// Check if it's our custom HTTPError
var httpErr *HTTPError
if errors.As(err, &httpErr) {
code = httpErr.Code
message = httpErr.Message
// Log the error with underlying error details
if httpErr.Err != nil {
slog.Error("HTTP error",
"status", code,
"message", message,
"error", httpErr.Err.Error(),
"path", c.Path(),
"method", c.Method(),
)
} else {
slog.Warn("HTTP error",
"status", code,
"message", message,
"path", c.Path(),
"method", c.Method(),
)
}
} else {
// Check if it's a Fiber error
var fiberErr *fiber.Error
if errors.As(err, &fiberErr) {
code = fiberErr.Code
message = fiberErr.Message
} else {
// Generic error - log it
slog.Error("Unhandled error",
"status", code,
"error", err.Error(),
"path", c.Path(),
"method", c.Method(),
)
}
}
// Set Content-Type header
c.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSONCharsetUTF8)
// Return JSON response
return c.Status(code).JSON(fiber.Map{
"error": message,
})
}

View File

@@ -1,4 +1,4 @@
-package virtualfs
+package ioext

import "io"
@@ -20,3 +20,4 @@ func (r *CountingReader) Read(p []byte) (n int, err error) {
func (r *CountingReader) Count() int64 {
	return r.count
}

View File

@@ -0,0 +1,24 @@
package ioext
import "io"
type LimitReadCloser struct {
reader io.ReadCloser
limitReader io.Reader
}
func NewLimitReadCloser(reader io.ReadCloser, length int64) *LimitReadCloser {
return &LimitReadCloser{
reader: reader,
limitReader: io.LimitReader(reader, length),
}
}
func (r *LimitReadCloser) Read(p []byte) (n int, err error) {
return r.limitReader.Read(p)
}
func (r *LimitReadCloser) Close() error {
return r.reader.Close()
}
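
Unlike a bare io.LimitReader, this wrapper keeps Close reachable so the underlying stream can still be released; a minimal illustrative use:

// Read at most limit bytes from rc, then close the original stream.
func readAtMost(rc io.ReadCloser, limit int64) ([]byte, error) {
	lrc := ioext.NewLimitReadCloser(rc, limit)
	defer lrc.Close() // closes rc itself, not just the limiting wrapper
	return io.ReadAll(lrc)
}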

View File

@@ -0,0 +1,10 @@
package upload
import "errors"
var (
ErrNotFound = errors.New("not found")
ErrParentNotDirectory = errors.New("parent is not a directory")
ErrConflict = errors.New("node conflict")
ErrContentNotUploaded = errors.New("content has not been uploaded")
)

View File

@@ -0,0 +1,120 @@
package upload
import (
"errors"
"fmt"
"github.com/get-drexa/drexa/internal/account"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/gofiber/fiber/v2"
"github.com/uptrace/bun"
)
type createUploadRequest struct {
ParentID string `json:"parentId"`
Name string `json:"name"`
}
type updateUploadRequest struct {
Status Status `json:"status"`
}
type HTTPHandler struct {
service *Service
db *bun.DB
}
func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
return &HTTPHandler{service: s, db: db}
}
func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
upload := api.Group("/uploads")
upload.Post("/", h.Create)
upload.Put("/:uploadID/content", h.ReceiveContent)
upload.Patch("/:uploadID", h.Update)
}
func (h *HTTPHandler) Create(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
req := new(createUploadRequest)
if err := c.BodyParser(req); err != nil {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
}
upload, err := h.service.CreateUpload(c.Context(), h.db, account.ID, CreateUploadOptions{
ParentID: req.ParentID,
Name: req.Name,
})
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
if errors.Is(err, ErrParentNotDirectory) {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Parent is not a directory"})
}
if errors.Is(err, ErrConflict) {
return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "A file with this name already exists"})
}
return httperr.Internal(err)
}
if upload.UploadURL == "" {
upload.UploadURL = fmt.Sprintf("%s%s/%s/content", c.BaseURL(), c.OriginalURL(), upload.ID)
}
return c.JSON(upload)
}
func (h *HTTPHandler) ReceiveContent(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
uploadID := c.Params("uploadID")
err := h.service.ReceiveUpload(c.Context(), h.db, account.ID, uploadID, c.Context().RequestBodyStream())
defer c.Context().Request.CloseBodyStream()
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
return httperr.Internal(err)
}
return c.SendStatus(fiber.StatusNoContent)
}
func (h *HTTPHandler) Update(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
req := new(updateUploadRequest)
if err := c.BodyParser(req); err != nil {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
}
if req.Status == StatusCompleted {
upload, err := h.service.CompleteUpload(c.Context(), h.db, account.ID, c.Params("uploadID"))
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
if errors.Is(err, ErrContentNotUploaded) {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Content has not been uploaded"})
}
return httperr.Internal(err)
}
return c.JSON(upload)
}
return c.SendStatus(fiber.StatusBadRequest)
}
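
End to end, a client drives these routes in order: POST to create the upload, PUT the bytes to the returned uploadUrl (or the /content fallback), then PATCH the status to completed. A rough client sketch; the base URL, bearer-token auth, and the exact route prefix under the account router are assumptions:

func uploadFile(base, token, parentID, name string, data io.Reader) error {
	// 1. Create the upload (and its pending vfs node).
	body, _ := json.Marshal(map[string]string{"parentId": parentID, "name": name})
	req, _ := http.NewRequest(http.MethodPost, base+"/uploads", bytes.NewReader(body))
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	var up struct {
		ID        string `json:"id"`
		UploadURL string `json:"uploadUrl"`
	}
	if err := json.NewDecoder(res.Body).Decode(&up); err != nil {
		return err
	}

	// 2. Stream the file content.
	putReq, _ := http.NewRequest(http.MethodPut, up.UploadURL, data)
	putReq.Header.Set("Authorization", "Bearer "+token)
	if _, err := http.DefaultClient.Do(putReq); err != nil {
		return err
	}

	// 3. Mark the upload as completed.
	patch, _ := json.Marshal(map[string]string{"status": "completed"})
	patchReq, _ := http.NewRequest(http.MethodPatch, base+"/uploads/"+up.ID, bytes.NewReader(patch))
	patchReq.Header.Set("Authorization", "Bearer "+token)
	patchReq.Header.Set("Content-Type", "application/json")
	_, err = http.DefaultClient.Do(patchReq)
	return err
}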

View File

@@ -0,0 +1,144 @@
package upload
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/get-drexa/drexa/internal/blob"
"github.com/get-drexa/drexa/internal/virtualfs"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type Service struct {
vfs *virtualfs.VirtualFS
blobStore blob.Store
pendingUploads sync.Map
}
func NewService(vfs *virtualfs.VirtualFS, blobStore blob.Store) *Service {
return &Service{
vfs: vfs,
blobStore: blobStore,
pendingUploads: sync.Map{},
}
}
type CreateUploadOptions struct {
ParentID string
Name string
}
func (s *Service) CreateUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateUploadOptions) (*Upload, error) {
parentNode, err := s.vfs.FindNodeByPublicID(ctx, db, accountID, opts.ParentID)
if err != nil {
if errors.Is(err, virtualfs.ErrNodeNotFound) {
return nil, ErrNotFound
}
return nil, err
}
if parentNode.Kind != virtualfs.NodeKindDirectory {
return nil, ErrParentNotDirectory
}
node, err := s.vfs.CreateFile(ctx, db, accountID, virtualfs.CreateFileOptions{
ParentID: parentNode.ID,
Name: opts.Name,
})
if err != nil {
if errors.Is(err, virtualfs.ErrNodeConflict) {
return nil, ErrConflict
}
return nil, err
}
var uploadURL string
if s.blobStore.SupportsDirectUpload() {
uploadURL, err = s.blobStore.GenerateUploadURL(ctx, node.BlobKey, blob.UploadURLOptions{
Duration: 1 * time.Hour,
})
if err != nil {
_ = s.vfs.PermanentlyDeleteNode(ctx, db, node)
return nil, err
}
} else {
uploadURL = ""
}
upload := &Upload{
ID: node.PublicID,
Status: StatusPending,
TargetNode: node,
UploadURL: uploadURL,
}
s.pendingUploads.Store(upload.ID, upload)
return upload, nil
}
func (s *Service) ReceiveUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string, reader io.Reader) error {
n, ok := s.pendingUploads.Load(uploadID)
if !ok {
return ErrNotFound
}
upload, ok := n.(*Upload)
if !ok {
return ErrNotFound
}
if upload.TargetNode.AccountID != accountID {
return ErrNotFound
}
err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromReader(reader))
if err != nil {
return err
}
upload.Status = StatusCompleted
return nil
}
func (s *Service) CompleteUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string) (*Upload, error) {
n, ok := s.pendingUploads.Load(uploadID)
if !ok {
return nil, ErrNotFound
}
upload, ok := n.(*Upload)
if !ok {
return nil, ErrNotFound
}
if upload.TargetNode.AccountID != accountID {
return nil, ErrNotFound
}
if upload.TargetNode.Status == virtualfs.NodeStatusReady && upload.Status == StatusCompleted {
return upload, nil
}
err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromBlobKey(upload.TargetNode.BlobKey))
if err != nil {
if errors.Is(err, blob.ErrNotFound) {
return nil, ErrContentNotUploaded
}
return nil, err
}
upload.Status = StatusCompleted
s.pendingUploads.Delete(uploadID)
return upload, nil
}

View File

@@ -0,0 +1,18 @@
package upload
import "github.com/get-drexa/drexa/internal/virtualfs"
type Status string
const (
StatusPending Status = "pending"
StatusCompleted Status = "completed"
StatusFailed Status = "failed"
)
type Upload struct {
ID string `json:"id"`
Status Status `json:"status"`
TargetNode *virtualfs.Node `json:"-"`
UploadURL string `json:"uploadUrl"`
}
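
Serialized to the client (TargetNode stays server-side via json:"-"), a pending upload looks roughly like this; the values are invented:

{
  "id": "x7Kd2m",
  "status": "pending",
  "uploadUrl": "http://localhost:8080/api/uploads/x7Kd2m/content"
}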

View File

@@ -11,9 +11,7 @@ import (
	"github.com/uptrace/bun"
)

-type Service struct {
-	db *bun.DB
-}
+type Service struct{}

type UserRegistrationOptions struct {
	Email       string
@@ -21,20 +19,24 @@ type UserRegistrationOptions struct {
	Password    password.Hashed
}

-func NewService(db *bun.DB) *Service {
-	return &Service{
-		db: db,
-	}
+func NewService() *Service {
+	return &Service{}
}

-func (s *Service) RegisterUser(ctx context.Context, opts UserRegistrationOptions) (*User, error) {
+func (s *Service) RegisterUser(ctx context.Context, db bun.IDB, opts UserRegistrationOptions) (*User, error) {
+	uid, err := newUserID()
+	if err != nil {
+		return nil, err
+	}
+
	u := User{
+		ID:          uid,
		Email:       opts.Email,
		DisplayName: opts.DisplayName,
		Password:    opts.Password,
	}

-	_, err := s.db.NewInsert().Model(&u).Returning("*").Exec(ctx)
+	_, err = db.NewInsert().Model(&u).Returning("*").Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return nil, newAlreadyExistsError(u.Email)
@@ -45,9 +47,9 @@ func (s *Service) RegisterUser(ctx context.Context, opts UserRegistrationOptions
	return &u, nil
}

-func (s *Service) UserByID(ctx context.Context, id uuid.UUID) (*User, error) {
+func (s *Service) UserByID(ctx context.Context, db bun.IDB, id uuid.UUID) (*User, error) {
	var user User
-	err := s.db.NewSelect().Model(&user).Where("id = ?", id).Scan(ctx)
+	err := db.NewSelect().Model(&user).Where("id = ?", id).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, newNotFoundError(id, "")
@@ -57,9 +59,9 @@ func (s *Service) UserByID(ctx context.Context, id uuid.UUID) (*User, error) {
	return &user, nil
}

-func (s *Service) UserByEmail(ctx context.Context, email string) (*User, error) {
+func (s *Service) UserByEmail(ctx context.Context, db bun.IDB, email string) (*User, error) {
	var user User
-	err := s.db.NewSelect().Model(&user).Where("email = ?", email).Scan(ctx)
+	err := db.NewSelect().Model(&user).Where("email = ?", email).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, newNotFoundError(uuid.Nil, email)
@@ -69,6 +71,6 @@ func (s *Service) UserByEmail(ctx context.Context, email string) (*User, error)
	return &user, nil
}

-func (s *Service) UserExistsByEmail(ctx context.Context, email string) (bool, error) {
-	return s.db.NewSelect().Model(&User{}).Where("email = ?", email).Exists(ctx)
+func (s *Service) UserExistsByEmail(ctx context.Context, db bun.IDB, email string) (bool, error) {
+	return db.NewSelect().Model(&User{}).Where("email = ?", email).Exists(ctx)
}

View File

@@ -1,6 +1,8 @@
package user

import (
+	"time"
+
	"github.com/get-drexa/drexa/internal/password"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
@@ -9,10 +11,14 @@ import (
type User struct {
	bun.BaseModel `bun:"users"`

	ID          uuid.UUID       `bun:",pk,type:uuid" json:"id"`
-	DisplayName string          `bun:"display_name,notnull" json:"displayName"`
+	DisplayName string          `bun:"display_name" json:"displayName"`
	Email       string          `bun:"email,unique,notnull" json:"email"`
	Password    password.Hashed `bun:"password,notnull" json:"-"`
-	StorageUsageBytes int64     `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
-	StorageQuotaBytes int64     `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`
+	CreatedAt   time.Time       `bun:"created_at,notnull,nullzero" json:"createdAt"`
+	UpdatedAt   time.Time       `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
+}
+
+func newUserID() (uuid.UUID, error) {
+	return uuid.NewV7()
}

View File

@@ -3,6 +3,7 @@ package virtualfs
import "errors"

var (
	ErrNodeNotFound = errors.New("node not found")
	ErrNodeConflict = errors.New("node conflict")
+	ErrUnsupportedOperation = errors.New("unsupported operation")
)

View File

@@ -0,0 +1,35 @@
package virtualfs
import (
"context"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
)
type FlatKeyResolver struct{}
var _ BlobKeyResolver = &FlatKeyResolver{}
func NewFlatKeyResolver() *FlatKeyResolver {
return &FlatKeyResolver{}
}
func (r *FlatKeyResolver) ShouldPersistKey() bool {
return true
}
func (r *FlatKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
if node.BlobKey == "" {
id, err := uuid.NewV7()
if err != nil {
return "", err
}
return blob.Key(id.String()), nil
}
return node.BlobKey, nil
}
func (r *FlatKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
return &DeletionPlan{Keys: allKeys}, nil
}

View File

@@ -0,0 +1,40 @@
package virtualfs
import (
"context"
"fmt"
"github.com/get-drexa/drexa/internal/blob"
"github.com/uptrace/bun"
)
type HierarchicalKeyResolver struct {
db *bun.DB
}
var _ BlobKeyResolver = &HierarchicalKeyResolver{}
func NewHierarchicalKeyResolver(db *bun.DB) *HierarchicalKeyResolver {
return &HierarchicalKeyResolver{db: db}
}
func (r *HierarchicalKeyResolver) ShouldPersistKey() bool {
return false
}
func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
if err != nil {
return "", err
}
return blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path)), nil
}
func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
if err != nil {
return nil, err
}
	// The prefix must match Resolve's "<accountID>/<path>" key layout.
	return &DeletionPlan{Prefix: blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path))}, nil
}
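
The two resolvers thus produce very different key shapes: flat mode mints an opaque UUID once and persists it on the node, while hierarchical mode re-derives "<accountID>/<path>" on every call and never stores it. A minimal sketch of the flat case (the hierarchical case needs a database to walk the path, so it is only described in the comment):

func demoFlatKey(ctx context.Context) {
	flat := virtualfs.NewFlatKeyResolver()
	key, _ := flat.Resolve(ctx, &virtualfs.Node{}) // empty BlobKey -> fresh UUIDv7
	fmt.Println(key)                               // e.g. "0193a0c2-7f3e-7c1d-9b2a-..."

	// NewHierarchicalKeyResolver(db).Resolve(ctx, node) would instead return
	// blob.Key("<accountID>/<absolute/path/of/node>"), recomputed per call.
}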

View File

@@ -4,35 +4,18 @@ import (
	"context"

	"github.com/get-drexa/drexa/internal/blob"
-	"github.com/google/uuid"
-	"github.com/uptrace/bun"
)

type BlobKeyResolver interface {
+	// ShouldPersistKey returns true if the resolved key should be stored in node.BlobKey.
+	// Flat keys (e.g. UUIDs) return true - key is generated once and stored.
+	// Hierarchical keys return false - key is derived from path each time.
+	ShouldPersistKey() bool
	Resolve(ctx context.Context, node *Node) (blob.Key, error)
+	ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error)
}

-type FlatKeyResolver struct{}
-
-func (r *FlatKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
-	if node.BlobKey == "" {
-		id, err := uuid.NewV7()
-		if err != nil {
-			return "", err
-		}
-		return blob.Key(id.String()), nil
-	}
-	return node.BlobKey, nil
-}
-
-type HierarchicalKeyResolver struct {
-	db *bun.DB
-}
-
-func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
-	path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
-	if err != nil {
-		return "", err
-	}
-	return blob.Key(path), nil
-}
+type DeletionPlan struct {
+	Prefix blob.Key
+	Keys   []blob.Key
+}

View File

@@ -25,21 +25,25 @@ const (
type Node struct {
	bun.BaseModel `bun:"vfs_nodes"`

	ID        uuid.UUID  `bun:",pk,type:uuid"`
	PublicID  string     `bun:"public_id,notnull"`
-	UserID    uuid.UUID  `bun:"user_id,notnull"`
-	ParentID  uuid.UUID  `bun:"parent_id,notnull"`
+	AccountID uuid.UUID  `bun:"account_id,notnull,type:uuid"`
+	ParentID  uuid.UUID  `bun:"parent_id,nullzero"`
	Kind      NodeKind   `bun:"kind,notnull"`
	Status    NodeStatus `bun:"status,notnull"`
	Name      string     `bun:"name,notnull"`
-	BlobKey   blob.Key   `bun:"blob_key"`
+	BlobKey   blob.Key   `bun:"blob_key,nullzero"`
	Size      int64      `bun:"size"`
-	MimeType  string     `bun:"mime_type"`
-	CreatedAt time.Time  `bun:"created_at,notnull"`
-	UpdatedAt time.Time  `bun:"updated_at,notnull"`
-	DeletedAt time.Time  `bun:"deleted_at"`
+	MimeType  string     `bun:"mime_type,nullzero"`
+	CreatedAt time.Time  `bun:"created_at,notnull,nullzero"`
+	UpdatedAt time.Time  `bun:"updated_at,notnull,nullzero"`
+	DeletedAt time.Time  `bun:"deleted_at,nullzero"`
+}
+
+func newNodeID() (uuid.UUID, error) {
+	return uuid.NewV7()
}

// IsAccessible returns true if the node can be accessed.

View File

@@ -12,7 +12,7 @@ import (
const absolutePathQuery = `WITH RECURSIVE path AS (
	SELECT id, parent_id, name, 1 as depth
-	FROM vfs_nodes WHERE id = $1 AND deleted_at IS NULL
+	FROM vfs_nodes WHERE id = ? AND deleted_at IS NULL

	UNION ALL
@@ -29,7 +29,7 @@ func JoinPath(parts ...string) string {
	return strings.Join(parts, "/")
}

-func buildNodeAbsolutePath(ctx context.Context, db *bun.DB, nodeID uuid.UUID) (string, error) {
+func buildNodeAbsolutePath(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) {
	var path []string
	err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path)
	if err != nil {

View File

@@ -12,13 +12,13 @@ import (
	"github.com/gabriel-vasile/mimetype"
	"github.com/get-drexa/drexa/internal/blob"
	"github.com/get-drexa/drexa/internal/database"
+	"github.com/get-drexa/drexa/internal/ioext"
	"github.com/google/uuid"
	"github.com/sqids/sqids-go"
	"github.com/uptrace/bun"
)

type VirtualFS struct {
-	db          *bun.DB
	blobStore   blob.Store
	keyResolver BlobKeyResolver
@@ -31,23 +31,42 @@ type CreateNodeOptions struct {
	Name     string
}

-type WriteFileOptions struct {
+type CreateFileOptions struct {
	ParentID uuid.UUID
	Name     string
}

-func NewVirtualFS(db *bun.DB, blobStore blob.Store, keyResolver BlobKeyResolver) *VirtualFS {
-	return &VirtualFS{
-		db:          db,
-		blobStore:   blobStore,
-		keyResolver: keyResolver,
-	}
-}
+type FileContent struct {
+	reader  io.Reader
+	blobKey blob.Key
+}
+
+const RootDirectoryName = "root"
+
+func FileContentFromReader(reader io.Reader) FileContent {
+	return FileContent{reader: reader}
+}
+
+func FileContentFromBlobKey(blobKey blob.Key) FileContent {
+	return FileContent{blobKey: blobKey}
+}
+
+func NewVirtualFS(blobStore blob.Store, keyResolver BlobKeyResolver) (*VirtualFS, error) {
+	sqid, err := sqids.New()
+	if err != nil {
+		return nil, err
+	}
+	return &VirtualFS{
+		blobStore:   blobStore,
+		keyResolver: keyResolver,
+		sqid:        sqid,
+	}, nil
+}

-func (vfs *VirtualFS) FindNode(ctx context.Context, userID, fileID string) (*Node, error) {
+func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, accountID, fileID string) (*Node, error) {
	var node Node
-	err := vfs.db.NewSelect().Model(&node).
-		Where("user_id = ?", userID).
+	err := db.NewSelect().Model(&node).
+		Where("account_id = ?", accountID).
		Where("id = ?", fileID).
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
@@ -61,10 +80,10 @@ func (vfs *VirtualFS) FindNode(ctx context.Context, userID, fileID string) (*Nod
	return &node, nil
}

-func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, userID uuid.UUID, publicID string) (*Node, error) {
+func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicID string) (*Node, error) {
	var node Node
-	err := vfs.db.NewSelect().Model(&node).
-		Where("user_id = ?", userID).
+	err := db.NewSelect().Model(&node).
+		Where("account_id = ?", accountID).
		Where("public_id = ?", publicID).
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
@@ -78,14 +97,14 @@ func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, userID uuid.UUID,
	return &node, nil
}

-func (vfs *VirtualFS) ListChildren(ctx context.Context, node *Node) ([]*Node, error) {
+func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node) ([]*Node, error) {
	if !node.IsAccessible() {
		return nil, ErrNodeNotFound
	}

	var nodes []*Node
-	err := vfs.db.NewSelect().Model(&nodes).
-		Where("user_id = ?", node.UserID).
+	err := db.NewSelect().Model(&nodes).
+		Where("account_id = ?", node.AccountID).
		Where("parent_id = ?", node.ID).
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
@@ -100,22 +119,35 @@ func (vfs *VirtualFS) ListChildren(ctx context.Context, node *Node) ([]*Node, er
	return nodes, nil
}

-func (vfs *VirtualFS) WriteFile(ctx context.Context, userID uuid.UUID, reader io.Reader, opts WriteFileOptions) (*Node, error) {
+func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateFileOptions) (*Node, error) {
	pid, err := vfs.generatePublicID()
	if err != nil {
		return nil, err
	}

-	node := Node{
-		PublicID: pid,
-		UserID:   userID,
-		ParentID: opts.ParentID,
-		Kind:     NodeKindFile,
-		Status:   NodeStatusPending,
-		Name:     opts.Name,
-	}
-	_, err = vfs.db.NewInsert().Model(&node).Exec(ctx)
+	id, err := newNodeID()
+	if err != nil {
+		return nil, err
+	}
+
+	node := Node{
+		ID:        id,
+		PublicID:  pid,
+		AccountID: accountID,
+		ParentID:  opts.ParentID,
+		Kind:      NodeKindFile,
+		Status:    NodeStatusPending,
+		Name:      opts.Name,
+	}
+	if vfs.keyResolver.ShouldPersistKey() {
+		node.BlobKey, err = vfs.keyResolver.Resolve(ctx, &node)
+		if err != nil {
+			return nil, err
+		}
+	}
+	_, err = db.NewInsert().Model(&node).Returning("*").Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return nil, ErrNodeConflict
@@ -123,66 +155,112 @@ func (vfs *VirtualFS) WriteFile(ctx context.Context, userID uuid.UUID, reader io
		return nil, err
	}

-	cleanup := func() {
-		_, _ = vfs.db.NewDelete().Model(&node).WherePK().Exec(ctx)
-	}
-
-	key, err := vfs.keyResolver.Resolve(ctx, &node)
-	if err != nil {
-		cleanup()
-		return nil, err
-	}
-
-	h := make([]byte, 3072)
-	n, err := io.ReadFull(reader, h)
-	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
-		cleanup()
-		return nil, err
-	}
-	h = h[:n]
-
-	mt := mimetype.Detect(h)
-	cr := NewCountingReader(io.MultiReader(bytes.NewReader(h), reader))
-
-	err = vfs.blobStore.Put(ctx, key, cr)
-	if err != nil {
-		cleanup()
-		return nil, err
-	}
-
-	node.BlobKey = key
-	node.Size = cr.Count()
-	node.MimeType = mt.String()
-	node.Status = NodeStatusReady
-
-	_, err = vfs.db.NewUpdate().Model(&node).
-		Column("status", "blob_key", "size", "mime_type").
-		WherePK().
-		Exec(ctx)
-	if err != nil {
-		cleanup()
-		return nil, err
-	}
-
	return &node, nil
}

+func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, content FileContent) error {
+	if content.reader == nil && content.blobKey.IsNil() {
+		return blob.ErrInvalidFileContent
+	}
+	if !node.DeletedAt.IsZero() {
+		return ErrNodeNotFound
+	}
+
+	setCols := make([]string, 0, 4)
+
+	if content.reader != nil {
+		key, err := vfs.keyResolver.Resolve(ctx, node)
+		if err != nil {
+			return err
+		}
+
+		buf := make([]byte, 3072)
+		n, err := io.ReadFull(content.reader, buf)
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return err
+		}
+		buf = buf[:n]
+
+		mt := mimetype.Detect(buf)
+		cr := ioext.NewCountingReader(io.MultiReader(bytes.NewReader(buf), content.reader))
+
+		err = vfs.blobStore.Put(ctx, key, cr)
+		if err != nil {
+			return err
+		}
+
+		if vfs.keyResolver.ShouldPersistKey() {
+			node.BlobKey = key
+			setCols = append(setCols, "blob_key")
+		}
+		node.MimeType = mt.String()
+		node.Size = cr.Count()
+		node.Status = NodeStatusReady
+		setCols = append(setCols, "mime_type", "size", "status")
+	} else {
+		node.BlobKey = content.blobKey
+
+		b, err := vfs.blobStore.ReadRange(ctx, content.blobKey, 0, 3072)
+		if err != nil {
+			return err
+		}
+		defer b.Close()
+
+		buf := make([]byte, 3072)
+		n, err := io.ReadFull(b, buf)
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return err
+		}
+		buf = buf[:n]
+
+		mt := mimetype.Detect(buf)
+		node.MimeType = mt.String()
+		node.Status = NodeStatusReady
+
+		s, err := vfs.blobStore.ReadSize(ctx, content.blobKey)
+		if err != nil {
+			return err
+		}
+		node.Size = s
+		setCols = append(setCols, "mime_type", "blob_key", "size", "status")
+	}
+
+	_, err := db.NewUpdate().Model(node).
+		Column(setCols...).
+		WherePK().
+		Exec(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
-func (vfs *VirtualFS) CreateDirectory(ctx context.Context, userID uuid.UUID, parentID uuid.UUID, name string) (*Node, error) {
+func (vfs *VirtualFS) CreateDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID, parentID uuid.UUID, name string) (*Node, error) {
	pid, err := vfs.generatePublicID()
	if err != nil {
		return nil, err
	}

-	node := Node{
-		PublicID: pid,
-		UserID:   userID,
-		ParentID: parentID,
-		Kind:     NodeKindDirectory,
-		Status:   NodeStatusReady,
-		Name:     name,
-	}
-	_, err = vfs.db.NewInsert().Model(&node).Exec(ctx)
+	id, err := newNodeID()
+	if err != nil {
+		return nil, err
+	}
+
+	node := Node{
+		ID:        id,
+		PublicID:  pid,
+		AccountID: accountID,
+		ParentID:  parentID,
+		Kind:      NodeKindDirectory,
+		Status:    NodeStatusReady,
+		Name:      name,
+	}
+	_, err = db.NewInsert().Model(&node).Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return nil, ErrNodeConflict
@@ -193,12 +271,12 @@ func (vfs *VirtualFS) CreateDirectory(ctx context.Context, userID uuid.UUID, par
	return &node, nil
}

-func (vfs *VirtualFS) SoftDeleteNode(ctx context.Context, node *Node) error {
+func (vfs *VirtualFS) SoftDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
	if !node.IsAccessible() {
		return ErrNodeNotFound
	}

-	_, err := vfs.db.NewUpdate().Model(node).
+	_, err := db.NewUpdate().Model(node).
		WherePK().
		Where("deleted_at IS NULL").
		Where("status = ?", NodeStatusReady).
@@ -215,12 +293,12 @@ func (vfs *VirtualFS) SoftDeleteNode(ctx context.Context, node *Node) error {
	return nil
}

-func (vfs *VirtualFS) RestoreNode(ctx context.Context, node *Node) error {
-	if !node.IsAccessible() {
+func (vfs *VirtualFS) RestoreNode(ctx context.Context, db bun.IDB, node *Node) error {
+	if node.Status != NodeStatusReady {
		return ErrNodeNotFound
	}

-	_, err := vfs.db.NewUpdate().Model(node).
+	_, err := db.NewUpdate().Model(node).
		WherePK().
		Where("deleted_at IS NOT NULL").
		Set("deleted_at = NULL").
@@ -236,12 +314,12 @@ func (vfs *VirtualFS) RestoreNode(ctx context.Context, node *Node) error {
	return nil
}

-func (vfs *VirtualFS) RenameNode(ctx context.Context, node *Node, name string) error {
+func (vfs *VirtualFS) RenameNode(ctx context.Context, db bun.IDB, node *Node, name string) error {
	if !node.IsAccessible() {
		return ErrNodeNotFound
	}

-	_, err := vfs.db.NewUpdate().Model(node).
+	_, err := db.NewUpdate().Model(node).
		WherePK().
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
@@ -257,7 +335,7 @@ func (vfs *VirtualFS) RenameNode(ctx context.Context, node *Node, name string) e
	return nil
}

-func (vfs *VirtualFS) MoveNode(ctx context.Context, node *Node, parentID uuid.UUID) error {
+func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, parentID uuid.UUID) error {
	if !node.IsAccessible() {
		return ErrNodeNotFound
	}
@@ -267,7 +345,7 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, node *Node, parentID uuid.UU
		return err
	}

-	_, err = vfs.db.NewUpdate().Model(node).
+	_, err = db.NewUpdate().Model(node).
		WherePK().
		Where("status = ?", NodeStatusReady).
		Where("deleted_at IS NULL").
@@ -289,16 +367,14 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, node *Node, parentID uuid.UU
		return err
	}

-	if node.Kind == NodeKindFile && !node.BlobKey.IsNil() && oldKey != newKey {
-		// if node is a file, has a previous key, and the new key is different, we need to update the node with the new key
-		err = vfs.blobStore.Move(ctx, oldKey, newKey)
-		if err != nil {
-			return err
-		}
+	err = vfs.blobStore.Move(ctx, oldKey, newKey)
+	if err != nil {
+		return err
+	}

+	if vfs.keyResolver.ShouldPersistKey() {
		node.BlobKey = newKey
-
-		_, err = vfs.db.NewUpdate().Model(node).
+		_, err = db.NewUpdate().Model(node).
			WherePK().
			Set("blob_key = ?", newKey).
			Exec(ctx)
@@ -310,18 +386,42 @@ func (vfs *VirtualFS) MoveNode(ctx context.Context, node *Node, parentID uuid.UU
	return nil
}

-func (vfs *VirtualFS) AbsolutePath(ctx context.Context, node *Node) (string, error) {
+func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) {
	if !node.IsAccessible() {
		return "", ErrNodeNotFound
	}
-	return buildNodeAbsolutePath(ctx, vfs.db, node.ID)
+	return buildNodeAbsolutePath(ctx, db, node.ID)
}

-func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, node *Node) error {
+func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
	if !node.IsAccessible() {
		return ErrNodeNotFound
	}

+	switch node.Kind {
+	case NodeKindFile:
+		return vfs.permanentlyDeleteFileNode(ctx, db, node)
+	case NodeKindDirectory:
+		return vfs.permanentlyDeleteDirectoryNode(ctx, db, node)
+	default:
+		return ErrUnsupportedOperation
+	}
+}
+
+func (vfs *VirtualFS) permanentlyDeleteFileNode(ctx context.Context, db bun.IDB, node *Node) error {
+	err := vfs.blobStore.Delete(ctx, node.BlobKey)
+	if err != nil {
+		return err
+	}
+
+	_, err = db.NewDelete().Model(node).WherePK().Exec(ctx)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (vfs *VirtualFS) permanentlyDeleteDirectoryNode(ctx context.Context, db bun.IDB, node *Node) error {
	const descendantsQuery = `WITH RECURSIVE descendants AS (
		SELECT id, blob_key FROM vfs_nodes WHERE id = ?
		UNION ALL
@@ -335,43 +435,75 @@ func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, node *Node) err
		BlobKey blob.Key `bun:"blob_key"`
	}

-	var blobKeys []blob.Key
-
-	err := vfs.db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
-		var records []nodeRecord
-		err := tx.NewRaw(descendantsQuery, node.ID).Scan(ctx, &records)
-		if err != nil {
-			if errors.Is(err, sql.ErrNoRows) {
-				return ErrNodeNotFound
-			}
-			return err
-		}
-
-		if len(records) == 0 {
-			return ErrNodeNotFound
-		}
-
-		nodeIDs := make([]uuid.UUID, 0, len(records))
-		for _, r := range records {
-			nodeIDs = append(nodeIDs, r.ID)
-			if !r.BlobKey.IsNil() {
-				blobKeys = append(blobKeys, r.BlobKey)
-			}
-		}
-
-		_, err = tx.NewDelete().
-			Model((*Node)(nil)).
-			Where("id IN (?)", bun.In(nodeIDs)).
-			Exec(ctx)
-		return err
-	})
-	if err != nil {
-		return err
-	}
-
-	// Delete blobs outside transaction (best effort)
-	for _, key := range blobKeys {
-		_ = vfs.blobStore.Delete(ctx, key)
-	}
+	// If db is already a transaction, use it directly; otherwise start a new transaction
+	var tx bun.IDB
+	var startedTx *bun.Tx
+	switch v := db.(type) {
+	case *bun.DB:
+		newTx, err := v.BeginTx(ctx, nil)
+		if err != nil {
+			return err
+		}
+		startedTx = &newTx
+		tx = newTx
+		defer func() {
+			if startedTx != nil {
+				(*startedTx).Rollback()
+			}
+		}()
+	default:
+		// Assume it's already a transaction
+		tx = db
+	}
+
+	var records []nodeRecord
+	err := tx.NewRaw(descendantsQuery, node.ID).Scan(ctx, &records)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return ErrNodeNotFound
+		}
+		return err
+	}
+
+	if len(records) == 0 {
+		return ErrNodeNotFound
+	}
+
+	nodeIDs := make([]uuid.UUID, 0, len(records))
+	blobKeys := make([]blob.Key, 0, len(records))
+	for _, r := range records {
+		nodeIDs = append(nodeIDs, r.ID)
+		if !r.BlobKey.IsNil() {
+			blobKeys = append(blobKeys, r.BlobKey)
+		}
+	}
+
+	plan, err := vfs.keyResolver.ResolveDeletionKeys(ctx, node, blobKeys)
+	if err != nil {
+		return err
+	}
+
+	_, err = tx.NewDelete().
+		Model((*Node)(nil)).
+		Where("id IN (?)", bun.In(nodeIDs)).
+		Exec(ctx)
+	if err != nil {
+		return err
+	}
+
+	if !plan.Prefix.IsNil() {
+		_ = vfs.blobStore.DeletePrefix(ctx, plan.Prefix)
+	} else {
+		for _, key := range plan.Keys {
+			_ = vfs.blobStore.Delete(ctx, key)
+		}
+	}
+
+	// Only commit if we started the transaction
+	if startedTx != nil {
+		err := (*startedTx).Commit()
+		startedTx = nil // Prevent defer from rolling back
+		return err
+	}

	return nil
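
The type switch above is what lets every method accept bun.IDB (per the earlier "make vfs methods accept bun.IDB" refactor) while still getting a transaction when called with a bare *bun.DB. The same idea can be factored into a helper, sketched here under the assumption that any non-*bun.DB value is already a transaction:

// Hypothetical helper: run fn transactionally unless db is already a bun.Tx.
func inTx(ctx context.Context, db bun.IDB, fn func(tx bun.IDB) error) error {
	bdb, ok := db.(*bun.DB)
	if !ok {
		return fn(db) // already inside a transaction
	}
	return bdb.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
		return fn(tx)
	})
}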