Mirror of https://github.com/get-drexa/drive.git (synced 2025-12-01 05:51:39 +00:00)

Compare commits: 1feac70f7f ... main (30 commits)
Commits in this range:

e8a558d652, 3e96c42c4a, 6c61cbe1fd, ccdeaf0364, a3110b67c3, 1907cd83c8,
033ad65d5f, 987edc0d4a, fb8e91dd47, 0b8aee5d60, 89b62f6d8a, 1c1392a0a1,
6984bb209e, 629d56b5ab, 5e4e08c255, 42b805fbd1, ab4c14bc09, fd3b2d3908,
39824e45d9, 6aee150a59, 9ea76d2021, 987f36e1d2, 797b40a35c, e32e00a230,
b1e34f878c, c0e2f7ff37, 834517f3c0, 06c3951293, 389fe35a0a, 81e3f7af75
.devcontainer/devcontainer.json:

@@ -1,5 +1,4 @@
 {
-    "name": "React + Bun + Convex Development",
     "build": {
         "context": ".",
         "dockerfile": "Dockerfile"
@@ -7,9 +6,6 @@
     "features": {
         "ghcr.io/devcontainers/features/git:1": {},
         "ghcr.io/devcontainers/features/github-cli:1": {},
-        "ghcr.io/devcontainers/features/docker-in-docker:2": {
-            "moby": false
-        },
         "ghcr.io/tailscale/codespace/tailscale": {
             "version": "latest"
         },
@@ -24,21 +20,11 @@
             "extensions": [
                 "biomejs.biome",
                 "bradlc.vscode-tailwindcss",
-                "ms-vscode.vscode-typescript-next",
-                "esbenp.prettier-vscode",
-                "ms-vscode.vscode-json",
-                "formulahendry.auto-rename-tag",
                 "christian-kohler.path-intellisense",
-                "ms-vscode.vscode-eslint",
-                "convex.convex-vscode"
+                "golang.go"
             ],
             "settings": {
-                "editor.defaultFormatter": "biomejs.biome",
                 "editor.formatOnSave": true,
-                "editor.codeActionsOnSave": {
-                    "source.organizeImports.biome": "explicit",
-                    "source.fixAll.biome": "explicit"
-                },
                 "typescript.preferences.importModuleSpecifier": "relative",
                 "typescript.suggest.autoImports": true,
                 "emmet.includeLanguages": {
@@ -48,7 +34,63 @@
             "tailwindCSS.experimental.classRegex": [
                 ["cva\\(([^)]*)\\)", "[\"'`]([^\"'`]*).*?[\"'`]"],
                 ["cx\\(([^)]*)\\)", "(?:'|\"|`)([^']*)(?:'|\"|`)"]
-            ]
+            ],
+            "[javascript]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.organizeImports.biome": "explicit",
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[javascriptreact]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.organizeImports.biome": "explicit",
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[typescript]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.organizeImports.biome": "explicit",
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[typescriptreact]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.organizeImports.biome": "explicit",
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[json]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[jsonc]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "biomejs.biome",
+                "editor.codeActionsOnSave": {
+                    "source.fixAll.biome": "explicit"
+                }
+            },
+            "[go]": {
+                "editor.formatOnSave": true,
+                "editor.defaultFormatter": "golang.go",
+                "editor.codeActionsOnSave": {
+                    "source.organizeImports": "explicit"
+                }
+            },
+            "go.formatTool": "goimports",
+            "go.lintTool": "golangci-lint",
+            "go.useLanguageServer": true
         }
     }
 },
Server entrypoint (main.go):

@@ -1,22 +1,33 @@
 package main
 
 import (
+    "flag"
     "fmt"
     "log"
+    "os"
+
     "github.com/get-drexa/drexa/internal/drexa"
-    "github.com/joho/godotenv"
 )
 
 func main() {
-    _ = godotenv.Load()
+    configPath := flag.String("config", "", "path to config file (required)")
+    flag.Parse()
 
-    config, err := drexa.ServerConfigFromEnv()
+    if *configPath == "" {
+        fmt.Fprintln(os.Stderr, "error: --config is required")
+        flag.Usage()
+        os.Exit(1)
+    }
+
+    config, err := drexa.ConfigFromFile(*configPath)
+    if err != nil {
+        log.Fatalf("failed to load config: %v", err)
+    }
+
+    server, err := drexa.NewServer(*config)
     if err != nil {
         log.Fatal(err)
     }
 
-    server := drexa.NewServer(*config)
-    log.Fatal(server.Listen(fmt.Sprintf(":%d", config.Port)))
+    log.Fatal(server.Start())
 }
Migration entrypoint (main.go):

@@ -1,13 +1,37 @@
 package main
 
 import (
+    "context"
+    "flag"
+    "fmt"
     "log"
+    "os"
+
     "github.com/get-drexa/drexa/internal/database"
+    "github.com/get-drexa/drexa/internal/drexa"
 )
 
 func main() {
-    if err := database.RunMigrations(); err != nil {
-        log.Fatalf("Failed to run migrations: %v", err)
+    configPath := flag.String("config", "", "path to config file (required)")
+    flag.Parse()
+
+    if *configPath == "" {
+        fmt.Fprintln(os.Stderr, "error: --config is required")
+        flag.Usage()
+        os.Exit(1)
     }
+
+    config, err := drexa.ConfigFromFile(*configPath)
+    if err != nil {
+        log.Fatalf("failed to load config: %v", err)
+    }
+
+    db := database.NewFromPostgres(config.Database.PostgresURL)
+    defer db.Close()
+
+    log.Println("running migrations...")
+    if err := database.RunMigrations(context.Background(), db); err != nil {
+        log.Fatalf("failed to run migrations: %v", err)
+    }
+    log.Println("migrations completed successfully")
 }
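The migration entrypoint calls database.NewFromPostgres, whose implementation is not part of this diff. Given the pgdriver, pgdialect, and bundebug packages pinned in go.mod below, a minimal sketch might look like the following; the function body is an assumption, only the name and call site come from the diff.

// Hypothetical sketch of database.NewFromPostgres; not shown in this diff.
// It follows the standard bun + pgdriver wiring and attaches the bundebug
// query hook that go.mod now pulls in.
package database

import (
    "database/sql"

    "github.com/uptrace/bun"
    "github.com/uptrace/bun/dialect/pgdialect"
    "github.com/uptrace/bun/driver/pgdriver"
    "github.com/uptrace/bun/extra/bundebug"
)

func NewFromPostgres(dsn string) *bun.DB {
    // Open a database/sql handle backed by bun's native Postgres driver.
    sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
    db := bun.NewDB(sqldb, pgdialect.New())
    // Log failing queries (bundebug's default behavior).
    db.AddQueryHook(bundebug.NewQueryHook())
    return db
}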
apps/backend/config.example.yaml (new file, 30 lines):

# Drexa Backend Configuration
# Copy this file to config.yaml and adjust values for your environment.

server:
  port: 8080

database:
  postgres_url: postgres://user:password@localhost:5432/drexa?sslmode=disable

jwt:
  issuer: drexa
  audience: drexa-api
  # Secret key can be provided via (in order of precedence):
  # 1. JWT_SECRET_KEY environment variable (base64 encoded)
  # 2. secret_key_base64 below (base64 encoded)
  # 3. secret_key_path below (file with base64 encoded content)
  # secret_key_base64: "base64encodedkey"
  secret_key_path: /run/secrets/jwt_secret_key

storage:
  # Mode: "flat" (UUID-based keys) or "hierarchical" (path-based keys)
  # Note: S3 backend only supports "flat" mode
  mode: flat
  # Backend: "fs" (filesystem) or "s3" (not yet implemented)
  backend: fs
  # Required when backend is "fs"
  root_path: /var/lib/drexa/blobs
  # Required when backend is "s3"
  # bucket: my-drexa-bucket
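Both entrypoints above load this file through drexa.ConfigFromFile, whose implementation is not included in the diff. A minimal sketch, assuming the YAML layout of config.example.yaml and the gopkg.in/yaml.v3 dependency added in go.mod below (field names beyond the ones used by the call sites are guesses):

// Hypothetical sketch of drexa.ConfigFromFile; the real function lives in
// internal/drexa and is not shown in this diff.
package drexa

import (
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

type Config struct {
    Server struct {
        Port int `yaml:"port"`
    } `yaml:"server"`
    Database struct {
        PostgresURL string `yaml:"postgres_url"`
    } `yaml:"database"`
    // jwt and storage sections omitted for brevity
}

func ConfigFromFile(path string) (*Config, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("read config %s: %w", path, err)
    }
    var cfg Config
    if err := yaml.Unmarshal(data, &cfg); err != nil {
        return nil, fmt.Errorf("parse config %s: %w", path, err)
    }
    return &cfg, nil
}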
apps/backend/config.yaml (new file, 15 lines):

server:
  port: 8080

database:
  postgres_url: postgres://drexa:hunter2@helian:5433/drexa?sslmode=disable

jwt:
  issuer: drexa
  audience: drexa-api
  secret_key_base64: "pNeUExoqdakfecZLFL53NJpY4iB9zFot9EuEBItlYKY="

storage:
  mode: hierarchical
  backend: fs
  root_path: ./data
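config.yaml above uses the inline secret_key_base64 option, the second of the three sources listed in config.example.yaml. The resolver itself is not in the diff; a sketch of the documented precedence (type name JWTConfig and function name are hypothetical):

// Hypothetical sketch of the secret-key precedence described in
// config.example.yaml; the actual loader is not part of this diff.
package drexa

import (
    "encoding/base64"
    "errors"
    "os"
    "strings"
)

type JWTConfig struct {
    Issuer          string `yaml:"issuer"`
    Audience        string `yaml:"audience"`
    SecretKeyBase64 string `yaml:"secret_key_base64"`
    SecretKeyPath   string `yaml:"secret_key_path"`
}

func resolveJWTSecret(cfg JWTConfig) ([]byte, error) {
    // 1. JWT_SECRET_KEY environment variable (base64 encoded)
    if v := os.Getenv("JWT_SECRET_KEY"); v != "" {
        return base64.StdEncoding.DecodeString(v)
    }
    // 2. secret_key_base64 from the config file
    if cfg.SecretKeyBase64 != "" {
        return base64.StdEncoding.DecodeString(cfg.SecretKeyBase64)
    }
    // 3. secret_key_path: file containing base64 encoded content
    if cfg.SecretKeyPath != "" {
        raw, err := os.ReadFile(cfg.SecretKeyPath)
        if err != nil {
            return nil, err
        }
        return base64.StdEncoding.DecodeString(strings.TrimSpace(string(raw)))
    }
    return nil, errors.New("jwt secret key not configured")
}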
go.mod:

@@ -3,22 +3,26 @@ module github.com/get-drexa/drexa
 go 1.25.4
 
 require (
+    github.com/gabriel-vasile/mimetype v1.4.11
     github.com/gofiber/fiber/v2 v2.52.9
     github.com/google/uuid v1.6.0
-    github.com/uptrace/bun v1.2.15
-    golang.org/x/crypto v0.40.0
+    github.com/sqids/sqids-go v0.4.1
+    github.com/uptrace/bun v1.2.16
+    github.com/uptrace/bun/extra/bundebug v1.2.16
+    golang.org/x/crypto v0.45.0
+    gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
-    github.com/joho/godotenv v1.5.1 // indirect
+    github.com/fatih/color v1.18.0 // indirect
-    go.opentelemetry.io/otel v1.37.0 // indirect
+    go.opentelemetry.io/otel v1.38.0 // indirect
-    go.opentelemetry.io/otel/trace v1.37.0 // indirect
+    go.opentelemetry.io/otel/trace v1.38.0 // indirect
     mellium.im/sasl v0.3.2 // indirect
 )
 
 require (
     github.com/andybalholm/brotli v1.1.0 // indirect
-    github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+    github.com/golang-jwt/jwt/v5 v5.3.0
     github.com/jinzhu/inflection v1.0.0 // indirect
     github.com/klauspost/compress v1.17.9 // indirect
     github.com/mattn/go-colorable v0.1.14 // indirect
@@ -26,14 +30,13 @@ require (
     github.com/mattn/go-runewidth v0.0.16 // indirect
     github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
     github.com/rivo/uniseg v0.2.0 // indirect
-    github.com/stretchr/testify v1.10.0 // indirect
     github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
-    github.com/uptrace/bun/dialect/pgdialect v1.2.15
+    github.com/uptrace/bun/dialect/pgdialect v1.2.16
-    github.com/uptrace/bun/driver/pgdriver v1.2.15
+    github.com/uptrace/bun/driver/pgdriver v1.2.16
     github.com/valyala/bytebufferpool v1.0.0 // indirect
     github.com/valyala/fasthttp v1.51.0 // indirect
     github.com/valyala/tcplisten v1.0.0 // indirect
     github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
     github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-    golang.org/x/sys v0.34.0 // indirect
+    golang.org/x/sys v0.38.0 // indirect
 )
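go.mod also picks up github.com/sqids/sqids-go as a new direct dependency. Nothing in this diff shows where it is used; a typical use (and only a guess here) is minting short, URL-safe identifiers from numeric IDs:

// Minimal sqids usage sketch; how drexa actually uses the library is not
// visible in this diff.
package main

import (
    "fmt"

    "github.com/sqids/sqids-go"
)

func main() {
    s, _ := sqids.New()
    id, _ := s.Encode([]uint64{42}) // short URL-safe string
    fmt.Println(id)
    fmt.Println(s.Decode(id)) // [42]
}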
go.sum:

@@ -2,40 +2,52 @@ github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1
 github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
+github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
 github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
 github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
-github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
 github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
 github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/sqids/sqids-go v0.4.1 h1:eQKYzmAZbLlRwHeHYPF35QhgxwZHLnlmVj9AkIj/rrw=
+github.com/sqids/sqids-go v0.4.1/go.mod h1:EMwHuPQgSNFS0A49jESTfIQS+066XQTVhukrzEPScl8=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
-github.com/uptrace/bun v1.2.15 h1:Ut68XRBLDgp9qG9QBMa9ELWaZOmzHNdczHQdrOZbEFE=
-github.com/uptrace/bun v1.2.15/go.mod h1:Eghz7NonZMiTX/Z6oKYytJ0oaMEJ/eq3kEV4vSqG038=
+github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
+github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
-github.com/uptrace/bun/dialect/pgdialect v1.2.15 h1:er+/3giAIqpfrXJw+KP9B7ujyQIi5XkPnFmgjAVL6bA=
-github.com/uptrace/bun/dialect/pgdialect v1.2.15/go.mod h1:QSiz6Qpy9wlGFsfpf7UMSL6mXAL1jDJhFwuOVacCnOQ=
+github.com/uptrace/bun/dialect/pgdialect v1.2.16 h1:KFNZ0LxAyczKNfK/IJWMyaleO6eI9/Z5tUv3DE1NVL4=
+github.com/uptrace/bun/dialect/pgdialect v1.2.16/go.mod h1:IJdMeV4sLfh0LDUZl7TIxLI0LipF1vwTK3hBC7p5qLo=
-github.com/uptrace/bun/driver/pgdriver v1.2.15 h1:eZZ60ZtUUE6jjv6VAI1pCMaTgtx3sxmChQzwbvchOOo=
-github.com/uptrace/bun/driver/pgdriver v1.2.15/go.mod h1:s2zz/BAeScal4KLFDI8PURwATN8s9RDBsElEbnPAjv4=
+github.com/uptrace/bun/driver/pgdriver v1.2.16 h1:b1kpXKUxtTSGYow5Vlsb+dKV3z0R7aSAJNfMfKp61ZU=
+github.com/uptrace/bun/driver/pgdriver v1.2.16/go.mod h1:H6lUZ9CBfp1X5Vq62YGSV7q96/v94ja9AYFjKvdoTk0=
+github.com/uptrace/bun/extra/bundebug v1.2.16 h1:3OXAfHTU4ydu2+4j05oB1BxPx6+ypdWIVzTugl/7zl0=
+github.com/uptrace/bun/extra/bundebug v1.2.16/go.mod h1:vk6R/1i67/S2RvUI5AH/m3P5e67mOkfDCmmCsAPUumo=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
@@ -46,15 +58,18 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU
 github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
-golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
-golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 mellium.im/sasl v0.3.2 h1:PT6Xp7ccn9XaXAnJ03FcEjmAn7kK1x7aoXV6F+Vmrl0=
apps/backend/internal/account/account.go (new file, 23 lines):

package account

import (
    "time"

    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

type Account struct {
    bun.BaseModel `bun:"accounts"`

    ID                uuid.UUID `bun:",pk,type:uuid" json:"id"`
    UserID            uuid.UUID `bun:"user_id,notnull,type:uuid" json:"userId"`
    StorageUsageBytes int64     `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
    StorageQuotaBytes int64     `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`
    CreatedAt         time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
    UpdatedAt         time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}

func newAccountID() (uuid.UUID, error) {
    return uuid.NewV7()
}
apps/backend/internal/account/err.go (new file, 8 lines):

package account

import "errors"

var (
    ErrAccountNotFound      = errors.New("account not found")
    ErrAccountAlreadyExists = errors.New("account already exists")
)
apps/backend/internal/account/http.go (new file, 132 lines):

package account

import (
    "errors"

    "github.com/get-drexa/drexa/internal/auth"
    "github.com/get-drexa/drexa/internal/httperr"
    "github.com/get-drexa/drexa/internal/user"
    "github.com/gofiber/fiber/v2"
    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

type HTTPHandler struct {
    accountService *Service
    authService    *auth.Service
    db             *bun.DB
    authMiddleware fiber.Handler
}

type registerAccountRequest struct {
    Email       string `json:"email"`
    Password    string `json:"password"`
    DisplayName string `json:"displayName"`
}

type registerAccountResponse struct {
    Account      *Account   `json:"account"`
    User         *user.User `json:"user"`
    AccessToken  string     `json:"accessToken"`
    RefreshToken string     `json:"refreshToken"`
}

const currentAccountKey = "currentAccount"

func CurrentAccount(c *fiber.Ctx) *Account {
    return c.Locals(currentAccountKey).(*Account)
}

func NewHTTPHandler(accountService *Service, authService *auth.Service, db *bun.DB, authMiddleware fiber.Handler) *HTTPHandler {
    return &HTTPHandler{accountService: accountService, authService: authService, db: db, authMiddleware: authMiddleware}
}

func (h *HTTPHandler) RegisterRoutes(api fiber.Router) fiber.Router {
    api.Post("/accounts", h.registerAccount)

    account := api.Group("/accounts/:accountID")
    account.Use(h.authMiddleware)
    account.Use(h.accountMiddleware)

    account.Get("/", h.getAccount)

    return account
}

func (h *HTTPHandler) accountMiddleware(c *fiber.Ctx) error {
    user, err := auth.AuthenticatedUser(c)
    if err != nil {
        return c.SendStatus(fiber.StatusUnauthorized)
    }

    accountID, err := uuid.Parse(c.Params("accountID"))
    if err != nil {
        return c.SendStatus(fiber.StatusNotFound)
    }

    account, err := h.accountService.AccountByID(c.Context(), h.db, user.ID, accountID)
    if err != nil {
        if errors.Is(err, ErrAccountNotFound) {
            return c.SendStatus(fiber.StatusNotFound)
        }
        return httperr.Internal(err)
    }

    c.Locals(currentAccountKey, account)

    return c.Next()
}

func (h *HTTPHandler) getAccount(c *fiber.Ctx) error {
    account := CurrentAccount(c)
    if account == nil {
        return c.SendStatus(fiber.StatusNotFound)
    }
    return c.JSON(account)
}

func (h *HTTPHandler) registerAccount(c *fiber.Ctx) error {
    req := new(registerAccountRequest)
    if err := c.BodyParser(req); err != nil {
        return c.SendStatus(fiber.StatusBadRequest)
    }

    tx, err := h.db.BeginTx(c.Context(), nil)
    if err != nil {
        return httperr.Internal(err)
    }
    defer tx.Rollback()

    acc, u, err := h.accountService.Register(c.Context(), tx, RegisterOptions{
        Email:       req.Email,
        Password:    req.Password,
        DisplayName: req.DisplayName,
    })
    if err != nil {
        var ae *user.AlreadyExistsError
        if errors.As(err, &ae) {
            return c.SendStatus(fiber.StatusConflict)
        }
        if errors.Is(err, ErrAccountAlreadyExists) {
            return c.SendStatus(fiber.StatusConflict)
        }
        return httperr.Internal(err)
    }

    result, err := h.authService.GenerateTokenForUser(c.Context(), tx, u)
    if err != nil {
        return httperr.Internal(err)
    }

    err = tx.Commit()
    if err != nil {
        return httperr.Internal(err)
    }

    return c.JSON(registerAccountResponse{
        Account:      acc,
        User:         u,
        AccessToken:  result.AccessToken,
        RefreshToken: result.RefreshToken,
    })
}
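How this handler gets mounted is not part of the diff. A hedged wiring sketch, assuming an "/api" prefix and a composition root that already constructed the services (package name "app" is hypothetical):

// Hypothetical composition of the account routes; the real composition root
// is not included in this diff.
package app

import (
    "github.com/get-drexa/drexa/internal/account"
    "github.com/get-drexa/drexa/internal/auth"
    "github.com/gofiber/fiber/v2"
    "github.com/uptrace/bun"
)

func registerAccountRoutes(app *fiber.App, accountService *account.Service, authService *auth.Service, db *bun.DB) {
    api := app.Group("/api") // prefix is an assumption

    authMW := auth.NewBearerAuthMiddleware(authService, db)
    h := account.NewHTTPHandler(accountService, authService, db, authMW)
    h.RegisterRoutes(api)

    // Resulting routes:
    //   POST /api/accounts              (public registration)
    //   GET  /api/accounts/:accountID/  (bearer auth + ownership check)
}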
apps/backend/internal/account/service.go (new file, 115 lines):

package account

import (
    "context"
    "database/sql"
    "errors"

    "github.com/get-drexa/drexa/internal/database"
    "github.com/get-drexa/drexa/internal/password"
    "github.com/get-drexa/drexa/internal/user"
    "github.com/get-drexa/drexa/internal/virtualfs"
    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

type Service struct {
    userService user.Service
    vfs         *virtualfs.VirtualFS
}

type RegisterOptions struct {
    Email       string
    Password    string
    DisplayName string
}

type CreateAccountOptions struct {
    OrganizationID uuid.UUID
    QuotaBytes     int64
}

func NewService(userService *user.Service, vfs *virtualfs.VirtualFS) *Service {
    return &Service{
        userService: *userService,
        vfs:         vfs,
    }
}

func (s *Service) Register(ctx context.Context, db bun.IDB, opts RegisterOptions) (*Account, *user.User, error) {
    hashed, err := password.Hash(opts.Password)
    if err != nil {
        return nil, nil, err
    }

    u, err := s.userService.RegisterUser(ctx, db, user.UserRegistrationOptions{
        Email:       opts.Email,
        Password:    hashed,
        DisplayName: opts.DisplayName,
    })
    if err != nil {
        return nil, nil, err
    }

    acc, err := s.CreateAccount(ctx, db, u.ID, CreateAccountOptions{
        // TODO: make quota configurable
        QuotaBytes: 1024 * 1024 * 1024, // 1GB
    })
    if err != nil {
        return nil, nil, err
    }

    _, err = s.vfs.CreateDirectory(ctx, db, acc.ID, uuid.Nil, virtualfs.RootDirectoryName)
    if err != nil {
        return nil, nil, err
    }

    return acc, u, nil
}

func (s *Service) CreateAccount(ctx context.Context, db bun.IDB, userID uuid.UUID, opts CreateAccountOptions) (*Account, error) {
    id, err := newAccountID()
    if err != nil {
        return nil, err
    }

    account := &Account{
        ID:                id,
        UserID:            userID,
        StorageQuotaBytes: opts.QuotaBytes,
    }

    _, err = db.NewInsert().Model(account).Returning("*").Exec(ctx)
    if err != nil {
        if database.IsUniqueViolation(err) {
            return nil, ErrAccountAlreadyExists
        }
        return nil, err
    }

    return account, nil
}

func (s *Service) AccountByUserID(ctx context.Context, db bun.IDB, userID uuid.UUID) (*Account, error) {
    var account Account
    err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Scan(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return nil, ErrAccountNotFound
        }
        return nil, err
    }
    return &account, nil
}

func (s *Service) AccountByID(ctx context.Context, db bun.IDB, userID uuid.UUID, id uuid.UUID) (*Account, error) {
    var account Account
    err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Where("id = ?", id).Scan(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return nil, ErrAccountNotFound
        }
        return nil, err
    }
    return &account, nil
}
auth package, error definitions:

@@ -1,6 +1,11 @@
 package auth
 
-import "fmt"
+import (
+    "errors"
+    "fmt"
+)
+
+var ErrUnauthenticatedRequest = errors.New("unauthenticated request")
 
 type InvalidAccessTokenError struct {
     err error
auth package, HTTP handlers:

@@ -3,87 +3,63 @@ package auth
 import (
     "errors"
+
+    "github.com/get-drexa/drexa/internal/httperr"
+    "github.com/get-drexa/drexa/internal/user"
     "github.com/gofiber/fiber/v2"
+    "github.com/uptrace/bun"
 )
 
-const authServiceKey = "authService"
-
 type loginRequest struct {
     Email    string `json:"email"`
     Password string `json:"password"`
 }
 
-type registerRequest struct {
-    Email       string `json:"email"`
-    Password    string `json:"password"`
-    DisplayName string `json:"displayName"`
-}
-
 type loginResponse struct {
-    User         User   `json:"user"`
+    User         user.User `json:"user"`
     AccessToken  string `json:"accessToken"`
     RefreshToken string `json:"refreshToken"`
 }
 
-func RegisterAPIRoutes(api fiber.Router, s *Service) {
-    auth := api.Group("/auth", func(c *fiber.Ctx) error {
-        c.Locals(authServiceKey, s)
-        return c.Next()
-    })
-
-    auth.Post("/login", login)
-    auth.Post("/register", register)
+type HTTPHandler struct {
+    service *Service
+    db      *bun.DB
 }
 
-func mustAuthService(c *fiber.Ctx) *Service {
-    return c.Locals(authServiceKey).(*Service)
+func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
+    return &HTTPHandler{service: s, db: db}
 }
 
-func login(c *fiber.Ctx) error {
-    s := mustAuthService(c)
+func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
+    auth := api.Group("/auth")
+    auth.Post("/login", h.Login)
+}
+
+func (h *HTTPHandler) Login(c *fiber.Ctx) error {
     req := new(loginRequest)
     if err := c.BodyParser(req); err != nil {
         return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
     }
 
-    result, err := s.LoginWithEmailAndPassword(c.Context(), req.Email, req.Password)
+    tx, err := h.db.BeginTx(c.Context(), nil)
+    if err != nil {
+        return httperr.Internal(err)
+    }
+    defer tx.Rollback()
+
+    result, err := h.service.AuthenticateWithEmailAndPassword(c.Context(), tx, req.Email, req.Password)
     if err != nil {
         if errors.Is(err, ErrInvalidCredentials) {
             return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"error": "Invalid credentials"})
         }
-        return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{"error": "Internal server error"})
+        return httperr.Internal(err)
+    }
+
+    if err := tx.Commit(); err != nil {
+        return httperr.Internal(err)
     }
 
     return c.JSON(loginResponse{
-        User:         result.User,
+        User:         *result.User,
         AccessToken:  result.AccessToken,
         RefreshToken: result.RefreshToken,
     })
 }
-
-func register(c *fiber.Ctx) error {
-    s := mustAuthService(c)
-
-    req := new(registerRequest)
-    if err := c.BodyParser(req); err != nil {
-        return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
-    }
-
-    result, err := s.Register(c.Context(), registerOptions{
-        email:       req.Email,
-        password:    req.Password,
-        displayName: req.DisplayName,
-    })
-    if err != nil {
-        if errors.Is(err, ErrUserExists) {
-            return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "User already exists"})
-        }
-        return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{"error": "Internal server error"})
-    }
-
-    return c.JSON(loginResponse{
-        User:         result.User,
-        AccessToken:  result.AccessToken,
-        RefreshToken: result.RefreshToken,
-    })
-}
apps/backend/internal/auth/middleware.go (new file, 63 lines):

package auth

import (
    "errors"
    "log/slog"
    "strings"

    "github.com/get-drexa/drexa/internal/httperr"
    "github.com/get-drexa/drexa/internal/user"
    "github.com/gofiber/fiber/v2"
    "github.com/uptrace/bun"
)

const authenticatedUserKey = "authenticatedUser"

// NewBearerAuthMiddleware is a middleware that authenticates a request using a bearer token.
// To obtain the authenticated user in subsequent handlers, see AuthenticatedUser.
func NewBearerAuthMiddleware(s *Service, db *bun.DB) fiber.Handler {
    return func(c *fiber.Ctx) error {
        authHeader := c.Get("Authorization")
        if authHeader == "" {
            slog.Info("no auth header")
            return c.SendStatus(fiber.StatusUnauthorized)
        }

        parts := strings.Split(authHeader, " ")
        if len(parts) != 2 || parts[0] != "Bearer" {
            slog.Info("invalid auth header")
            return c.SendStatus(fiber.StatusUnauthorized)
        }

        token := parts[1]
        u, err := s.AuthenticateWithAccessToken(c.Context(), db, token)
        if err != nil {
            var e *InvalidAccessTokenError
            if errors.As(err, &e) {
                slog.Info("invalid access token")
                return c.SendStatus(fiber.StatusUnauthorized)
            }

            var nf *user.NotFoundError
            if errors.As(err, &nf) {
                slog.Info("user not found")
                return c.SendStatus(fiber.StatusUnauthorized)
            }

            return httperr.Internal(err)
        }

        c.Locals(authenticatedUserKey, u)

        return c.Next()
    }
}

// AuthenticatedUser returns the authenticated user from the given fiber context.
// Returns ErrUnauthenticatedRequest if not authenticated.
func AuthenticatedUser(c *fiber.Ctx) (*user.User, error) {
    if u, ok := c.Locals(authenticatedUserKey).(*user.User); ok {
        return u, nil
    }
    return nil, ErrUnauthenticatedRequest
}
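A handler behind this middleware reads the user back out of the request context with AuthenticatedUser. A short sketch (the "/me" route and package name are hypothetical; only the two auth functions come from the file above):

// Hypothetical protected route using the middleware above.
package app

import (
    "github.com/get-drexa/drexa/internal/auth"
    "github.com/gofiber/fiber/v2"
)

func registerMe(api fiber.Router, mw fiber.Handler) {
    api.Get("/me", mw, func(c *fiber.Ctx) error {
        u, err := auth.AuthenticatedUser(c)
        if err != nil {
            // Only reachable if the middleware did not run for this route.
            return c.SendStatus(fiber.StatusUnauthorized)
        }
        return c.JSON(u)
    })
}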
auth package, service:

@@ -4,109 +4,106 @@ import (
     "context"
     "encoding/hex"
     "errors"
+    "log/slog"
+
+    "github.com/get-drexa/drexa/internal/password"
+    "github.com/get-drexa/drexa/internal/user"
+    "github.com/google/uuid"
     "github.com/uptrace/bun"
 )
 
-type LoginResult struct {
-    User         User
+type AuthenticationResult struct {
+    User         *user.User
     AccessToken  string
     RefreshToken string
 }
 
 var ErrInvalidCredentials = errors.New("invalid credentials")
-var ErrUserExists = errors.New("user already exists")
 
 type Service struct {
-    db          *bun.DB
+    userService *user.Service
     tokenConfig TokenConfig
 }
 
-type registerOptions struct {
-    displayName string
-    email       string
-    password    string
-}
-
-func NewService(db *bun.DB, tokenConfig TokenConfig) *Service {
+func NewService(userService *user.Service, tokenConfig TokenConfig) *Service {
     return &Service{
-        db:          db,
+        userService: userService,
         tokenConfig: tokenConfig,
     }
 }
 
-func (s *Service) LoginWithEmailAndPassword(ctx context.Context, email, password string) (*LoginResult, error) {
-    var user User
-
-    err := s.db.NewSelect().Model(&user).Where("email = ?", email).Scan(ctx)
+func (s *Service) GenerateTokenForUser(ctx context.Context, db bun.IDB, user *user.User) (*AuthenticationResult, error) {
+    at, err := GenerateAccessToken(user, &s.tokenConfig)
     if err != nil {
         return nil, err
     }
 
-    ok, err := VerifyPassword(password, user.Password)
+    rt, err := GenerateRefreshToken(user, &s.tokenConfig)
+    if err != nil {
+        return nil, err
+    }
+
+    _, err = db.NewInsert().Model(rt).Exec(ctx)
+    if err != nil {
+        return nil, err
+    }
+
+    return &AuthenticationResult{
+        User:         user,
+        AccessToken:  at,
+        RefreshToken: hex.EncodeToString(rt.Token),
+    }, nil
+}
+
+func (s *Service) AuthenticateWithEmailAndPassword(ctx context.Context, db bun.IDB, email, plain string) (*AuthenticationResult, error) {
+    u, err := s.userService.UserByEmail(ctx, db, email)
+    if err != nil {
+        var nf *user.NotFoundError
+        if errors.As(err, &nf) {
+            return nil, ErrInvalidCredentials
+        }
+        return nil, err
+    }
+
+    ok, err := password.Verify(plain, u.Password)
     if err != nil || !ok {
         return nil, ErrInvalidCredentials
     }
 
-    at, err := GenerateAccessToken(&user, &s.tokenConfig)
+    at, err := GenerateAccessToken(u, &s.tokenConfig)
     if err != nil {
         return nil, err
     }
 
-    rt, err := GenerateRefreshToken(&user, &s.tokenConfig)
+    rt, err := GenerateRefreshToken(u, &s.tokenConfig)
     if err != nil {
         return nil, err
     }
 
-    _, err = s.db.NewInsert().Model(rt).Exec(ctx)
+    _, err = db.NewInsert().Model(rt).Exec(ctx)
     if err != nil {
         return nil, err
     }
 
-    return &LoginResult{
-        User:         user,
+    return &AuthenticationResult{
+        User:         u,
         AccessToken:  at,
         RefreshToken: hex.EncodeToString(rt.Token),
     }, nil
 }
 
-func (s *Service) Register(ctx context.Context, opts registerOptions) (*LoginResult, error) {
-    user := User{
-        Email:       opts.email,
-        DisplayName: opts.displayName,
-    }
-
-    exists, err := s.db.NewSelect().Model(&user).Where("email = ?", opts.email).Exists(ctx)
-    if err != nil {
-        return nil, err
-    }
-    if exists {
-        return nil, ErrUserExists
-    }
-
-    user.Password, err = HashPassword(opts.password)
+func (s *Service) AuthenticateWithAccessToken(ctx context.Context, db bun.IDB, token string) (*user.User, error) {
+    claims, err := ParseAccessToken(token, &s.tokenConfig)
     if err != nil {
+        slog.Info("failed to parse access token", "error", err)
         return nil, err
     }
 
-    _, err = s.db.NewInsert().Model(&user).Exec(ctx)
+    id, err := uuid.Parse(claims.Subject)
     if err != nil {
-        return nil, err
+        slog.Info("failed to parse access token subject", "error", err)
+        return nil, newInvalidAccessTokenError(err)
     }
 
-    at, err := GenerateAccessToken(&user, &s.tokenConfig)
-    if err != nil {
-        return nil, err
-    }
-
-    rt, err := GenerateRefreshToken(&user, &s.tokenConfig)
-    if err != nil {
-        return nil, err
-    }
-
-    return &LoginResult{
-        User:         user,
-        AccessToken:  at,
-        RefreshToken: hex.EncodeToString(rt.Token),
-    }, nil
+    return s.userService.UserByID(ctx, db, id)
 }
auth package, tokens:

@@ -7,6 +7,7 @@ import (
     "fmt"
     "time"
 
+    "github.com/get-drexa/drexa/internal/user"
     "github.com/golang-jwt/jwt/v5"
     "github.com/google/uuid"
     "github.com/uptrace/bun"
@@ -32,10 +33,14 @@ type RefreshToken struct {
     Token     []byte    `bun:"-"`
     TokenHash string    `bun:"token_hash,notnull"`
     ExpiresAt time.Time `bun:"expires_at,notnull"`
-    CreatedAt time.Time `bun:"created_at,notnull"`
+    CreatedAt time.Time `bun:"created_at,notnull,nullzero"`
 }
 
-func GenerateAccessToken(user *User, c *TokenConfig) (string, error) {
+func newTokenID() (uuid.UUID, error) {
+    return uuid.NewV7()
+}
+
+func GenerateAccessToken(user *user.User, c *TokenConfig) (string, error) {
     now := time.Now()
 
     token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
@@ -54,7 +59,7 @@ func GenerateAccessToken(user *User, c *TokenConfig) (string, error) {
     return signed, nil
 }
 
-func GenerateRefreshToken(user *User, c *TokenConfig) (*RefreshToken, error) {
+func GenerateRefreshToken(user *user.User, c *TokenConfig) (*RefreshToken, error) {
     now := time.Now()
 
     buf := make([]byte, refreshTokenByteLength)
@@ -62,9 +67,9 @@ func GenerateRefreshToken(user *User, c *TokenConfig) (*RefreshToken, error) {
         return nil, fmt.Errorf("failed to generate refresh token: %w", err)
     }
 
-    id, err := uuid.NewV7()
+    id, err := newTokenID()
     if err != nil {
-        return nil, fmt.Errorf("failed to generate refresh token: %w", err)
+        return nil, fmt.Errorf("failed to generate token ID: %w", err)
     }
 
     h := sha256.Sum256(buf)
@@ -80,6 +85,8 @@ func GenerateRefreshToken(user *User, c *TokenConfig) (*RefreshToken, error) {
     }, nil
 }
 
+// ParseAccessToken parses a JWT access token and returns the claims.
+// Returns an InvalidAccessTokenError if the token is invalid.
 func ParseAccessToken(token string, c *TokenConfig) (*jwt.RegisteredClaims, error) {
     parsed, err := jwt.ParseWithClaims(token, &jwt.RegisteredClaims{}, func(token *jwt.Token) (any, error) {
         return c.SecretKey, nil
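The diff shows refresh-token generation (random bytes, SHA-256 hash stored in TokenHash, hex-encoded token returned to the client) but not the verification side. A plausible helper, under the assumption that TokenHash holds the hex-encoded SHA-256 of the raw token bytes:

// Hypothetical lookup helper; not part of this diff.
package auth

import (
    "crypto/sha256"
    "encoding/hex"
)

// hashRefreshToken maps a client-presented token (hex string) back to the
// value stored in the token_hash column, so it can be used in a WHERE clause.
func hashRefreshToken(presented string) (string, error) {
    raw, err := hex.DecodeString(presented)
    if err != nil {
        return "", err
    }
    sum := sha256.Sum256(raw)
    return hex.EncodeToString(sum[:]), nil
}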
auth package, User model (file deleted; user data now lives in the internal/user package referenced above):

@@ -1,17 +0,0 @@
-package auth
-
-import (
-    "github.com/google/uuid"
-    "github.com/uptrace/bun"
-)
-
-type User struct {
-    bun.BaseModel `bun:"users"`
-
-    ID                uuid.UUID `bun:",pk,type:uuid" json:"id"`
-    DisplayName      string    `bun:"display_name,notnull" json:"displayName"`
-    Email             string    `bun:"email,unique,notnull" json:"email"`
-    Password          string    `bun:"password,notnull" json:"-"`
-    StorageUsageBytes int64     `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
-    StorageQuotaBytes int64     `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`
-}
apps/backend/internal/blob/err.go (new file, 9 lines):

package blob

import "errors"

var (
    ErrConflict           = errors.New("key already used for a different blob")
    ErrNotFound           = errors.New("key not found")
    ErrInvalidFileContent = errors.New("invalid file content. must provide either a reader or a blob key")
)
154
apps/backend/internal/blob/fs_store.go
Normal file
154
apps/backend/internal/blob/fs_store.go
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
package blob
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/get-drexa/drexa/internal/ioext"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ Store = &FSStore{}
|
||||||
|
|
||||||
|
type FSStore struct {
|
||||||
|
config FSStoreConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
type FSStoreConfig struct {
|
||||||
|
Root string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFSStore(config FSStoreConfig) *FSStore {
|
||||||
|
return &FSStore{config: config}
|
||||||
|
}
|
||||||
|
func (s *FSStore) Initialize(ctx context.Context) error {
	return os.MkdirAll(s.config.Root, 0755)
}

func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error {
	path := filepath.Join(s.config.Root, string(key))

	err := os.MkdirAll(filepath.Dir(path), 0755)
	if err != nil {
		return err
	}

	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
	if err != nil {
		if os.IsExist(err) {
			return ErrConflict
		}
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, reader)
	if err != nil {
		_ = os.Remove(path)
		return err
	}

	return nil
}

func (s *FSStore) Read(ctx context.Context, key Key) (io.ReadCloser, error) {
	path := filepath.Join(s.config.Root, string(key))
	f, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrNotFound
		}
		return nil, err
	}
	return f, nil
}

func (s *FSStore) ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error) {
	path := filepath.Join(s.config.Root, string(key))

	f, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrNotFound
		}
		return nil, err
	}

	_, err = f.Seek(offset, io.SeekStart)
	if err != nil {
		return nil, err
	}

	return ioext.NewLimitReadCloser(f, length), nil
}

func (s *FSStore) ReadSize(ctx context.Context, key Key) (int64, error) {
	path := filepath.Join(s.config.Root, string(key))
	fi, err := os.Stat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return 0, ErrNotFound
		}
		return 0, err
	}
	return fi.Size(), nil
}

func (s *FSStore) Delete(ctx context.Context, key Key) error {
	err := os.Remove(filepath.Join(s.config.Root, string(key)))
	// swallow the error if the file does not exist (no-op)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func (s *FSStore) DeletePrefix(ctx context.Context, prefix Key) error {
	prefixPath := filepath.Join(s.config.Root, string(prefix))
	err := os.RemoveAll(prefixPath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func (s *FSStore) Update(ctx context.Context, key Key, opts UpdateOptions) error {
	// Update is a no-op for FSStore
	return nil
}

func (s *FSStore) Move(ctx context.Context, srcKey, dstKey Key) error {
	oldPath := filepath.Join(s.config.Root, string(srcKey))
	newPath := filepath.Join(s.config.Root, string(dstKey))

	_, err := os.Stat(newPath)
	if err == nil {
		return ErrConflict
	}

	err = os.MkdirAll(filepath.Dir(newPath), 0755)
	if err != nil {
		return err
	}

	err = os.Rename(oldPath, newPath)
	if err != nil {
		if os.IsNotExist(err) {
			return ErrNotFound
		}
		return err
	}

	return nil
}

func (s *FSStore) SupportsDirectUpload() bool {
	return false
}

func (s *FSStore) GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error) {
	return "", nil
}
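A minimal usage sketch for this filesystem store, written as if from inside the module (blob is an internal package); the root path and key below are illustrative placeholders, not part of the diff:

	package main

	import (
		"bytes"
		"context"
		"fmt"
		"io"
		"log"

		"github.com/get-drexa/drexa/internal/blob"
	)

	func main() {
		ctx := context.Background()

		// NewFSStore and FSStoreConfig are used the same way in server.go below.
		store := blob.NewFSStore(blob.FSStoreConfig{Root: "/tmp/drexa-blobs"}) // illustrative root
		if err := store.Initialize(ctx); err != nil {
			log.Fatal(err)
		}

		key := blob.Key("demo/hello.txt") // illustrative key
		if err := store.Put(ctx, key, bytes.NewReader([]byte("hello"))); err != nil {
			log.Fatal(err) // a second Put with the same key would return ErrConflict
		}

		r, err := store.Read(ctx, key)
		if err != nil {
			log.Fatal(err) // ErrNotFound for unknown keys
		}
		defer r.Close()

		data, err := io.ReadAll(r)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(data)) // hello
	}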
7 apps/backend/internal/blob/key.go Normal file
@@ -0,0 +1,7 @@
package blob

type Key string

func (k Key) IsNil() bool {
	return k == ""
}
33 apps/backend/internal/blob/store.go Normal file
@@ -0,0 +1,33 @@
package blob

import (
	"context"
	"io"
	"time"
)

type UploadURLOptions struct {
	Duration time.Duration
}

type UpdateOptions struct {
	ContentType string
}

type Store interface {
	Initialize(ctx context.Context) error
	Put(ctx context.Context, key Key, reader io.Reader) error
	Update(ctx context.Context, key Key, opts UpdateOptions) error
	Delete(ctx context.Context, key Key) error
	DeletePrefix(ctx context.Context, prefix Key) error
	Move(ctx context.Context, srcKey, dstKey Key) error
	Read(ctx context.Context, key Key) (io.ReadCloser, error)
	ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error)
	ReadSize(ctx context.Context, key Key) (int64, error)

	// SupportsDirectUpload returns true if the store allows files to be uploaded directly to the blob store.
	SupportsDirectUpload() bool

	// GenerateUploadURL generates a URL that can be used to upload a file directly to the blob store. If unsupported, returns an empty string with no error.
	GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error)
}
61 apps/backend/internal/database/errs.go Normal file
@@ -0,0 +1,61 @@
package database

import (
	"errors"

	"github.com/uptrace/bun/driver/pgdriver"
)

// PostgreSQL SQLSTATE error codes.
// See: https://www.postgresql.org/docs/current/errcodes-appendix.html
const (
	PgUniqueViolation     = "23505"
	PgForeignKeyViolation = "23503"
	PgNotNullViolation    = "23502"
)

// PostgreSQL protocol error field identifiers used with pgdriver.Error.Field().
// See: https://www.postgresql.org/docs/current/protocol-error-fields.html
//
// Common fields:
//   - 'C' - SQLSTATE code (e.g., "23505")
//   - 'M' - Primary error message
//   - 'D' - Detail message
//   - 'H' - Hint
//   - 's' - Schema name
//   - 't' - Table name
//   - 'c' - Column name
//   - 'n' - Constraint name
const (
	pgFieldCode       = 'C'
	pgFieldConstraint = 'n'
)

// IsUniqueViolation checks if the error is a PostgreSQL unique constraint violation.
func IsUniqueViolation(err error) bool {
	return hasPgCode(err, PgUniqueViolation)
}

// IsForeignKeyViolation checks if the error is a PostgreSQL foreign key violation.
func IsForeignKeyViolation(err error) bool {
	return hasPgCode(err, PgForeignKeyViolation)
}

// IsNotNullViolation checks if the error is a PostgreSQL not-null constraint violation.
func IsNotNullViolation(err error) bool {
	return hasPgCode(err, PgNotNullViolation)
}

// ConstraintName returns the constraint name from a PostgreSQL error, or empty string if not applicable.
func ConstraintName(err error) string {
	var pgErr pgdriver.Error
	if errors.As(err, &pgErr) {
		return pgErr.Field(pgFieldConstraint)
	}
	return ""
}

func hasPgCode(err error, code string) bool {
	var pgErr pgdriver.Error
	return errors.As(err, &pgErr) && pgErr.Field(pgFieldCode) == code
}
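These helpers are meant to sit right at the call sites of bun queries; a sketch (the surrounding function is illustrative, and the pattern mirrors user.Service.RegisterUser later in this diff):

	package example

	import (
		"context"
		"fmt"

		"github.com/get-drexa/drexa/internal/database"
		"github.com/get-drexa/drexa/internal/user"
		"github.com/uptrace/bun"
	)

	// insertUser translates a raw pgdriver error into something the caller can act on.
	func insertUser(ctx context.Context, db bun.IDB, u *user.User) error {
		if _, err := db.NewInsert().Model(u).Exec(ctx); err != nil {
			if database.IsUniqueViolation(err) {
				// e.g. the users table's unique email constraint
				return fmt.Errorf("duplicate value (constraint %q)", database.ConstraintName(err))
			}
			return err
		}
		return nil
	}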
@@ -1,17 +1,28 @@
 package database
 
 import (
+	"context"
 	"embed"
 
+	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/migrate"
 )
 
 //go:embed migrations/*.sql
 var sqlMigrations embed.FS
 
-// RunMigrations discovers and runs all migrations in the migrations directory.
-// Currently, the migrations directory is in internal/db/migrations.
-func RunMigrations() error {
-	m := migrate.NewMigrations()
-	return m.Discover(sqlMigrations)
+// RunMigrations discovers and runs all migrations against the database.
+func RunMigrations(ctx context.Context, db *bun.DB) error {
+	migrations := migrate.NewMigrations()
+	if err := migrations.Discover(sqlMigrations); err != nil {
+		return err
+	}
+
+	migrator := migrate.NewMigrator(db, migrations)
+	if err := migrator.Init(ctx); err != nil {
+		return err
+	}
+
+	_, err := migrator.Migrate(ctx)
+	return err
 }
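With the new signature the caller supplies the *bun.DB; a startup sketch using NewFromPostgres from the hunk further below:

	package example

	import (
		"context"
		"log"

		"github.com/get-drexa/drexa/internal/database"
	)

	func migrateAtStartup(postgresURL string) {
		db := database.NewFromPostgres(postgresURL)
		if err := database.RunMigrations(context.Background(), db); err != nil {
			log.Fatalf("migrations failed: %v", err)
		}
	}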
106 apps/backend/internal/database/migrations/001_initial.up.sql Normal file
@@ -0,0 +1,106 @@
-- ============================================================================
-- Application Tables
-- ============================================================================

CREATE TABLE IF NOT EXISTS users (
    id UUID PRIMARY KEY,
    display_name TEXT,
    email TEXT NOT NULL UNIQUE,
    password TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_users_email ON users(email);

CREATE TABLE IF NOT EXISTS accounts (
    id UUID PRIMARY KEY,
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    storage_usage_bytes BIGINT NOT NULL DEFAULT 0,
    storage_quota_bytes BIGINT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_accounts_user_id ON accounts(user_id);

CREATE TABLE IF NOT EXISTS refresh_tokens (
    id UUID PRIMARY KEY,
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    token_hash TEXT NOT NULL UNIQUE,
    expires_at TIMESTAMPTZ NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_refresh_tokens_user_id ON refresh_tokens(user_id);
CREATE INDEX idx_refresh_tokens_token_hash ON refresh_tokens(token_hash);
CREATE INDEX idx_refresh_tokens_expires_at ON refresh_tokens(expires_at);

-- Virtual filesystem nodes (unified files + directories)
CREATE TABLE IF NOT EXISTS vfs_nodes (
    id UUID PRIMARY KEY,
    public_id TEXT NOT NULL UNIQUE, -- opaque ID for external API (no timestamp leak)
    account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
    parent_id UUID REFERENCES vfs_nodes(id) ON DELETE CASCADE, -- NULL = root directory
    kind TEXT NOT NULL CHECK (kind IN ('file', 'directory')),
    status TEXT NOT NULL DEFAULT 'ready' CHECK (status IN ('pending', 'ready')),
    name TEXT NOT NULL,

    -- File-specific fields (NULL for directories)
    blob_key TEXT, -- reference to blob storage (flat mode), NULL for hierarchical
    size BIGINT, -- file size in bytes
    mime_type TEXT, -- content type

    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ, -- soft delete for trash

    -- No duplicate names in same parent (per account, excluding deleted)
    CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (account_id, parent_id, name, deleted_at)
);

CREATE INDEX idx_vfs_nodes_account_id ON vfs_nodes(account_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_parent_id ON vfs_nodes(parent_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_account_parent ON vfs_nodes(account_id, parent_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(account_id, kind) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(account_id, deleted_at) WHERE deleted_at IS NOT NULL;
CREATE INDEX idx_vfs_nodes_public_id ON vfs_nodes(public_id);
CREATE UNIQUE INDEX idx_vfs_nodes_account_root ON vfs_nodes(account_id) WHERE parent_id IS NULL; -- one root per account
CREATE INDEX idx_vfs_nodes_pending ON vfs_nodes(created_at) WHERE status = 'pending'; -- for cleanup job

CREATE TABLE IF NOT EXISTS node_shares (
    id UUID PRIMARY KEY,
    node_id UUID NOT NULL REFERENCES vfs_nodes(id) ON DELETE CASCADE,
    share_token TEXT NOT NULL UNIQUE,
    expires_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_node_shares_share_token ON node_shares(share_token);
CREATE INDEX idx_node_shares_node_id ON node_shares(node_id);
CREATE INDEX idx_node_shares_expires_at ON node_shares(expires_at) WHERE expires_at IS NOT NULL;

-- ============================================================================
-- Triggers for updated_at timestamps
-- ============================================================================

CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_vfs_nodes_updated_at BEFORE UPDATE ON vfs_nodes
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_node_shares_updated_at BEFORE UPDATE ON node_shares
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_accounts_updated_at BEFORE UPDATE ON accounts
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
@@ -1,122 +0,0 @@
-- Enable UUID extension for UUIDv7 support
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

-- UUIDv7 generation function (timestamp-ordered UUIDs)
-- Based on the draft RFC: https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format
CREATE OR REPLACE FUNCTION uuid_generate_v7()
RETURNS UUID
AS $$
DECLARE
    unix_ts_ms BIGINT;
    uuid_bytes BYTEA;
BEGIN
    unix_ts_ms = (EXTRACT(EPOCH FROM CLOCK_TIMESTAMP()) * 1000)::BIGINT;
    uuid_bytes = OVERLAY(gen_random_bytes(16) PLACING
        SUBSTRING(INT8SEND(unix_ts_ms) FROM 3) FROM 1 FOR 6
    );
    -- Set version (7) and variant bits
    uuid_bytes = SET_BYTE(uuid_bytes, 6, (GET_BYTE(uuid_bytes, 6) & 15) | 112);
    uuid_bytes = SET_BYTE(uuid_bytes, 8, (GET_BYTE(uuid_bytes, 8) & 63) | 128);
    RETURN ENCODE(uuid_bytes, 'hex')::UUID;
END;
$$ LANGUAGE plpgsql VOLATILE;

-- ============================================================================
-- Application Tables
-- ============================================================================

CREATE TABLE IF NOT EXISTS users (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
    display_name TEXT,
    email TEXT NOT NULL UNIQUE,
    password TEXT NOT NULL,
    storage_usage_bytes BIGINT NOT NULL,
    storage_quota_bytes BIGINT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_users_email ON users(email);

CREATE TABLE IF NOT EXISTS refresh_tokens (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    token_hash TEXT NOT NULL UNIQUE,
    expires_at TIMESTAMPTZ NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_refresh_tokens_user_id ON refresh_tokens(user_id);
CREATE INDEX idx_refresh_tokens_token_hash ON refresh_tokens(token_hash);
CREATE INDEX idx_refresh_tokens_expires_at ON refresh_tokens(expires_at);

CREATE TABLE IF NOT EXISTS directories (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    parent_id UUID REFERENCES directories(id) ON DELETE CASCADE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ,
    CONSTRAINT unique_directory_path UNIQUE NULLS NOT DISTINCT (user_id, parent_id, name, deleted_at)
);

CREATE INDEX idx_directories_user_id ON directories(user_id, deleted_at);
CREATE INDEX idx_directories_path ON directories(user_id, path, deleted_at);

CREATE TABLE IF NOT EXISTS files (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    directory_id UUID REFERENCES directories(id) ON DELETE CASCADE,
    name TEXT NOT NULL,
    size BIGINT NOT NULL,
    mime_type TEXT,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ,
    last_accessed_at TIMESTAMPTZ,
    CONSTRAINT unique_file_in_directory UNIQUE NULLS NOT DISTINCT (user_id, directory_id, name, deleted_at)
);

CREATE INDEX idx_files_user_id ON files(user_id, deleted_at);
CREATE INDEX idx_files_directory_id ON files(directory_id) WHERE directory_id IS NOT NULL;
CREATE INDEX idx_files_path ON files(user_id, path, deleted_at);
CREATE INDEX idx_files_deleted_at ON files(deleted_at) WHERE deleted_at IS NOT NULL;
CREATE INDEX idx_files_last_accessed_at ON files(user_id, deleted_at, last_accessed_at);

CREATE TABLE IF NOT EXISTS file_shares (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v7(),
    file_id UUID NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    share_token TEXT NOT NULL UNIQUE,
    expires_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX idx_file_shares_share_token ON file_shares(share_token);
CREATE INDEX idx_file_shares_file_id ON file_shares(file_id);
CREATE INDEX idx_file_shares_expires_at ON file_shares(expires_at) WHERE expires_at IS NOT NULL;

-- ============================================================================
-- Triggers for updated_at timestamps
-- ============================================================================

CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_directories_updated_at BEFORE UPDATE ON directories
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_files_updated_at BEFORE UPDATE ON files
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

CREATE TRIGGER update_file_shares_updated_at BEFORE UPDATE ON file_shares
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
@@ -2,6 +2,7 @@ package database
 
 import (
 	"database/sql"
+	"time"
 
 	"github.com/uptrace/bun"
 	"github.com/uptrace/bun/dialect/pgdialect"
@@ -10,6 +11,17 @@ import (
 
 func NewFromPostgres(url string) *bun.DB {
 	sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(url)))
+
+	// Configure connection pool to prevent "database closed" errors
+	// SetMaxOpenConns sets the maximum number of open connections to the database
+	sqldb.SetMaxOpenConns(25)
+	// SetMaxIdleConns sets the maximum number of connections in the idle connection pool
+	sqldb.SetMaxIdleConns(5)
+	// SetConnMaxLifetime sets the maximum amount of time a connection may be reused
+	sqldb.SetConnMaxLifetime(5 * time.Minute)
+	// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle
+	sqldb.SetConnMaxIdleTime(10 * time.Minute)
+
 	db := bun.NewDB(sqldb, pgdialect.New())
 	return db
 }
155 apps/backend/internal/drexa/config.go Normal file
@@ -0,0 +1,155 @@
package drexa

import (
	"encoding/base64"
	"errors"
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

type StorageMode string
type StorageBackend string

const (
	StorageModeFlat         StorageMode = "flat"
	StorageModeHierarchical StorageMode = "hierarchical"
)

const (
	StorageBackendFS StorageBackend = "fs"
	StorageBackendS3 StorageBackend = "s3"
)

type Config struct {
	Server   ServerConfig   `yaml:"server"`
	Database DatabaseConfig `yaml:"database"`
	JWT      JWTConfig      `yaml:"jwt"`
	Storage  StorageConfig  `yaml:"storage"`
}

type ServerConfig struct {
	Port int `yaml:"port"`
}

type DatabaseConfig struct {
	PostgresURL string `yaml:"postgres_url"`
}

type JWTConfig struct {
	Issuer          string `yaml:"issuer"`
	Audience        string `yaml:"audience"`
	SecretKeyBase64 string `yaml:"secret_key_base64"`
	SecretKeyPath   string `yaml:"secret_key_path"`
	SecretKey       []byte `yaml:"-"`
}

type StorageConfig struct {
	Mode     StorageMode    `yaml:"mode"`
	Backend  StorageBackend `yaml:"backend"`
	RootPath string         `yaml:"root_path"`
	Bucket   string         `yaml:"bucket"`
}

// ConfigFromFile loads configuration from a YAML file.
// JWT secret key is loaded from JWT_SECRET_KEY env var (base64 encoded),
// falling back to the file path specified in jwt.secret_key_path.
func ConfigFromFile(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, fmt.Errorf("config file not found: %s", path)
		}
		return nil, err
	}

	var config Config
	if err := yaml.Unmarshal(data, &config); err != nil {
		return nil, err
	}

	// Load JWT secret key (priority: env var > config base64 > config file path)
	if envKey := os.Getenv("JWT_SECRET_KEY"); envKey != "" {
		key, err := base64.StdEncoding.DecodeString(envKey)
		if err != nil {
			return nil, errors.New("JWT_SECRET_KEY env var is not valid base64")
		}
		config.JWT.SecretKey = key
	} else if config.JWT.SecretKeyBase64 != "" {
		key, err := base64.StdEncoding.DecodeString(config.JWT.SecretKeyBase64)
		if err != nil {
			return nil, errors.New("jwt.secret_key_base64 is not valid base64")
		}
		config.JWT.SecretKey = key
	} else if config.JWT.SecretKeyPath != "" {
		keyData, err := os.ReadFile(config.JWT.SecretKeyPath)
		if err != nil {
			return nil, err
		}
		key, err := base64.StdEncoding.DecodeString(string(keyData))
		if err != nil {
			return nil, errors.New("jwt.secret_key_path file content is not valid base64")
		}
		config.JWT.SecretKey = key
	}

	if errs := config.Validate(); len(errs) > 0 {
		return nil, NewConfigError(errs...)
	}

	return &config, nil
}

// Validate checks for required configuration fields.
func (c *Config) Validate() []error {
	var errs []error

	// Server
	if c.Server.Port == 0 {
		errs = append(errs, errors.New("server.port is required"))
	}

	// Database
	if c.Database.PostgresURL == "" {
		errs = append(errs, errors.New("database.postgres_url is required"))
	}

	// JWT
	if c.JWT.Issuer == "" {
		errs = append(errs, errors.New("jwt.issuer is required"))
	}
	if c.JWT.Audience == "" {
		errs = append(errs, errors.New("jwt.audience is required"))
	}
	if len(c.JWT.SecretKey) == 0 {
		errs = append(errs, errors.New("jwt secret key is required (set JWT_SECRET_KEY env var, jwt.secret_key_base64, or jwt.secret_key_path)"))
	}

	// Storage
	if c.Storage.Mode == "" {
		errs = append(errs, errors.New("storage.mode is required"))
	} else if c.Storage.Mode != StorageModeFlat && c.Storage.Mode != StorageModeHierarchical {
		errs = append(errs, errors.New("storage.mode must be 'flat' or 'hierarchical'"))
	}

	if c.Storage.Backend == "" {
		errs = append(errs, errors.New("storage.backend is required"))
	} else if c.Storage.Backend != StorageBackendFS && c.Storage.Backend != StorageBackendS3 {
		errs = append(errs, errors.New("storage.backend must be 'fs' or 's3'"))
	}

	if c.Storage.Backend == StorageBackendFS && c.Storage.RootPath == "" {
		errs = append(errs, errors.New("storage.root_path is required when backend is 'fs'"))
	}
	if c.Storage.Backend == StorageBackendS3 {
		if c.Storage.Bucket == "" {
			errs = append(errs, errors.New("storage.bucket is required when backend is 's3'"))
		}
		if c.Storage.Mode == StorageModeHierarchical {
			errs = append(errs, errors.New("storage.mode must be 'flat' when backend is 's3'"))
		}
	}

	return errs
}
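For reference, a YAML file accepted by ConfigFromFile and passing Validate could look like this; every value is an illustrative placeholder, not part of the diff:

	server:
	  port: 8080

	database:
	  postgres_url: "postgres://drexa:drexa@localhost:5432/drexa?sslmode=disable"

	jwt:
	  issuer: "drexa"
	  audience: "drexa-api"
	  # base64-encoded key; alternatively set JWT_SECRET_KEY or jwt.secret_key_path
	  secret_key_base64: "c2VjcmV0LWtleS1wbGFjZWhvbGRlcg=="

	storage:
	  mode: "flat"
	  backend: "fs"
	  root_path: "/var/lib/drexa/blobs"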
@@ -5,17 +5,17 @@ import (
 	"strings"
 )
 
-type ServerConfigError struct {
+type ConfigError struct {
 	Errors []error
 }
 
-func NewServerConfigError(errs ...error) *ServerConfigError {
-	return &ServerConfigError{Errors: errs}
+func NewConfigError(errs ...error) *ConfigError {
+	return &ConfigError{Errors: errs}
 }
 
-func (e *ServerConfigError) Error() string {
+func (e *ConfigError) Error() string {
 	sb := strings.Builder{}
-	sb.WriteString("invalid server config:\n")
+	sb.WriteString("invalid config:\n")
 	for _, err := range e.Errors {
 		sb.WriteString(fmt.Sprintf(" - %s\n", err.Error()))
 	}
@@ -1,84 +1,114 @@
 package drexa
 
 import (
-	"encoding/hex"
-	"errors"
+	"context"
 	"fmt"
-	"os"
-	"strconv"
 
+	"github.com/get-drexa/drexa/internal/account"
 	"github.com/get-drexa/drexa/internal/auth"
+	"github.com/get-drexa/drexa/internal/blob"
 	"github.com/get-drexa/drexa/internal/database"
+	"github.com/get-drexa/drexa/internal/httperr"
+	"github.com/get-drexa/drexa/internal/upload"
+	"github.com/get-drexa/drexa/internal/user"
+	"github.com/get-drexa/drexa/internal/virtualfs"
 	"github.com/gofiber/fiber/v2"
+	"github.com/gofiber/fiber/v2/middleware/logger"
+	"github.com/uptrace/bun"
+	"github.com/uptrace/bun/extra/bundebug"
 )
 
-type ServerConfig struct {
-	Port         int
-	PostgresURL  string
-	JWTIssuer    string
-	JWTAudience  string
-	JWTSecretKey []byte
+type Server struct {
+	config         Config
+	app            *fiber.App
+	db             *bun.DB
+	blobStore      blob.Store
+	vfs            *virtualfs.VirtualFS
+	userService    *user.Service
+	authService    *auth.Service
+	accountService *account.Service
+	uploadService  *upload.Service
+	authMiddleware fiber.Handler
 }
 
-func NewServer(c ServerConfig) *fiber.App {
-	app := fiber.New()
-	db := database.NewFromPostgres(c.PostgresURL)
-
-	authService := auth.NewService(db, auth.TokenConfig{
-		Issuer:    c.JWTIssuer,
-		Audience:  c.JWTAudience,
-		SecretKey: c.JWTSecretKey,
+func NewServer(c Config) (*Server, error) {
+	app := fiber.New(fiber.Config{
+		ErrorHandler:      httperr.ErrorHandler,
+		StreamRequestBody: true,
 	})
+	app.Use(logger.New())
+
+	db := database.NewFromPostgres(c.Database.PostgresURL)
+	db.AddQueryHook(bundebug.NewQueryHook(bundebug.WithVerbose(true)))
+
+	// Initialize blob store based on config
+	var blobStore blob.Store
+	switch c.Storage.Backend {
+	case StorageBackendFS:
+		blobStore = blob.NewFSStore(blob.FSStoreConfig{
+			Root: c.Storage.RootPath,
+		})
+	case StorageBackendS3:
+		return nil, fmt.Errorf("s3 storage backend not yet implemented")
+	default:
+		return nil, fmt.Errorf("unknown storage backend: %s", c.Storage.Backend)
+	}
+
+	err := blobStore.Initialize(context.Background())
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize blob store: %w", err)
+	}
+
+	// Initialize key resolver based on config
+	var keyResolver virtualfs.BlobKeyResolver
+	switch c.Storage.Mode {
+	case StorageModeFlat:
+		keyResolver = virtualfs.NewFlatKeyResolver()
+	case StorageModeHierarchical:
+		keyResolver = virtualfs.NewHierarchicalKeyResolver(db)
+	default:
+		return nil, fmt.Errorf("unknown storage mode: %s", c.Storage.Mode)
+	}
+
+	vfs, err := virtualfs.NewVirtualFS(blobStore, keyResolver)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create virtual file system: %w", err)
+	}
+
+	userService := user.NewService()
+	authService := auth.NewService(userService, auth.TokenConfig{
+		Issuer:    c.JWT.Issuer,
+		Audience:  c.JWT.Audience,
+		SecretKey: c.JWT.SecretKey,
+	})
+	uploadService := upload.NewService(vfs, blobStore)
+	accountService := account.NewService(userService)
+
+	authMiddleware := auth.NewBearerAuthMiddleware(authService, db)
 
 	api := app.Group("/api")
-	auth.RegisterAPIRoutes(api, authService)
 
-	return app
-}
+	accRouter := account.NewHTTPHandler(accountService, authService, db, authMiddleware).RegisterRoutes(api)
 
-// ServerConfigFromEnv creates a ServerConfig from environment variables.
-func ServerConfigFromEnv() (*ServerConfig, error) {
-	c := ServerConfig{
-		PostgresURL: os.Getenv("POSTGRES_URL"),
-		JWTIssuer:   os.Getenv("JWT_ISSUER"),
-		JWTAudience: os.Getenv("JWT_AUDIENCE"),
-	}
-
-	errs := []error{}
-
-	keyHex := os.Getenv("JWT_SECRET_KEY")
-	if keyHex == "" {
-		errs = append(errs, errors.New("JWT_SECRET_KEY is required"))
-	} else {
-		k, err := hex.DecodeString(keyHex)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("failed to decode JWT_SECRET_KEY: %w", err))
-		}
-		c.JWTSecretKey = k
-	}
-
-	p, err := strconv.Atoi(os.Getenv("PORT"))
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failed to parse PORT: %w", err))
-	}
-	c.Port = p
-
-	if c.PostgresURL == "" {
-		errs = append(errs, errors.New("POSTGRES_URL is required"))
-	}
-	if c.JWTIssuer == "" {
-		errs = append(errs, errors.New("JWT_ISSUER is required"))
-	}
-	if c.JWTAudience == "" {
-		errs = append(errs, errors.New("JWT_AUDIENCE is required"))
-	}
-	if len(c.JWTSecretKey) == 0 {
-		errs = append(errs, errors.New("JWT_SECRET_KEY is required"))
-	}
-
-	if len(errs) > 0 {
-		return nil, NewServerConfigError(errs...)
-	}
-
-	return &c, nil
+	auth.NewHTTPHandler(authService, db).RegisterRoutes(api)
+	upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accRouter)
+
+	s := &Server{
+		config:         c,
+		app:            app,
+		db:             db,
+		blobStore:      blobStore,
+		vfs:            vfs,
+		userService:    userService,
+		authService:    authService,
+		accountService: accountService,
+		uploadService:  uploadService,
+		authMiddleware: authMiddleware,
+	}
+
+	return s, nil
+}
+
+func (s *Server) Start() error {
+	return s.app.Listen(fmt.Sprintf(":%d", s.config.Server.Port))
 }
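Putting config.go and the new Server type together, the entrypoint wiring is roughly the following sketch; the actual cmd package is not part of this diff, and config.yaml is a placeholder path:

	package main

	import (
		"log"

		"github.com/get-drexa/drexa/internal/drexa"
	)

	func main() {
		cfg, err := drexa.ConfigFromFile("config.yaml") // hypothetical path
		if err != nil {
			log.Fatal(err)
		}

		srv, err := drexa.NewServer(*cfg)
		if err != nil {
			log.Fatal(err)
		}

		log.Fatal(srv.Start())
	}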
42 apps/backend/internal/httperr/error.go Normal file
@@ -0,0 +1,42 @@
package httperr

import (
	"fmt"

	"github.com/gofiber/fiber/v2"
)

// HTTPError represents an HTTP error with a status code and underlying error.
type HTTPError struct {
	Code    int
	Message string
	Err     error
}

// Error implements the error interface.
func (e *HTTPError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("HTTP %d: %s: %v", e.Code, e.Message, e.Err)
	}
	return fmt.Sprintf("HTTP %d: %s", e.Code, e.Message)
}

// Unwrap returns the underlying error.
func (e *HTTPError) Unwrap() error {
	return e.Err
}

// NewHTTPError creates a new HTTPError with the given status code, message, and underlying error.
func NewHTTPError(code int, message string, err error) *HTTPError {
	return &HTTPError{
		Code:    code,
		Message: message,
		Err:     err,
	}
}

// Internal creates a new HTTPError with status 500.
func Internal(err error) *HTTPError {
	return NewHTTPError(fiber.StatusInternalServerError, "Internal", err)
}
64 apps/backend/internal/httperr/handler.go Normal file
@@ -0,0 +1,64 @@
package httperr

import (
	"errors"
	"log/slog"

	"github.com/gofiber/fiber/v2"
)

// ErrorHandler is a global error handler for Fiber that logs errors and returns appropriate responses.
func ErrorHandler(c *fiber.Ctx, err error) error {
	// Default status code
	code := fiber.StatusInternalServerError
	message := "Internal"

	// Check if it's our custom HTTPError
	var httpErr *HTTPError
	if errors.As(err, &httpErr) {
		code = httpErr.Code
		message = httpErr.Message

		// Log the error with underlying error details
		if httpErr.Err != nil {
			slog.Error("HTTP error",
				"status", code,
				"message", message,
				"error", httpErr.Err.Error(),
				"path", c.Path(),
				"method", c.Method(),
			)
		} else {
			slog.Warn("HTTP error",
				"status", code,
				"message", message,
				"path", c.Path(),
				"method", c.Method(),
			)
		}
	} else {
		// Check if it's a Fiber error
		var fiberErr *fiber.Error
		if errors.As(err, &fiberErr) {
			code = fiberErr.Code
			message = fiberErr.Message
		} else {
			// Generic error - log it
			slog.Error("Unhandled error",
				"status", code,
				"error", err.Error(),
				"path", c.Path(),
				"method", c.Method(),
			)
		}
	}

	// Set Content-Type header
	c.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSONCharsetUTF8)

	// Return JSON response
	return c.Status(code).JSON(fiber.Map{
		"error": message,
	})
}
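With ErrorHandler registered via fiber.Config (see server.go above), any handler can simply return an *HTTPError; a sketch with an illustrative route and messages:

	package example

	import (
		"errors"

		"github.com/get-drexa/drexa/internal/httperr"
		"github.com/gofiber/fiber/v2"
	)

	func register(app *fiber.App) {
		app.Get("/boom", func(c *fiber.Ctx) error {
			// Propagates to httperr.ErrorHandler, which logs the wrapped
			// error and replies 502 with {"error":"upstream failed"}.
			return httperr.NewHTTPError(fiber.StatusBadGateway, "upstream failed", errors.New("dial tcp: timeout"))
		})
	}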
23 apps/backend/internal/ioext/counting_reader.go Normal file
@@ -0,0 +1,23 @@
package ioext

import "io"

type CountingReader struct {
	reader io.Reader
	count  int64
}

func NewCountingReader(reader io.Reader) *CountingReader {
	return &CountingReader{reader: reader}
}

func (r *CountingReader) Read(p []byte) (n int, err error) {
	n, err = r.reader.Read(p)
	r.count += int64(n)
	return n, err
}

func (r *CountingReader) Count() int64 {
	return r.count
}
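A sketch of the intended use of CountingReader: wrap a stream, copy it, then ask how many bytes passed through (a plausible input, for instance, for accounting against storage_usage_bytes; that pairing is an assumption, not shown in this diff):

	package main

	import (
		"io"
		"os"
		"strings"

		"github.com/get-drexa/drexa/internal/ioext"
	)

	// copyCounted copies src to dst and reports how many bytes were consumed.
	func copyCounted(dst io.Writer, src io.Reader) (int64, error) {
		cr := ioext.NewCountingReader(src)
		if _, err := io.Copy(dst, cr); err != nil {
			return 0, err
		}
		return cr.Count(), nil
	}

	func main() {
		n, _ := copyCounted(os.Stdout, strings.NewReader("hello"))
		_ = n // 5
	}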
24 apps/backend/internal/ioext/limit_read_closer.go Normal file
@@ -0,0 +1,24 @@
package ioext

import "io"

type LimitReadCloser struct {
	reader      io.ReadCloser
	limitReader io.Reader
}

func NewLimitReadCloser(reader io.ReadCloser, length int64) *LimitReadCloser {
	return &LimitReadCloser{
		reader:      reader,
		limitReader: io.LimitReader(reader, length),
	}
}

func (r *LimitReadCloser) Read(p []byte) (n int, err error) {
	return r.limitReader.Read(p)
}

func (r *LimitReadCloser) Close() error {
	return r.reader.Close()
}
@@ -1,4 +1,4 @@
-package auth
+package password
 
 import (
 	"crypto/rand"
@@ -11,6 +11,10 @@ import (
 	"golang.org/x/crypto/argon2"
 )
 
+// Hashed represents a securely hashed password.
+// This type ensures plaintext passwords cannot be accidentally stored.
+type Hashed string
+
 // argon2id parameters
 const (
 	memory = 64 * 1024
@@ -34,14 +38,15 @@ type argon2Hash struct {
 	hash []byte
 }
 
-func HashPassword(password string) (string, error) {
+// Hash securely hashes a plaintext password using argon2id.
+func Hash(plain string) (Hashed, error) {
 	salt := make([]byte, saltLength)
 	if _, err := rand.Read(salt); err != nil {
 		return "", fmt.Errorf("failed to generate salt: %w", err)
 	}
 
 	hash := argon2.IDKey(
-		[]byte(password),
+		[]byte(plain),
 		salt,
 		iterations,
 		memory,
@@ -52,7 +57,7 @@ func HashPassword(password string) (string, error) {
 	b64Salt := base64.RawStdEncoding.EncodeToString(salt)
 	b64Hash := base64.RawStdEncoding.EncodeToString(hash)
 
-	encodedHash := fmt.Sprintf(
+	encoded := fmt.Sprintf(
 		"$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s",
 		argon2.Version,
 		memory,
@@ -62,17 +67,18 @@ func HashPassword(password string) (string, error) {
 		b64Hash,
 	)
 
-	return encodedHash, nil
+	return Hashed(encoded), nil
 }
 
-func VerifyPassword(password, encodedHash string) (bool, error) {
-	h, err := decodeHash(encodedHash)
+// Verify checks if a plaintext password matches a hashed password.
+func Verify(plain string, hashed Hashed) (bool, error) {
+	h, err := decodeHash(string(hashed))
 	if err != nil {
 		return false, err
 	}
 
 	otherHash := argon2.IDKey(
-		[]byte(password),
+		[]byte(plain),
 		h.salt,
 		h.iterations,
 		h.memory,
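At call sites the renamed package reads as password.Hash / password.Verify; a round-trip sketch:

	package main

	import (
		"log"

		"github.com/get-drexa/drexa/internal/password"
	)

	func main() {
		hashed, err := password.Hash("correct horse battery staple")
		if err != nil {
			log.Fatal(err)
		}

		ok, err := password.Verify("correct horse battery staple", hashed)
		if err != nil {
			log.Fatal(err)
		}
		log.Println(ok) // true
	}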
10 apps/backend/internal/upload/err.go Normal file
@@ -0,0 +1,10 @@
package upload

import "errors"

var (
	ErrNotFound           = errors.New("not found")
	ErrParentNotDirectory = errors.New("parent is not a directory")
	ErrConflict           = errors.New("node conflict")
	ErrContentNotUploaded = errors.New("content has not been uploaded")
)
120 apps/backend/internal/upload/http.go Normal file
@@ -0,0 +1,120 @@
package upload

import (
	"errors"
	"fmt"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/get-drexa/drexa/internal/httperr"
	"github.com/gofiber/fiber/v2"
	"github.com/uptrace/bun"
)

type createUploadRequest struct {
	ParentID string `json:"parentId"`
	Name     string `json:"name"`
}

type updateUploadRequest struct {
	Status Status `json:"status"`
}

type HTTPHandler struct {
	service *Service
	db      *bun.DB
}

func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
	return &HTTPHandler{service: s, db: db}
}

func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
	upload := api.Group("/uploads")

	upload.Post("/", h.Create)
	upload.Put("/:uploadID/content", h.ReceiveContent)
	upload.Patch("/:uploadID", h.Update)
}

func (h *HTTPHandler) Create(c *fiber.Ctx) error {
	account := account.CurrentAccount(c)
	if account == nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}

	req := new(createUploadRequest)
	if err := c.BodyParser(req); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
	}

	upload, err := h.service.CreateUpload(c.Context(), h.db, account.ID, CreateUploadOptions{
		ParentID: req.ParentID,
		Name:     req.Name,
	})
	if err != nil {
		if errors.Is(err, ErrNotFound) {
			return c.SendStatus(fiber.StatusNotFound)
		}
		if errors.Is(err, ErrParentNotDirectory) {
			return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Parent is not a directory"})
		}
		if errors.Is(err, ErrConflict) {
			return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "A file with this name already exists"})
		}
		return httperr.Internal(err)
	}

	if upload.UploadURL == "" {
		upload.UploadURL = fmt.Sprintf("%s%s/%s/content", c.BaseURL(), c.OriginalURL(), upload.ID)
	}

	return c.JSON(upload)
}

func (h *HTTPHandler) ReceiveContent(c *fiber.Ctx) error {
	account := account.CurrentAccount(c)
	if account == nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}

	uploadID := c.Params("uploadID")

	err := h.service.ReceiveUpload(c.Context(), h.db, account.ID, uploadID, c.Context().RequestBodyStream())
	defer c.Context().Request.CloseBodyStream()
	if err != nil {
		if errors.Is(err, ErrNotFound) {
			return c.SendStatus(fiber.StatusNotFound)
		}
		return httperr.Internal(err)
	}

	return c.SendStatus(fiber.StatusNoContent)
}

func (h *HTTPHandler) Update(c *fiber.Ctx) error {
	account := account.CurrentAccount(c)
	if account == nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}

	req := new(updateUploadRequest)
	if err := c.BodyParser(req); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
	}

	if req.Status == StatusCompleted {
		upload, err := h.service.CompleteUpload(c.Context(), h.db, account.ID, c.Params("uploadID"))
		if err != nil {
			if errors.Is(err, ErrNotFound) {
				return c.SendStatus(fiber.StatusNotFound)
			}
			if errors.Is(err, ErrContentNotUploaded) {
				return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Content has not been uploaded"})
			}
			return httperr.Internal(err)
		}
		return c.JSON(upload)
	}

	return c.SendStatus(fiber.StatusBadRequest)
}
144 apps/backend/internal/upload/service.go Normal file
@@ -0,0 +1,144 @@
package upload

import (
	"context"
	"errors"
	"io"
	"sync"
	"time"

	"github.com/get-drexa/drexa/internal/blob"
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Service struct {
	vfs       *virtualfs.VirtualFS
	blobStore blob.Store

	pendingUploads sync.Map
}

func NewService(vfs *virtualfs.VirtualFS, blobStore blob.Store) *Service {
	return &Service{
		vfs:       vfs,
		blobStore: blobStore,

		pendingUploads: sync.Map{},
	}
}

type CreateUploadOptions struct {
	ParentID string
	Name     string
}

func (s *Service) CreateUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateUploadOptions) (*Upload, error) {
	parentNode, err := s.vfs.FindNodeByPublicID(ctx, db, accountID, opts.ParentID)
	if err != nil {
		if errors.Is(err, virtualfs.ErrNodeNotFound) {
			return nil, ErrNotFound
		}
		return nil, err
	}

	if parentNode.Kind != virtualfs.NodeKindDirectory {
		return nil, ErrParentNotDirectory
	}

	node, err := s.vfs.CreateFile(ctx, db, accountID, virtualfs.CreateFileOptions{
		ParentID: parentNode.ID,
		Name:     opts.Name,
	})
	if err != nil {
		if errors.Is(err, virtualfs.ErrNodeConflict) {
			return nil, ErrConflict
		}
		return nil, err
	}

	var uploadURL string
	if s.blobStore.SupportsDirectUpload() {
		uploadURL, err = s.blobStore.GenerateUploadURL(ctx, node.BlobKey, blob.UploadURLOptions{
			Duration: 1 * time.Hour,
		})
		if err != nil {
			_ = s.vfs.PermanentlyDeleteNode(ctx, db, node)
			return nil, err
		}
	} else {
		uploadURL = ""
	}

	upload := &Upload{
		ID:         node.PublicID,
		Status:     StatusPending,
		TargetNode: node,
		UploadURL:  uploadURL,
	}

	s.pendingUploads.Store(upload.ID, upload)

	return upload, nil
}

func (s *Service) ReceiveUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string, reader io.Reader) error {
	n, ok := s.pendingUploads.Load(uploadID)
	if !ok {
		return ErrNotFound
	}

	upload, ok := n.(*Upload)
	if !ok {
		return ErrNotFound
	}

	if upload.TargetNode.AccountID != accountID {
		return ErrNotFound
	}

	err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromReader(reader))
	if err != nil {
		return err
	}

	upload.Status = StatusCompleted

	return nil
}

func (s *Service) CompleteUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string) (*Upload, error) {
	n, ok := s.pendingUploads.Load(uploadID)
	if !ok {
		return nil, ErrNotFound
	}

	upload, ok := n.(*Upload)
	if !ok {
		return nil, ErrNotFound
	}

	if upload.TargetNode.AccountID != accountID {
		return nil, ErrNotFound
	}

	if upload.TargetNode.Status == virtualfs.NodeStatusReady && upload.Status == StatusCompleted {
		return upload, nil
	}

	err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromBlobKey(upload.TargetNode.BlobKey))
	if err != nil {
		if errors.Is(err, blob.ErrNotFound) {
			return nil, ErrContentNotUploaded
		}
		return nil, err
	}

	upload.Status = StatusCompleted
	s.pendingUploads.Delete(uploadID)

	return upload, nil
}
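A sketch of the three-step flow this service implements; in production the HTTP handlers above drive it, and svc, db, and accountID come from the server wiring:

	package example

	import (
		"bytes"
		"context"

		"github.com/get-drexa/drexa/internal/upload"
		"github.com/google/uuid"
		"github.com/uptrace/bun"
	)

	func uploadFlow(ctx context.Context, svc *upload.Service, db bun.IDB, accountID uuid.UUID, parentPublicID string) error {
		up, err := svc.CreateUpload(ctx, db, accountID, upload.CreateUploadOptions{
			ParentID: parentPublicID,
			Name:     "notes.txt", // illustrative file name
		})
		if err != nil {
			return err
		}

		// For stores without direct upload (UploadURL == ""), stream the bytes through the API.
		if err := svc.ReceiveUpload(ctx, db, accountID, up.ID, bytes.NewReader([]byte("hello"))); err != nil {
			return err
		}

		_, err = svc.CompleteUpload(ctx, db, accountID, up.ID)
		return err
	}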
18 apps/backend/internal/upload/upload.go Normal file
@@ -0,0 +1,18 @@
package upload

import "github.com/get-drexa/drexa/internal/virtualfs"

type Status string

const (
	StatusPending   Status = "pending"
	StatusCompleted Status = "completed"
	StatusFailed    Status = "failed"
)

type Upload struct {
	ID         string          `json:"id"`
	Status     Status          `json:"status"`
	TargetNode *virtualfs.Node `json:"-"`
	UploadURL  string          `json:"uploadUrl"`
}
36 apps/backend/internal/user/err.go Normal file
@@ -0,0 +1,36 @@
package user

import (
	"fmt"

	"github.com/google/uuid"
)

type NotFoundError struct {
	// id is the ID that was used to try to find the user.
	// Not set if not tried.
	id uuid.UUID

	// email is the email that was used to try to find the user.
	// Not set if not tried.
	email string
}

func newNotFoundError(id uuid.UUID, email string) *NotFoundError {
	return &NotFoundError{id, email}
}

func (e *NotFoundError) Error() string {
	return fmt.Sprintf("user not found: %v", e.id)
}

type AlreadyExistsError struct {
	// Email is the email that was used to try to create the user.
	Email string
}

func newAlreadyExistsError(email string) *AlreadyExistsError {
	return &AlreadyExistsError{email}
}

func (e *AlreadyExistsError) Error() string {
	return fmt.Sprintf("user with email %s already exists", e.Email)
}
76 apps/backend/internal/user/service.go Normal file
@@ -0,0 +1,76 @@
package user

import (
	"context"
	"database/sql"
	"errors"

	"github.com/get-drexa/drexa/internal/database"
	"github.com/get-drexa/drexa/internal/password"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Service struct{}

type UserRegistrationOptions struct {
	Email       string
	DisplayName string
	Password    password.Hashed
}

func NewService() *Service {
	return &Service{}
}

func (s *Service) RegisterUser(ctx context.Context, db bun.IDB, opts UserRegistrationOptions) (*User, error) {
	uid, err := newUserID()
	if err != nil {
		return nil, err
	}

	u := User{
		ID:          uid,
		Email:       opts.Email,
		DisplayName: opts.DisplayName,
		Password:    opts.Password,
	}

	_, err = db.NewInsert().Model(&u).Returning("*").Exec(ctx)
	if err != nil {
		if database.IsUniqueViolation(err) {
			return nil, newAlreadyExistsError(u.Email)
		}
		return nil, err
	}

	return &u, nil
}

func (s *Service) UserByID(ctx context.Context, db bun.IDB, id uuid.UUID) (*User, error) {
	var user User
	err := db.NewSelect().Model(&user).Where("id = ?", id).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, newNotFoundError(id, "")
		}
		return nil, err
	}
	return &user, nil
}

func (s *Service) UserByEmail(ctx context.Context, db bun.IDB, email string) (*User, error) {
	var user User
	err := db.NewSelect().Model(&user).Where("email = ?", email).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, newNotFoundError(uuid.Nil, email)
		}
		return nil, err
	}
	return &user, nil
}

func (s *Service) UserExistsByEmail(ctx context.Context, db bun.IDB, email string) (bool, error) {
	return db.NewSelect().Model(&User{}).Where("email = ?", email).Exists(ctx)
}
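A registration sketch showing how the password.Hashed type forces hashing before RegisterUser is called:

	package example

	import (
		"context"

		"github.com/get-drexa/drexa/internal/password"
		"github.com/get-drexa/drexa/internal/user"
		"github.com/uptrace/bun"
	)

	func signUp(ctx context.Context, db bun.IDB, svc *user.Service, email, plain string) (*user.User, error) {
		hashed, err := password.Hash(plain)
		if err != nil {
			return nil, err
		}
		// UserRegistrationOptions.Password only accepts a password.Hashed value.
		return svc.RegisterUser(ctx, db, user.UserRegistrationOptions{
			Email:    email,
			Password: hashed,
		})
	}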
24 apps/backend/internal/user/user.go Normal file
@@ -0,0 +1,24 @@
package user

import (
	"time"

	"github.com/get-drexa/drexa/internal/password"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type User struct {
	bun.BaseModel `bun:"users"`

	ID          uuid.UUID       `bun:",pk,type:uuid" json:"id"`
	DisplayName string          `bun:"display_name" json:"displayName"`
	Email       string          `bun:"email,unique,notnull" json:"email"`
	Password    password.Hashed `bun:"password,notnull" json:"-"`
	CreatedAt   time.Time       `bun:"created_at,notnull,nullzero" json:"createdAt"`
	UpdatedAt   time.Time       `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}

func newUserID() (uuid.UUID, error) {
	return uuid.NewV7()
}
9 apps/backend/internal/virtualfs/err.go Normal file
@@ -0,0 +1,9 @@
package virtualfs

import "errors"

var (
	ErrNodeNotFound         = errors.New("node not found")
	ErrNodeConflict         = errors.New("node conflict")
	ErrUnsupportedOperation = errors.New("unsupported operation")
)
35 apps/backend/internal/virtualfs/flat_key_resolver.go Normal file
@@ -0,0 +1,35 @@
package virtualfs

import (
	"context"

	"github.com/get-drexa/drexa/internal/blob"
	"github.com/google/uuid"
)

type FlatKeyResolver struct{}

var _ BlobKeyResolver = &FlatKeyResolver{}

func NewFlatKeyResolver() *FlatKeyResolver {
	return &FlatKeyResolver{}
}

func (r *FlatKeyResolver) ShouldPersistKey() bool {
	return true
}

func (r *FlatKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
	if node.BlobKey == "" {
		id, err := uuid.NewV7()
		if err != nil {
			return "", err
		}
		return blob.Key(id.String()), nil
	}
	return node.BlobKey, nil
}

func (r *FlatKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
	return &DeletionPlan{Keys: allKeys}, nil
}
apps/backend/internal/virtualfs/hierarchical_key_resolver.go (Normal file, 40 lines)
@@ -0,0 +1,40 @@
package virtualfs

import (
    "context"
    "fmt"

    "github.com/get-drexa/drexa/internal/blob"
    "github.com/uptrace/bun"
)

type HierarchicalKeyResolver struct {
    db *bun.DB
}

var _ BlobKeyResolver = &HierarchicalKeyResolver{}

func NewHierarchicalKeyResolver(db *bun.DB) *HierarchicalKeyResolver {
    return &HierarchicalKeyResolver{db: db}
}

func (r *HierarchicalKeyResolver) ShouldPersistKey() bool {
    return false
}

func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
    path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
    if err != nil {
        return "", err
    }

    return blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path)), nil
}

func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
    path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
    if err != nil {
        return nil, err
    }
    return &DeletionPlan{Prefix: blob.Key(path)}, nil
}
apps/backend/internal/virtualfs/key_resolver.go (Normal file, 21 lines)
@@ -0,0 +1,21 @@
package virtualfs

import (
    "context"

    "github.com/get-drexa/drexa/internal/blob"
)

type BlobKeyResolver interface {
    // ShouldPersistKey returns true if the resolved key should be stored in node.BlobKey.
    // Flat keys (e.g. UUIDs) return true - key is generated once and stored.
    // Hierarchical keys return false - key is derived from path each time.
    ShouldPersistKey() bool
    Resolve(ctx context.Context, node *Node) (blob.Key, error)
    ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error)
}

type DeletionPlan struct {
    Prefix blob.Key
    Keys   []blob.Key
}
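The interface's doc comment is the contract the two implementations above satisfy. A sketch of how a caller might choose between them; the store and db construction, and the hierarchical flag, are assumptions, not part of this diff:

package main

import (
    "github.com/get-drexa/drexa/internal/blob"
    "github.com/get-drexa/drexa/internal/virtualfs"
    "github.com/uptrace/bun"
)

// newVFS picks a key strategy and wires up the filesystem; the hierarchical
// flag is a hypothetical configuration knob.
func newVFS(db *bun.DB, store blob.Store, hierarchical bool) (*virtualfs.VirtualFS, error) {
    var resolver virtualfs.BlobKeyResolver
    if hierarchical {
        // Keys are derived from the node's path on every call, never persisted.
        resolver = virtualfs.NewHierarchicalKeyResolver(db)
    } else {
        // UUID keys are generated once and stored in node.BlobKey.
        resolver = virtualfs.NewFlatKeyResolver()
    }
    return virtualfs.NewVirtualFS(store, resolver)
}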
apps/backend/internal/virtualfs/node.go (Normal file, 53 lines)
@@ -0,0 +1,53 @@
package virtualfs

import (
    "time"

    "github.com/get-drexa/drexa/internal/blob"
    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

type NodeKind string

const (
    NodeKindFile      NodeKind = "file"
    NodeKindDirectory NodeKind = "directory"
)

type NodeStatus string

const (
    NodeStatusPending NodeStatus = "pending"
    NodeStatusReady   NodeStatus = "ready"
)

type Node struct {
    bun.BaseModel `bun:"vfs_nodes"`

    ID        uuid.UUID  `bun:",pk,type:uuid"`
    PublicID  string     `bun:"public_id,notnull"`
    AccountID uuid.UUID  `bun:"account_id,notnull,type:uuid"`
    ParentID  uuid.UUID  `bun:"parent_id,nullzero"`
    Kind      NodeKind   `bun:"kind,notnull"`
    Status    NodeStatus `bun:"status,notnull"`
    Name      string     `bun:"name,notnull"`

    BlobKey  blob.Key `bun:"blob_key,nullzero"`
    Size     int64    `bun:"size"`
    MimeType string   `bun:"mime_type,nullzero"`

    CreatedAt time.Time `bun:"created_at,notnull,nullzero"`
    UpdatedAt time.Time `bun:"updated_at,notnull,nullzero"`
    DeletedAt time.Time `bun:"deleted_at,nullzero"`
}

func newNodeID() (uuid.UUID, error) {
    return uuid.NewV7()
}

// IsAccessible returns true if the node can be accessed.
// If the node is not ready or if it is soft deleted, it cannot be accessed.
func (n *Node) IsAccessible() bool {
    return n.DeletedAt.IsZero() && n.Status == NodeStatusReady
}
apps/backend/internal/virtualfs/path.go (Normal file, 42 lines)
@@ -0,0 +1,42 @@
package virtualfs

import (
    "context"
    "database/sql"
    "errors"
    "strings"

    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

const absolutePathQuery = `WITH RECURSIVE path AS (
    SELECT id, parent_id, name, 1 as depth
    FROM vfs_nodes WHERE id = ? AND deleted_at IS NULL

    UNION ALL

    SELECT n.id, n.parent_id, n.name, p.depth + 1
    FROM vfs_nodes n
    JOIN path p ON n.id = p.parent_id
    WHERE n.deleted_at IS NULL
)
SELECT name FROM path
WHERE EXISTS (SELECT 1 FROM path WHERE parent_id IS NULL)
ORDER BY depth DESC;`

func JoinPath(parts ...string) string {
    return strings.Join(parts, "/")
}

func buildNodeAbsolutePath(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) {
    var path []string
    err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return "", ErrNodeNotFound
        }
        return "", err
    }
    return JoinPath(path...), nil
}
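To make the query's direction concrete, here is a hypothetical three-row table and the traversal it produces:

// Hypothetical rows in vfs_nodes (IDs shortened for illustration):
//   id=a  parent_id=NULL  name='root'
//   id=b  parent_id=a     name='docs'
//   id=c  parent_id=b     name='report.pdf'
//
// buildNodeAbsolutePath(ctx, db, c) seeds the CTE at c, follows parent_id
// upward (c -> b -> a), and ORDER BY depth DESC reverses that walk, so the
// scan fills path with ["root", "docs", "report.pdf"] and JoinPath returns
// "root/docs/report.pdf". The EXISTS guard yields no rows at all if the
// chain never reaches a root or crosses a soft-deleted ancestor, which the
// caller treats as a missing node.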
apps/backend/internal/virtualfs/vfs.go (Normal file, 520 lines)
@@ -0,0 +1,520 @@
package virtualfs

import (
    "bytes"
    "context"
    "crypto/rand"
    "database/sql"
    "encoding/binary"
    "errors"
    "io"

    "github.com/gabriel-vasile/mimetype"
    "github.com/get-drexa/drexa/internal/blob"
    "github.com/get-drexa/drexa/internal/database"
    "github.com/get-drexa/drexa/internal/ioext"
    "github.com/google/uuid"
    "github.com/sqids/sqids-go"
    "github.com/uptrace/bun"
)

type VirtualFS struct {
    blobStore   blob.Store
    keyResolver BlobKeyResolver

    sqid *sqids.Sqids
}

type CreateNodeOptions struct {
    ParentID uuid.UUID
    Kind     NodeKind
    Name     string
}

type CreateFileOptions struct {
    ParentID uuid.UUID
    Name     string
}

type FileContent struct {
    reader  io.Reader
    blobKey blob.Key
}

const RootDirectoryName = "root"

func FileContentFromReader(reader io.Reader) FileContent {
    return FileContent{reader: reader}
}

func FileContentFromBlobKey(blobKey blob.Key) FileContent {
    return FileContent{blobKey: blobKey}
}

func NewVirtualFS(blobStore blob.Store, keyResolver BlobKeyResolver) (*VirtualFS, error) {
    sqid, err := sqids.New()
    if err != nil {
        return nil, err
    }
    return &VirtualFS{
        blobStore:   blobStore,
        keyResolver: keyResolver,
        sqid:        sqid,
    }, nil
}

func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, accountID, fileID string) (*Node, error) {
    var node Node
    err := db.NewSelect().Model(&node).
        Where("account_id = ?", accountID).
        Where("id = ?", fileID).
        Where("status = ?", NodeStatusReady).
        Where("deleted_at IS NULL").
        Scan(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return nil, ErrNodeNotFound
        }
        return nil, err
    }
    return &node, nil
}

func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicID string) (*Node, error) {
    var node Node
    err := db.NewSelect().Model(&node).
        Where("account_id = ?", accountID).
        Where("public_id = ?", publicID).
        Where("status = ?", NodeStatusReady).
        Where("deleted_at IS NULL").
        Scan(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return nil, ErrNodeNotFound
        }
        return nil, err
    }
    return &node, nil
}

func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node) ([]*Node, error) {
    if !node.IsAccessible() {
        return nil, ErrNodeNotFound
    }

    var nodes []*Node
    err := db.NewSelect().Model(&nodes).
        Where("account_id = ?", node.AccountID).
        Where("parent_id = ?", node.ID).
        Where("status = ?", NodeStatusReady).
        Where("deleted_at IS NULL").
        Scan(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return make([]*Node, 0), nil
        }
        return nil, err
    }

    return nodes, nil
}

func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateFileOptions) (*Node, error) {
    pid, err := vfs.generatePublicID()
    if err != nil {
        return nil, err
    }

    id, err := newNodeID()
    if err != nil {
        return nil, err
    }

    node := Node{
        ID:        id,
        PublicID:  pid,
        AccountID: accountID,
        ParentID:  opts.ParentID,
        Kind:      NodeKindFile,
        Status:    NodeStatusPending,
        Name:      opts.Name,
    }

    if vfs.keyResolver.ShouldPersistKey() {
        node.BlobKey, err = vfs.keyResolver.Resolve(ctx, &node)
        if err != nil {
            return nil, err
        }
    }

    _, err = db.NewInsert().Model(&node).Returning("*").Exec(ctx)
    if err != nil {
        if database.IsUniqueViolation(err) {
            return nil, ErrNodeConflict
        }
        return nil, err
    }

    return &node, nil
}
func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, content FileContent) error {
    if content.reader == nil && content.blobKey.IsNil() {
        return blob.ErrInvalidFileContent
    }

    if !node.DeletedAt.IsZero() {
        return ErrNodeNotFound
    }

    setCols := make([]string, 0, 4)

    if content.reader != nil {
        key, err := vfs.keyResolver.Resolve(ctx, node)
        if err != nil {
            return err
        }

        buf := make([]byte, 3072)
        n, err := io.ReadFull(content.reader, buf)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
            return err
        }
        buf = buf[:n]

        mt := mimetype.Detect(buf)
        cr := ioext.NewCountingReader(io.MultiReader(bytes.NewReader(buf), content.reader))

        err = vfs.blobStore.Put(ctx, key, cr)
        if err != nil {
            return err
        }

        if vfs.keyResolver.ShouldPersistKey() {
            node.BlobKey = key
            setCols = append(setCols, "blob_key")
        }

        node.MimeType = mt.String()
        node.Size = cr.Count()
        node.Status = NodeStatusReady

        setCols = append(setCols, "mime_type", "size", "status")
    } else {
        node.BlobKey = content.blobKey

        b, err := vfs.blobStore.ReadRange(ctx, content.blobKey, 0, 3072)
        if err != nil {
            return err
        }
        defer b.Close()

        buf := make([]byte, 3072)
        n, err := io.ReadFull(b, buf)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
            return err
        }
        buf = buf[:n]
        mt := mimetype.Detect(buf)
        node.MimeType = mt.String()
        node.Status = NodeStatusReady

        s, err := vfs.blobStore.ReadSize(ctx, content.blobKey)
        if err != nil {
            return err
        }
        node.Size = s

        setCols = append(setCols, "mime_type", "blob_key", "size", "status")
    }

    _, err := db.NewUpdate().Model(node).
        Column(setCols...).
        WherePK().
        Exec(ctx)
    if err != nil {
        return err
    }

    return nil
}
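WriteFile only ever buffers a bounded prefix for content sniffing: 3072 bytes, which matches the mimetype library's default detection limit. The prefix is then stitched back in front of the remaining stream so the blob store still receives every byte, and a counting reader measures the size in the same pass. A standalone sketch of that sniff-and-reassemble pattern:

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"

    "github.com/gabriel-vasile/mimetype"
)

func main() {
    src := strings.NewReader("%PDF-1.7 ...rest of a hypothetical file...")

    // Read at most 3072 bytes; short inputs produce io.ErrUnexpectedEOF,
    // which is expected and tolerated here, just as in WriteFile.
    buf := make([]byte, 3072)
    n, err := io.ReadFull(src, buf)
    if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        panic(err)
    }
    buf = buf[:n]

    fmt.Println(mimetype.Detect(buf)) // application/pdf

    // Stitch the sniffed prefix back in front of the unread remainder.
    full := io.MultiReader(bytes.NewReader(buf), src)
    rest, _ := io.ReadAll(full)
    fmt.Println(len(rest)) // the complete payload, prefix included
}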
func (vfs *VirtualFS) CreateDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID, parentID uuid.UUID, name string) (*Node, error) {
    pid, err := vfs.generatePublicID()
    if err != nil {
        return nil, err
    }

    id, err := newNodeID()
    if err != nil {
        return nil, err
    }

    node := Node{
        ID:        id,
        PublicID:  pid,
        AccountID: accountID,
        ParentID:  parentID,
        Kind:      NodeKindDirectory,
        Status:    NodeStatusReady,
        Name:      name,
    }

    _, err = db.NewInsert().Model(&node).Exec(ctx)
    if err != nil {
        if database.IsUniqueViolation(err) {
            return nil, ErrNodeConflict
        }
        return nil, err
    }

    return &node, nil
}
func (vfs *VirtualFS) SoftDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
    if !node.IsAccessible() {
        return ErrNodeNotFound
    }

    _, err := db.NewUpdate().Model(node).
        WherePK().
        Where("deleted_at IS NULL").
        Where("status = ?", NodeStatusReady).
        Set("deleted_at = NOW()").
        Returning("deleted_at").
        Exec(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return ErrNodeNotFound
        }
        return err
    }

    return nil
}

func (vfs *VirtualFS) RestoreNode(ctx context.Context, db bun.IDB, node *Node) error {
    if node.Status != NodeStatusReady {
        return ErrNodeNotFound
    }

    _, err := db.NewUpdate().Model(node).
        WherePK().
        Where("deleted_at IS NOT NULL").
        Set("deleted_at = NULL").
        Returning("deleted_at").
        Exec(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return ErrNodeNotFound
        }
        return err
    }

    return nil
}

func (vfs *VirtualFS) RenameNode(ctx context.Context, db bun.IDB, node *Node, name string) error {
    if !node.IsAccessible() {
        return ErrNodeNotFound
    }

    _, err := db.NewUpdate().Model(node).
        WherePK().
        Where("status = ?", NodeStatusReady).
        Where("deleted_at IS NULL").
        Set("name = ?", name).
        Returning("name, updated_at").
        Exec(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return ErrNodeNotFound
        }
        return err
    }
    return nil
}

func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, parentID uuid.UUID) error {
    if !node.IsAccessible() {
        return ErrNodeNotFound
    }

    oldKey, err := vfs.keyResolver.Resolve(ctx, node)
    if err != nil {
        return err
    }

    _, err = db.NewUpdate().Model(node).
        WherePK().
        Where("status = ?", NodeStatusReady).
        Where("deleted_at IS NULL").
        Set("parent_id = ?", parentID).
        Returning("parent_id, updated_at").
        Exec(ctx)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return ErrNodeNotFound
        }
        if database.IsUniqueViolation(err) {
            return ErrNodeConflict
        }
        return err
    }

    newKey, err := vfs.keyResolver.Resolve(ctx, node)
    if err != nil {
        return err
    }

    err = vfs.blobStore.Move(ctx, oldKey, newKey)
    if err != nil {
        return err
    }

    if vfs.keyResolver.ShouldPersistKey() {
        node.BlobKey = newKey
        _, err = db.NewUpdate().Model(node).
            WherePK().
            Set("blob_key = ?", newKey).
            Exec(ctx)
        if err != nil {
            return err
        }
    }

    return nil
}

func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) {
    if !node.IsAccessible() {
        return "", ErrNodeNotFound
    }
    return buildNodeAbsolutePath(ctx, db, node.ID)
}
func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
    if !node.IsAccessible() {
        return ErrNodeNotFound
    }
    switch node.Kind {
    case NodeKindFile:
        return vfs.permanentlyDeleteFileNode(ctx, db, node)
    case NodeKindDirectory:
        return vfs.permanentlyDeleteDirectoryNode(ctx, db, node)
    default:
        return ErrUnsupportedOperation
    }
}

func (vfs *VirtualFS) permanentlyDeleteFileNode(ctx context.Context, db bun.IDB, node *Node) error {
    err := vfs.blobStore.Delete(ctx, node.BlobKey)
    if err != nil {
        return err
    }

    _, err = db.NewDelete().Model(node).WherePK().Exec(ctx)
    if err != nil {
        return err
    }

    return nil
}

func (vfs *VirtualFS) permanentlyDeleteDirectoryNode(ctx context.Context, db bun.IDB, node *Node) error {
    const descendantsQuery = `WITH RECURSIVE descendants AS (
        SELECT id, blob_key FROM vfs_nodes WHERE id = ?
        UNION ALL
        SELECT n.id, n.blob_key FROM vfs_nodes n
        JOIN descendants d ON n.parent_id = d.id
    )
    SELECT id, blob_key FROM descendants`

    type nodeRecord struct {
        ID      uuid.UUID `bun:"id"`
        BlobKey blob.Key  `bun:"blob_key"`
    }

    // If db is already a transaction, use it directly; otherwise start a new transaction.
    var tx bun.IDB
    var startedTx *bun.Tx
    switch v := db.(type) {
    case *bun.DB:
        newTx, err := v.BeginTx(ctx, nil)
        if err != nil {
            return err
        }
        startedTx = &newTx
        tx = newTx
        defer func() {
            if startedTx != nil {
                (*startedTx).Rollback()
            }
        }()
    default:
        // Assume it's already a transaction.
        tx = db
    }

    var records []nodeRecord
    err := tx.NewRaw(descendantsQuery, node.ID).Scan(ctx, &records)
    if err != nil {
        if errors.Is(err, sql.ErrNoRows) {
            return ErrNodeNotFound
        }
        return err
    }

    if len(records) == 0 {
        return ErrNodeNotFound
    }

    nodeIDs := make([]uuid.UUID, 0, len(records))
    blobKeys := make([]blob.Key, 0, len(records))
    for _, r := range records {
        nodeIDs = append(nodeIDs, r.ID)
        if !r.BlobKey.IsNil() {
            blobKeys = append(blobKeys, r.BlobKey)
        }
    }

    plan, err := vfs.keyResolver.ResolveDeletionKeys(ctx, node, blobKeys)
    if err != nil {
        return err
    }

    _, err = tx.NewDelete().
        Model((*Node)(nil)).
        Where("id IN (?)", bun.In(nodeIDs)).
        Exec(ctx)
    if err != nil {
        return err
    }

    if !plan.Prefix.IsNil() {
        _ = vfs.blobStore.DeletePrefix(ctx, plan.Prefix)
    } else {
        for _, key := range plan.Keys {
            _ = vfs.blobStore.Delete(ctx, key)
        }
    }

    // Only commit if we started the transaction.
    if startedTx != nil {
        err := (*startedTx).Commit()
        startedTx = nil // Prevent defer from rolling back.
        return err
    }

    return nil
}

func (vfs *VirtualFS) generatePublicID() (string, error) {
    var b [8]byte
    _, err := rand.Read(b[:])
    if err != nil {
        return "", err
    }
    n := binary.BigEndian.Uint64(b[:])
    return vfs.sqid.Encode([]uint64{n})
}
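A sketch of the two-step upload flow these methods imply: CreateFile inserts a pending node, then WriteFile streams the bytes and flips the status to ready. The surrounding wiring (vfs, db, IDs) is assumed, not shown in this diff:

package main

import (
    "context"
    "log"
    "strings"

    "github.com/get-drexa/drexa/internal/virtualfs"
    "github.com/google/uuid"
    "github.com/uptrace/bun"
)

// createAndUpload is a sketch only; vfs and db construction happens elsewhere.
func createAndUpload(ctx context.Context, vfs *virtualfs.VirtualFS, db *bun.DB, accountID, parentID uuid.UUID) error {
    node, err := vfs.CreateFile(ctx, db, accountID, virtualfs.CreateFileOptions{
        ParentID: parentID,
        Name:     "notes.txt",
    })
    if err != nil {
        return err // ErrNodeConflict on duplicate names under the same parent
    }

    content := virtualfs.FileContentFromReader(strings.NewReader("hello"))
    if err := vfs.WriteFile(ctx, db, node, content); err != nil {
        return err
    }
    log.Printf("uploaded %s (%d bytes, %s)", node.Name, node.Size, node.MimeType)
    return nil
}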
scripts/install-vscode-extensions.sh (Executable file, 54 lines)
@@ -0,0 +1,54 @@
#!/bin/bash

# Install VS Code extensions from devcontainer.json
# This script reads extensions from the single source of truth

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEVCONTAINER_JSON="$SCRIPT_DIR/../.devcontainer/devcontainer.json"

echo "Installing VS Code extensions from devcontainer.json..."

if [ ! -f "$DEVCONTAINER_JSON" ]; then
    echo "Error: devcontainer.json not found at $DEVCONTAINER_JSON"
    exit 1
fi

# Use Bun to parse JSON and extract extensions
EXTENSIONS=$(bun -e "
const config = await Bun.file('$DEVCONTAINER_JSON').json();
const extensions = config?.customizations?.vscode?.extensions ?? [];
console.log(extensions.join('\n'));
")

if [ -z "$EXTENSIONS" ]; then
    echo "No extensions found in devcontainer.json"
    exit 0
fi

# Determine which CLI to use (code-server, cursor, or code)
if command -v code-server &> /dev/null; then
    CLI="code-server"
elif command -v cursor &> /dev/null; then
    CLI="cursor"
elif command -v code &> /dev/null; then
    CLI="code"
else
    echo "Warning: No VS Code CLI found (code-server, cursor, or code)"
    echo "Extensions to install:"
    echo "$EXTENSIONS"
    exit 0
fi

echo "Using $CLI to install extensions..."

# Install each extension
echo "$EXTENSIONS" | while read -r ext; do
    if [ -n "$ext" ]; then
        echo "Installing: $ext"
        $CLI --install-extension "$ext" 2>/dev/null || true
    fi
done

echo "Extension installation complete!"
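Because the final fallback branch only prints the extension list when no editor CLI is present, the script is safe to run in any environment. Inside the dev container it would typically be invoked as ./scripts/install-vscode-extensions.sh once Bun is on PATH; the diff does not show a caller, so that invocation is an assumption.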