31 Commits

Author SHA1 Message Date
e8a558d652 refactor: wrap fiber.App in custom Server struct 2025-11-30 23:14:09 +00:00
3e96c42c4a fix: handle missing expected err cases 2025-11-30 20:08:31 +00:00
6c61cbe1fd fix: hierarchical keys should have acc id prefix 2025-11-30 19:49:13 +00:00
ccdeaf0364 fix: some weird upload bugs 2025-11-30 19:39:47 +00:00
a3110b67c3 fix: vfs node primary id not generated properly 2025-11-30 19:20:08 +00:00
1907cd83c8 fix: upload url generation 2025-11-30 19:19:54 +00:00
033ad65d5f feat: improve err logging 2025-11-30 19:19:33 +00:00
987edc0d4a fix: timestamp cols default not working 2025-11-30 17:42:35 +00:00
fb8e91dd47 feat: create root dir on acc registration 2025-11-30 17:37:59 +00:00
0b8aee5d60 refactor: make vfs methods accept bun.IDB 2025-11-30 17:31:24 +00:00
89b62f6d8a feat: introduce account 2025-11-30 17:12:50 +00:00
1c1392a0a1 refactor: replace KeyMode with ShouldPersistKey 2025-11-30 15:02:37 +00:00
6984bb209e refactor: node deletion 2025-11-30 01:16:44 +00:00
629d56b5ab fix: registration endpoint and db auto close issue 2025-11-29 20:51:56 +00:00
5e4e08c255 fix: migration code not working
- read database config from config file
- rename migration file to expected file name format
2025-11-29 20:32:32 +00:00
42b805fbd1 feat: add blob store Initialize method 2025-11-29 18:39:21 +00:00
ab4c14bc09 feat: impl config loading 2025-11-29 18:09:41 +00:00
fd3b2d3908 fix: handle NewServer error 2025-11-29 17:28:53 +00:00
39824e45d9 feat: impl upload api endpoints 2025-11-29 17:25:11 +00:00
6aee150a59 build: remove devcontainer name 2025-11-29 17:24:49 +00:00
9ea76d2021 build: per lang code format settings 2025-11-28 22:31:25 +00:00
987f36e1d2 feat: impl upload service 2025-11-28 22:31:00 +00:00
797b40a35c feat: finish implementing vfs 2025-11-27 22:44:13 +00:00
e32e00a230 build: remove install vscode ext from startup 2025-11-27 20:50:35 +00:00
b1e34f878c wip: vfs implementation 2025-11-27 20:49:58 +00:00
c0e2f7ff37 feat: impl fs backed blob store 2025-11-26 20:11:12 +00:00
834517f3c0 fix: check for correct user exist err in register 2025-11-26 01:45:28 +00:00
06c3951293 fix: ret invalid cred if usr not found in login 2025-11-26 01:42:52 +00:00
389fe35a0a feat: impl bearer auth middleware 2025-11-26 01:09:42 +00:00
81e3f7af75 build: add vscode ext install script 2025-11-26 00:08:22 +00:00
1feac70f7f feat: initial backend scaffolding
migrating away from convex

Co-authored-by: Ona <no-reply@ona.com>
2025-11-10 00:19:30 +00:00
47 changed files with 3143 additions and 16 deletions

View File

@@ -1,5 +1,4 @@
 {
-  "name": "React + Bun + Convex Development",
   "build": {
     "context": ".",
     "dockerfile": "Dockerfile"
@@ -7,11 +6,12 @@
   "features": {
     "ghcr.io/devcontainers/features/git:1": {},
     "ghcr.io/devcontainers/features/github-cli:1": {},
-    "ghcr.io/devcontainers/features/docker-in-docker:2": {
-      "moby": false
-    },
     "ghcr.io/tailscale/codespace/tailscale": {
       "version": "latest"
-    }
+    },
+    "ghcr.io/devcontainers/features/go:1": {
+      "version": "1.25.4",
+      "golangciLintVersion": "2.6.1"
+    }
   },
   "postCreateCommand": "./scripts/setup-git.sh",
@@ -20,21 +20,11 @@
       "extensions": [
         "biomejs.biome",
         "bradlc.vscode-tailwindcss",
-        "ms-vscode.vscode-typescript-next",
-        "esbenp.prettier-vscode",
-        "ms-vscode.vscode-json",
-        "formulahendry.auto-rename-tag",
         "christian-kohler.path-intellisense",
-        "ms-vscode.vscode-eslint",
-        "convex.convex-vscode"
+        "golang.go"
       ],
       "settings": {
-        "editor.defaultFormatter": "biomejs.biome",
         "editor.formatOnSave": true,
-        "editor.codeActionsOnSave": {
-          "source.organizeImports.biome": "explicit",
-          "source.fixAll.biome": "explicit"
-        },
         "typescript.preferences.importModuleSpecifier": "relative",
         "typescript.suggest.autoImports": true,
         "emmet.includeLanguages": {
@@ -44,7 +34,63 @@
       "tailwindCSS.experimental.classRegex": [
         ["cva\\(([^)]*)\\)", "[\"'`]([^\"'`]*).*?[\"'`]"],
         ["cx\\(([^)]*)\\)", "(?:'|\"|`)([^']*)(?:'|\"|`)"]
-      ]
+      ],
+      "[javascript]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.organizeImports.biome": "explicit",
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[javascriptreact]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.organizeImports.biome": "explicit",
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[typescript]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.organizeImports.biome": "explicit",
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[typescriptreact]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.organizeImports.biome": "explicit",
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[json]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[jsonc]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "biomejs.biome",
+        "editor.codeActionsOnSave": {
+          "source.fixAll.biome": "explicit"
+        }
+      },
+      "[go]": {
+        "editor.formatOnSave": true,
+        "editor.defaultFormatter": "golang.go",
+        "editor.codeActionsOnSave": {
+          "source.organizeImports": "explicit"
+        }
+      },
+      "go.formatTool": "goimports",
+      "go.lintTool": "golangci-lint",
+      "go.useLanguageServer": true
     }
   }
 },

View File

@@ -0,0 +1,33 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/get-drexa/drexa/internal/drexa"
)
func main() {
configPath := flag.String("config", "", "path to config file (required)")
flag.Parse()
if *configPath == "" {
fmt.Fprintln(os.Stderr, "error: --config is required")
flag.Usage()
os.Exit(1)
}
config, err := drexa.ConfigFromFile(*configPath)
if err != nil {
log.Fatalf("failed to load config: %v", err)
}
server, err := drexa.NewServer(*config)
if err != nil {
log.Fatal(err)
}
log.Fatal(server.Start())
}

View File

@@ -0,0 +1,37 @@
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"github.com/get-drexa/drexa/internal/database"
"github.com/get-drexa/drexa/internal/drexa"
)
func main() {
configPath := flag.String("config", "", "path to config file (required)")
flag.Parse()
if *configPath == "" {
fmt.Fprintln(os.Stderr, "error: --config is required")
flag.Usage()
os.Exit(1)
}
config, err := drexa.ConfigFromFile(*configPath)
if err != nil {
log.Fatalf("failed to load config: %v", err)
}
db := database.NewFromPostgres(config.Database.PostgresURL)
defer db.Close()
log.Println("running migrations...")
if err := database.RunMigrations(context.Background(), db); err != nil {
log.Fatalf("failed to run migrations: %v", err)
}
log.Println("migrations completed successfully")
}

View File

@@ -0,0 +1,30 @@
# Drexa Backend Configuration
# Copy this file to config.yaml and adjust values for your environment.
server:
port: 8080
database:
postgres_url: postgres://user:password@localhost:5432/drexa?sslmode=disable
jwt:
issuer: drexa
audience: drexa-api
# Secret key can be provided via (in order of precedence):
# 1. JWT_SECRET_KEY environment variable (base64 encoded)
# 2. secret_key_base64 below (base64 encoded)
# 3. secret_key_path below (file with base64 encoded content)
# secret_key_base64: "base64encodedkey"
secret_key_path: /run/secrets/jwt_secret_key
storage:
# Mode: "flat" (UUID-based keys) or "hierarchical" (path-based keys)
# Note: S3 backend only supports "flat" mode
mode: flat
# Backend: "fs" (filesystem) or "s3" (not yet implemented)
backend: fs
# Required when backend is "fs"
root_path: /var/lib/drexa/blobs
# Required when backend is "s3"
# bucket: my-drexa-bucket
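
A key in the expected format can be produced with a few lines of Go (a sketch; any 32 random bytes encoded with standard base64 will satisfy both the HS256 signer and the loader's decoding rules):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// Prints a random 32-byte key, base64 encoded, suitable for
// secret_key_base64 or the JWT_SECRET_KEY environment variable.
func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}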

15
apps/backend/config.yaml Normal file
View File

@@ -0,0 +1,15 @@
server:
port: 8080
database:
postgres_url: postgres://drexa:hunter2@helian:5433/drexa?sslmode=disable
jwt:
issuer: drexa
audience: drexa-api
secret_key_base64: "pNeUExoqdakfecZLFL53NJpY4iB9zFot9EuEBItlYKY="
storage:
mode: hierarchical
backend: fs
root_path: ./data

42
apps/backend/go.mod Normal file
View File

@@ -0,0 +1,42 @@
module github.com/get-drexa/drexa
go 1.25.4
require (
github.com/gabriel-vasile/mimetype v1.4.11
github.com/gofiber/fiber/v2 v2.52.9
github.com/google/uuid v1.6.0
github.com/sqids/sqids-go v0.4.1
github.com/uptrace/bun v1.2.16
github.com/uptrace/bun/extra/bundebug v1.2.16
golang.org/x/crypto v0.45.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/fatih/color v1.18.0 // indirect
go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/trace v1.38.0 // indirect
mellium.im/sasl v0.3.2 // indirect
)
require (
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect
github.com/uptrace/bun/dialect/pgdialect v1.2.16
github.com/uptrace/bun/driver/pgdriver v1.2.16
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasthttp v1.51.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
golang.org/x/sys v0.38.0 // indirect
)

76
apps/backend/go.sum Normal file
View File

@@ -0,0 +1,76 @@
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/sqids/sqids-go v0.4.1 h1:eQKYzmAZbLlRwHeHYPF35QhgxwZHLnlmVj9AkIj/rrw=
github.com/sqids/sqids-go v0.4.1/go.mod h1:EMwHuPQgSNFS0A49jESTfIQS+066XQTVhukrzEPScl8=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
github.com/uptrace/bun v1.2.16/go.mod h1:jMoNg2n56ckaawi/O/J92BHaECmrz6IRjuMWqlMaMTM=
github.com/uptrace/bun/dialect/pgdialect v1.2.16 h1:KFNZ0LxAyczKNfK/IJWMyaleO6eI9/Z5tUv3DE1NVL4=
github.com/uptrace/bun/dialect/pgdialect v1.2.16/go.mod h1:IJdMeV4sLfh0LDUZl7TIxLI0LipF1vwTK3hBC7p5qLo=
github.com/uptrace/bun/driver/pgdriver v1.2.16 h1:b1kpXKUxtTSGYow5Vlsb+dKV3z0R7aSAJNfMfKp61ZU=
github.com/uptrace/bun/driver/pgdriver v1.2.16/go.mod h1:H6lUZ9CBfp1X5Vq62YGSV7q96/v94ja9AYFjKvdoTk0=
github.com/uptrace/bun/extra/bundebug v1.2.16 h1:3OXAfHTU4ydu2+4j05oB1BxPx6+ypdWIVzTugl/7zl0=
github.com/uptrace/bun/extra/bundebug v1.2.16/go.mod h1:vk6R/1i67/S2RvUI5AH/m3P5e67mOkfDCmmCsAPUumo=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
mellium.im/sasl v0.3.2 h1:PT6Xp7ccn9XaXAnJ03FcEjmAn7kK1x7aoXV6F+Vmrl0=
mellium.im/sasl v0.3.2/go.mod h1:NKXDi1zkr+BlMHLQjY3ofYuU4KSPFxknb8mfEu6SveY=

View File

@@ -0,0 +1,23 @@
package account
import (
"time"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type Account struct {
bun.BaseModel `bun:"accounts"`
ID uuid.UUID `bun:",pk,type:uuid" json:"id"`
UserID uuid.UUID `bun:"user_id,notnull,type:uuid" json:"userId"`
StorageUsageBytes int64 `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
StorageQuotaBytes int64 `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`
CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}
func newAccountID() (uuid.UUID, error) {
return uuid.NewV7()
}

View File

@@ -0,0 +1,8 @@
package account
import "errors"
var (
ErrAccountNotFound = errors.New("account not found")
ErrAccountAlreadyExists = errors.New("account already exists")
)

View File

@@ -0,0 +1,132 @@
package account
import (
"errors"
"github.com/get-drexa/drexa/internal/auth"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/get-drexa/drexa/internal/user"
"github.com/gofiber/fiber/v2"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type HTTPHandler struct {
accountService *Service
authService *auth.Service
db *bun.DB
authMiddleware fiber.Handler
}
type registerAccountRequest struct {
Email string `json:"email"`
Password string `json:"password"`
DisplayName string `json:"displayName"`
}
type registerAccountResponse struct {
Account *Account `json:"account"`
User *user.User `json:"user"`
AccessToken string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
}
const currentAccountKey = "currentAccount"
// CurrentAccount returns the account resolved by accountMiddleware, or nil if none is set.
func CurrentAccount(c *fiber.Ctx) *Account {
account, _ := c.Locals(currentAccountKey).(*Account)
return account
}
func NewHTTPHandler(accountService *Service, authService *auth.Service, db *bun.DB, authMiddleware fiber.Handler) *HTTPHandler {
return &HTTPHandler{accountService: accountService, authService: authService, db: db, authMiddleware: authMiddleware}
}
func (h *HTTPHandler) RegisterRoutes(api fiber.Router) fiber.Router {
api.Post("/accounts", h.registerAccount)
account := api.Group("/accounts/:accountID")
account.Use(h.authMiddleware)
account.Use(h.accountMiddleware)
account.Get("/", h.getAccount)
return account
}
func (h *HTTPHandler) accountMiddleware(c *fiber.Ctx) error {
user, err := auth.AuthenticatedUser(c)
if err != nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
accountID, err := uuid.Parse(c.Params("accountID"))
if err != nil {
return c.SendStatus(fiber.StatusNotFound)
}
account, err := h.accountService.AccountByID(c.Context(), h.db, user.ID, accountID)
if err != nil {
if errors.Is(err, ErrAccountNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
return httperr.Internal(err)
}
c.Locals(currentAccountKey, account)
return c.Next()
}
func (h *HTTPHandler) getAccount(c *fiber.Ctx) error {
account := CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusNotFound)
}
return c.JSON(account)
}
func (h *HTTPHandler) registerAccount(c *fiber.Ctx) error {
req := new(registerAccountRequest)
if err := c.BodyParser(req); err != nil {
return c.SendStatus(fiber.StatusBadRequest)
}
tx, err := h.db.BeginTx(c.Context(), nil)
if err != nil {
return httperr.Internal(err)
}
defer tx.Rollback()
acc, u, err := h.accountService.Register(c.Context(), tx, RegisterOptions{
Email: req.Email,
Password: req.Password,
DisplayName: req.DisplayName,
})
if err != nil {
var ae *user.AlreadyExistsError
if errors.As(err, &ae) {
return c.SendStatus(fiber.StatusConflict)
}
if errors.Is(err, ErrAccountAlreadyExists) {
return c.SendStatus(fiber.StatusConflict)
}
return httperr.Internal(err)
}
result, err := h.authService.GenerateTokenForUser(c.Context(), tx, u)
if err != nil {
return httperr.Internal(err)
}
err = tx.Commit()
if err != nil {
return httperr.Internal(err)
}
return c.JSON(registerAccountResponse{
Account: acc,
User: u,
AccessToken: result.AccessToken,
RefreshToken: result.RefreshToken,
})
}

View File

@@ -0,0 +1,115 @@
package account
import (
"context"
"database/sql"
"errors"
"github.com/get-drexa/drexa/internal/database"
"github.com/get-drexa/drexa/internal/password"
"github.com/get-drexa/drexa/internal/user"
"github.com/get-drexa/drexa/internal/virtualfs"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type Service struct {
userService user.Service
vfs *virtualfs.VirtualFS
}
type RegisterOptions struct {
Email string
Password string
DisplayName string
}
type CreateAccountOptions struct {
OrganizationID uuid.UUID
QuotaBytes int64
}
func NewService(userService *user.Service, vfs *virtualfs.VirtualFS) *Service {
return &Service{
userService: *userService,
vfs: vfs,
}
}
func (s *Service) Register(ctx context.Context, db bun.IDB, opts RegisterOptions) (*Account, *user.User, error) {
hashed, err := password.Hash(opts.Password)
if err != nil {
return nil, nil, err
}
u, err := s.userService.RegisterUser(ctx, db, user.UserRegistrationOptions{
Email: opts.Email,
Password: hashed,
DisplayName: opts.DisplayName,
})
if err != nil {
return nil, nil, err
}
acc, err := s.CreateAccount(ctx, db, u.ID, CreateAccountOptions{
// TODO: make quota configurable
QuotaBytes: 1024 * 1024 * 1024, // 1GB
})
if err != nil {
return nil, nil, err
}
_, err = s.vfs.CreateDirectory(ctx, db, acc.ID, uuid.Nil, virtualfs.RootDirectoryName)
if err != nil {
return nil, nil, err
}
return acc, u, nil
}
func (s *Service) CreateAccount(ctx context.Context, db bun.IDB, userID uuid.UUID, opts CreateAccountOptions) (*Account, error) {
id, err := newAccountID()
if err != nil {
return nil, err
}
account := &Account{
ID: id,
UserID: userID,
StorageQuotaBytes: opts.QuotaBytes,
}
_, err = db.NewInsert().Model(account).Returning("*").Exec(ctx)
if err != nil {
if database.IsUniqueViolation(err) {
return nil, ErrAccountAlreadyExists
}
return nil, err
}
return account, nil
}
func (s *Service) AccountByUserID(ctx context.Context, db bun.IDB, userID uuid.UUID) (*Account, error) {
var account Account
err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, ErrAccountNotFound
}
return nil, err
}
return &account, nil
}
func (s *Service) AccountByID(ctx context.Context, db bun.IDB, userID uuid.UUID, id uuid.UUID) (*Account, error) {
var account Account
err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Where("id = ?", id).Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, ErrAccountNotFound
}
return nil, err
}
return &account, nil
}

View File

@@ -0,0 +1,24 @@
package auth
import (
"errors"
"fmt"
)
var ErrUnauthenticatedRequest = errors.New("unauthenticated request")
type InvalidAccessTokenError struct {
err error
}
func newInvalidAccessTokenError(err error) *InvalidAccessTokenError {
return &InvalidAccessTokenError{err}
}
func (e *InvalidAccessTokenError) Error() string {
return fmt.Sprintf("invalid access token: %v", e.err)
}
func (e *InvalidAccessTokenError) Unwrap() error {
return e.err
}

View File

@@ -0,0 +1,66 @@
package auth
import (
"errors"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/get-drexa/drexa/internal/user"
"github.com/gofiber/fiber/v2"
"github.com/uptrace/bun"
)
type loginRequest struct {
Email string `json:"email"`
Password string `json:"password"`
}
type loginResponse struct {
User user.User `json:"user"`
AccessToken string `json:"accessToken"`
RefreshToken string `json:"refreshToken"`
}
type HTTPHandler struct {
service *Service
db *bun.DB
}
func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
return &HTTPHandler{service: s, db: db}
}
func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
auth := api.Group("/auth")
auth.Post("/login", h.Login)
}
func (h *HTTPHandler) Login(c *fiber.Ctx) error {
req := new(loginRequest)
if err := c.BodyParser(req); err != nil {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
}
tx, err := h.db.BeginTx(c.Context(), nil)
if err != nil {
return httperr.Internal(err)
}
defer tx.Rollback()
result, err := h.service.AuthenticateWithEmailAndPassword(c.Context(), tx, req.Email, req.Password)
if err != nil {
if errors.Is(err, ErrInvalidCredentials) {
return c.Status(fiber.StatusUnauthorized).JSON(fiber.Map{"error": "Invalid credentials"})
}
return httperr.Internal(err)
}
if err := tx.Commit(); err != nil {
return httperr.Internal(err)
}
return c.JSON(loginResponse{
User: *result.User,
AccessToken: result.AccessToken,
RefreshToken: result.RefreshToken,
})
}

View File

@@ -0,0 +1,63 @@
package auth
import (
"errors"
"log/slog"
"strings"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/get-drexa/drexa/internal/user"
"github.com/gofiber/fiber/v2"
"github.com/uptrace/bun"
)
const authenticatedUserKey = "authenticatedUser"
// NewBearerAuthMiddleware is a middleware that authenticates a request using a bearer token.
// To obtain the authenticated user in subsequent handlers, see AuthenticatedUser.
func NewBearerAuthMiddleware(s *Service, db *bun.DB) fiber.Handler {
return func(c *fiber.Ctx) error {
authHeader := c.Get("Authorization")
if authHeader == "" {
slog.Info("no auth header")
return c.SendStatus(fiber.StatusUnauthorized)
}
parts := strings.Split(authHeader, " ")
if len(parts) != 2 || parts[0] != "Bearer" {
slog.Info("invalid auth header")
return c.SendStatus(fiber.StatusUnauthorized)
}
token := parts[1]
u, err := s.AuthenticateWithAccessToken(c.Context(), db, token)
if err != nil {
var e *InvalidAccessTokenError
if errors.As(err, &e) {
slog.Info("invalid access token")
return c.SendStatus(fiber.StatusUnauthorized)
}
var nf *user.NotFoundError
if errors.As(err, &nf) {
slog.Info("user not found")
return c.SendStatus(fiber.StatusUnauthorized)
}
return httperr.Internal(err)
}
c.Locals(authenticatedUserKey, u)
return c.Next()
}
}
// AuthenticatedUser returns the authenticated user from the given fiber context.
// Returns ErrUnauthenticatedRequest if not authenticated.
func AuthenticatedUser(c *fiber.Ctx) (*user.User, error) {
if u, ok := c.Locals(authenticatedUserKey).(*user.User); ok {
return u, nil
}
return nil, ErrUnauthenticatedRequest
}
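
To protect a route with this middleware and read the user back, a handler can be wired like so (a minimal sketch; the /me path is hypothetical):

api.Get("/me", auth.NewBearerAuthMiddleware(authService, db), func(c *fiber.Ctx) error {
	u, err := auth.AuthenticatedUser(c)
	if err != nil {
		return c.SendStatus(fiber.StatusUnauthorized)
	}
	return c.JSON(u)
})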

View File

@@ -0,0 +1,109 @@
package auth
import (
"context"
"encoding/hex"
"errors"
"log/slog"
"github.com/get-drexa/drexa/internal/password"
"github.com/get-drexa/drexa/internal/user"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type AuthenticationResult struct {
User *user.User
AccessToken string
RefreshToken string
}
var ErrInvalidCredentials = errors.New("invalid credentials")
type Service struct {
userService *user.Service
tokenConfig TokenConfig
}
func NewService(userService *user.Service, tokenConfig TokenConfig) *Service {
return &Service{
userService: userService,
tokenConfig: tokenConfig,
}
}
func (s *Service) GenerateTokenForUser(ctx context.Context, db bun.IDB, user *user.User) (*AuthenticationResult, error) {
at, err := GenerateAccessToken(user, &s.tokenConfig)
if err != nil {
return nil, err
}
rt, err := GenerateRefreshToken(user, &s.tokenConfig)
if err != nil {
return nil, err
}
_, err = db.NewInsert().Model(rt).Exec(ctx)
if err != nil {
return nil, err
}
return &AuthenticationResult{
User: user,
AccessToken: at,
RefreshToken: hex.EncodeToString(rt.Token),
}, nil
}
func (s *Service) AuthenticateWithEmailAndPassword(ctx context.Context, db bun.IDB, email, plain string) (*AuthenticationResult, error) {
u, err := s.userService.UserByEmail(ctx, db, email)
if err != nil {
var nf *user.NotFoundError
if errors.As(err, &nf) {
return nil, ErrInvalidCredentials
}
return nil, err
}
ok, err := password.Verify(plain, u.Password)
if err != nil || !ok {
return nil, ErrInvalidCredentials
}
at, err := GenerateAccessToken(u, &s.tokenConfig)
if err != nil {
return nil, err
}
rt, err := GenerateRefreshToken(u, &s.tokenConfig)
if err != nil {
return nil, err
}
_, err = db.NewInsert().Model(rt).Exec(ctx)
if err != nil {
return nil, err
}
return &AuthenticationResult{
User: u,
AccessToken: at,
RefreshToken: hex.EncodeToString(rt.Token),
}, nil
}
func (s *Service) AuthenticateWithAccessToken(ctx context.Context, db bun.IDB, token string) (*user.User, error) {
claims, err := ParseAccessToken(token, &s.tokenConfig)
if err != nil {
slog.Info("failed to parse access token", "error", err)
return nil, err
}
id, err := uuid.Parse(claims.Subject)
if err != nil {
slog.Info("failed to parse access token subject", "error", err)
return nil, newInvalidAccessTokenError(err)
}
return s.userService.UserByID(ctx, db, id)
}

View File

@@ -0,0 +1,98 @@
package auth
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"time"
"github.com/get-drexa/drexa/internal/user"
"github.com/golang-jwt/jwt/v5"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
const (
accessTokenValidFor = time.Minute * 15
refreshTokenByteLength = 32
refreshTokenValidFor = time.Hour * 24 * 30
)
type TokenConfig struct {
Issuer string
Audience string
SecretKey []byte
}
type RefreshToken struct {
bun.BaseModel `bun:"refresh_tokens"`
ID uuid.UUID `bun:",pk,type:uuid"`
UserID uuid.UUID `bun:"user_id,notnull"`
Token []byte `bun:"-"`
TokenHash string `bun:"token_hash,notnull"`
ExpiresAt time.Time `bun:"expires_at,notnull"`
CreatedAt time.Time `bun:"created_at,notnull,nullzero"`
}
func newTokenID() (uuid.UUID, error) {
return uuid.NewV7()
}
func GenerateAccessToken(user *user.User, c *TokenConfig) (string, error) {
now := time.Now()
token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.RegisteredClaims{
Issuer: c.Issuer,
Audience: jwt.ClaimStrings{c.Audience},
Subject: user.ID.String(),
ExpiresAt: jwt.NewNumericDate(now.Add(accessTokenValidFor)),
IssuedAt: jwt.NewNumericDate(now),
})
signed, err := token.SignedString(c.SecretKey)
if err != nil {
return "", fmt.Errorf("failed to sign token: %w", err)
}
return signed, nil
}
func GenerateRefreshToken(user *user.User, c *TokenConfig) (*RefreshToken, error) {
now := time.Now()
buf := make([]byte, refreshTokenByteLength)
if _, err := rand.Read(buf); err != nil {
return nil, fmt.Errorf("failed to generate refresh token: %w", err)
}
id, err := newTokenID()
if err != nil {
return nil, fmt.Errorf("failed to generate token ID: %w", err)
}
h := sha256.Sum256(buf)
tokenHash := hex.EncodeToString(h[:])
return &RefreshToken{
ID: id,
UserID: user.ID,
Token: buf,
TokenHash: tokenHash,
ExpiresAt: now.Add(refreshTokenValidFor),
CreatedAt: now,
}, nil
}
// ParseAccessToken parses a JWT access token and returns the claims.
// Returns an InvalidAccessTokenError if the token is invalid.
func ParseAccessToken(token string, c *TokenConfig) (*jwt.RegisteredClaims, error) {
parsed, err := jwt.ParseWithClaims(token, &jwt.RegisteredClaims{}, func(token *jwt.Token) (any, error) {
return c.SecretKey, nil
}, jwt.WithIssuer(c.Issuer), jwt.WithExpirationRequired(), jwt.WithAudience(c.Audience))
if err != nil {
return nil, newInvalidAccessTokenError(err)
}
return parsed.Claims.(*jwt.RegisteredClaims), nil
}
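
A round trip through these helpers, as a sketch (u is a *user.User and secretKey the decoded key from the config; both are placeholders):

cfg := auth.TokenConfig{Issuer: "drexa", Audience: "drexa-api", SecretKey: secretKey}
signed, err := auth.GenerateAccessToken(u, &cfg)
if err != nil {
	return err
}
claims, err := auth.ParseAccessToken(signed, &cfg)
if err != nil {
	return err // *InvalidAccessTokenError on a bad signature, issuer, audience, or expiry
}
_ = claims.Subject // the user ID, as set above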

View File

@@ -0,0 +1,9 @@
package blob
import "errors"
var (
ErrConflict = errors.New("key already used for a different blob")
ErrNotFound = errors.New("key not found")
ErrInvalidFileContent = errors.New("invalid file content: must provide either a reader or a blob key")
)

View File

@@ -0,0 +1,154 @@
package blob
import (
"context"
"io"
"os"
"path/filepath"
"github.com/get-drexa/drexa/internal/ioext"
)
var _ Store = &FSStore{}
type FSStore struct {
config FSStoreConfig
}
type FSStoreConfig struct {
Root string
}
func NewFSStore(config FSStoreConfig) *FSStore {
return &FSStore{config: config}
}
func (s *FSStore) Initialize(ctx context.Context) error {
return os.MkdirAll(s.config.Root, 0755)
}
func (s *FSStore) Put(ctx context.Context, key Key, reader io.Reader) error {
path := filepath.Join(s.config.Root, string(key))
err := os.MkdirAll(filepath.Dir(path), 0755)
if err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
if err != nil {
if os.IsExist(err) {
return ErrConflict
}
return err
}
defer f.Close()
_, err = io.Copy(f, reader)
if err != nil {
_ = os.Remove(path)
return err
}
return nil
}
func (s *FSStore) Read(ctx context.Context, key Key) (io.ReadCloser, error) {
path := filepath.Join(s.config.Root, string(key))
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return nil, ErrNotFound
}
return nil, err
}
return f, nil
}
func (s *FSStore) ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error) {
path := filepath.Join(s.config.Root, string(key))
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return nil, ErrNotFound
}
return nil, err
}
_, err = f.Seek(offset, io.SeekStart)
if err != nil {
f.Close()
return nil, err
}
return ioext.NewLimitReadCloser(f, length), nil
}
func (s *FSStore) ReadSize(ctx context.Context, key Key) (int64, error) {
path := filepath.Join(s.config.Root, string(key))
fi, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return 0, ErrNotFound
}
return 0, err
}
return fi.Size(), nil
}
func (s *FSStore) Delete(ctx context.Context, key Key) error {
err := os.Remove(filepath.Join(s.config.Root, string(key)))
// deleting a missing file is a no-op, so swallow the not-exist error
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
func (s *FSStore) DeletePrefix(ctx context.Context, prefix Key) error {
prefixPath := filepath.Join(s.config.Root, string(prefix))
err := os.RemoveAll(prefixPath)
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
func (s *FSStore) Update(ctx context.Context, key Key, opts UpdateOptions) error {
// Update is a no-op for FSStore
return nil
}
func (s *FSStore) Move(ctx context.Context, srcKey, dstKey Key) error {
oldPath := filepath.Join(s.config.Root, string(srcKey))
newPath := filepath.Join(s.config.Root, string(dstKey))
_, err := os.Stat(newPath)
if err == nil {
return ErrConflict
}
err = os.MkdirAll(filepath.Dir(newPath), 0755)
if err != nil {
return err
}
err = os.Rename(oldPath, newPath)
if err != nil {
if os.IsNotExist(err) {
return ErrNotFound
}
return err
}
return nil
}
func (s *FSStore) SupportsDirectUpload() bool {
return false
}
func (s *FSStore) GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error) {
return "", nil
}

View File

@@ -0,0 +1,7 @@
package blob
type Key string
func (k Key) IsNil() bool {
return k == ""
}

View File

@@ -0,0 +1,33 @@
package blob
import (
"context"
"io"
"time"
)
type UploadURLOptions struct {
Duration time.Duration
}
type UpdateOptions struct {
ContentType string
}
type Store interface {
Initialize(ctx context.Context) error
Put(ctx context.Context, key Key, reader io.Reader) error
Update(ctx context.Context, key Key, opts UpdateOptions) error
Delete(ctx context.Context, key Key) error
DeletePrefix(ctx context.Context, prefix Key) error
Move(ctx context.Context, srcKey, dstKey Key) error
Read(ctx context.Context, key Key) (io.ReadCloser, error)
ReadRange(ctx context.Context, key Key, offset, length int64) (io.ReadCloser, error)
ReadSize(ctx context.Context, key Key) (int64, error)
// SupportsDirectUpload returns true if the store allows files to be uploaded directly to the blob store.
SupportsDirectUpload() bool
// GenerateUploadURL generates a URL that can be used to upload a file directly to the blob store. If unsupported, returns an empty string with no error.
GenerateUploadURL(ctx context.Context, key Key, opts UploadURLOptions) (string, error)
}
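
Callers are expected to branch on SupportsDirectUpload; a condensed sketch of the pattern (uploadTarget is a hypothetical helper, but the upload HTTP handler later in this changeset falls back in exactly this way when the returned URL is empty):

func uploadTarget(ctx context.Context, store blob.Store, key blob.Key) (string, error) {
	if store.SupportsDirectUpload() {
		// e.g. a presigned URL for an S3-style backend
		return store.GenerateUploadURL(ctx, key, blob.UploadURLOptions{Duration: 15 * time.Minute})
	}
	return "", nil // caller should accept the bytes itself and proxy them to Put
}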

View File

@@ -0,0 +1,61 @@
package database
import (
"errors"
"github.com/uptrace/bun/driver/pgdriver"
)
// PostgreSQL SQLSTATE error codes.
// See: https://www.postgresql.org/docs/current/errcodes-appendix.html
const (
PgUniqueViolation = "23505"
PgForeignKeyViolation = "23503"
PgNotNullViolation = "23502"
)
// PostgreSQL protocol error field identifiers used with pgdriver.Error.Field().
// See: https://www.postgresql.org/docs/current/protocol-error-fields.html
//
// Common fields:
// - 'C' - SQLSTATE code (e.g., "23505")
// - 'M' - Primary error message
// - 'D' - Detail message
// - 'H' - Hint
// - 's' - Schema name
// - 't' - Table name
// - 'c' - Column name
// - 'n' - Constraint name
const (
pgFieldCode = 'C'
pgFieldConstraint = 'n'
)
// IsUniqueViolation checks if the error is a PostgreSQL unique constraint violation.
func IsUniqueViolation(err error) bool {
return hasPgCode(err, PgUniqueViolation)
}
// IsForeignKeyViolation checks if the error is a PostgreSQL foreign key violation.
func IsForeignKeyViolation(err error) bool {
return hasPgCode(err, PgForeignKeyViolation)
}
// IsNotNullViolation checks if the error is a PostgreSQL not-null constraint violation.
func IsNotNullViolation(err error) bool {
return hasPgCode(err, PgNotNullViolation)
}
// ConstraintName returns the constraint name from a PostgreSQL error, or empty string if not applicable.
func ConstraintName(err error) string {
var pgErr pgdriver.Error
if errors.As(err, &pgErr) {
return pgErr.Field(pgFieldConstraint)
}
return ""
}
func hasPgCode(err error, code string) bool {
var pgErr pgdriver.Error
return errors.As(err, &pgErr) && pgErr.Field(pgFieldCode) == code
}
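
Typical use is to translate a constraint violation into a domain error at the insert site, as the account service above does; condensed:

if _, err := db.NewInsert().Model(account).Exec(ctx); err != nil {
	if database.IsUniqueViolation(err) {
		return ErrAccountAlreadyExists
	}
	return err
}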

View File

@@ -0,0 +1,28 @@
package database
import (
"context"
"embed"
"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"
)
//go:embed migrations/*.sql
var sqlMigrations embed.FS
// RunMigrations discovers and runs all migrations against the database.
func RunMigrations(ctx context.Context, db *bun.DB) error {
migrations := migrate.NewMigrations()
if err := migrations.Discover(sqlMigrations); err != nil {
return err
}
migrator := migrate.NewMigrator(db, migrations)
if err := migrator.Init(ctx); err != nil {
return err
}
_, err := migrator.Migrate(ctx)
return err
}

View File

@@ -0,0 +1,106 @@
-- ============================================================================
-- Application Tables
-- ============================================================================
CREATE TABLE IF NOT EXISTS users (
id UUID PRIMARY KEY,
display_name TEXT,
email TEXT NOT NULL UNIQUE,
password TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_users_email ON users(email);
CREATE TABLE IF NOT EXISTS accounts (
id UUID PRIMARY KEY,
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
storage_usage_bytes BIGINT NOT NULL DEFAULT 0,
storage_quota_bytes BIGINT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_accounts_user_id ON accounts(user_id);
CREATE TABLE IF NOT EXISTS refresh_tokens (
id UUID PRIMARY KEY,
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
token_hash TEXT NOT NULL UNIQUE,
expires_at TIMESTAMPTZ NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_refresh_tokens_user_id ON refresh_tokens(user_id);
CREATE INDEX idx_refresh_tokens_token_hash ON refresh_tokens(token_hash);
CREATE INDEX idx_refresh_tokens_expires_at ON refresh_tokens(expires_at);
-- Virtual filesystem nodes (unified files + directories)
CREATE TABLE IF NOT EXISTS vfs_nodes (
id UUID PRIMARY KEY,
public_id TEXT NOT NULL UNIQUE, -- opaque ID for external API (no timestamp leak)
account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
parent_id UUID REFERENCES vfs_nodes(id) ON DELETE CASCADE, -- NULL = root directory
kind TEXT NOT NULL CHECK (kind IN ('file', 'directory')),
status TEXT NOT NULL DEFAULT 'ready' CHECK (status IN ('pending', 'ready')),
name TEXT NOT NULL,
-- File-specific fields (NULL for directories)
blob_key TEXT, -- reference to blob storage (flat mode), NULL for hierarchical
size BIGINT, -- file size in bytes
mime_type TEXT, -- content type
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
deleted_at TIMESTAMPTZ, -- soft delete for trash
-- No duplicate names in same parent (per account, excluding deleted)
CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (account_id, parent_id, name, deleted_at)
);
CREATE INDEX idx_vfs_nodes_account_id ON vfs_nodes(account_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_parent_id ON vfs_nodes(parent_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_account_parent ON vfs_nodes(account_id, parent_id) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(account_id, kind) WHERE deleted_at IS NULL;
CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(account_id, deleted_at) WHERE deleted_at IS NOT NULL;
CREATE INDEX idx_vfs_nodes_public_id ON vfs_nodes(public_id);
CREATE UNIQUE INDEX idx_vfs_nodes_account_root ON vfs_nodes(account_id) WHERE parent_id IS NULL; -- one root per account
CREATE INDEX idx_vfs_nodes_pending ON vfs_nodes(created_at) WHERE status = 'pending'; -- for cleanup job
CREATE TABLE IF NOT EXISTS node_shares (
id UUID PRIMARY KEY,
node_id UUID NOT NULL REFERENCES vfs_nodes(id) ON DELETE CASCADE,
share_token TEXT NOT NULL UNIQUE,
expires_at TIMESTAMPTZ,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_node_shares_share_token ON node_shares(share_token);
CREATE INDEX idx_node_shares_node_id ON node_shares(node_id);
CREATE INDEX idx_node_shares_expires_at ON node_shares(expires_at) WHERE expires_at IS NOT NULL;
-- ============================================================================
-- Triggers for updated_at timestamps
-- ============================================================================
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_vfs_nodes_updated_at BEFORE UPDATE ON vfs_nodes
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_node_shares_updated_at BEFORE UPDATE ON node_shares
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_accounts_updated_at BEFORE UPDATE ON accounts
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View File

@@ -0,0 +1,27 @@
package database
import (
"database/sql"
"time"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect/pgdialect"
"github.com/uptrace/bun/driver/pgdriver"
)
func NewFromPostgres(url string) *bun.DB {
sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(url)))
// Configure connection pool to prevent "database closed" errors
// SetMaxOpenConns sets the maximum number of open connections to the database
sqldb.SetMaxOpenConns(25)
// SetMaxIdleConns sets the maximum number of connections in the idle connection pool
sqldb.SetMaxIdleConns(5)
// SetConnMaxLifetime sets the maximum amount of time a connection may be reused
sqldb.SetConnMaxLifetime(5 * time.Minute)
// SetConnMaxIdleTime sets the maximum amount of time a connection may be idle
sqldb.SetConnMaxIdleTime(10 * time.Minute)
db := bun.NewDB(sqldb, pgdialect.New())
return db
}

View File

@@ -0,0 +1,155 @@
package drexa
import (
"encoding/base64"
"errors"
"fmt"
"os"
"gopkg.in/yaml.v3"
)
type StorageMode string
type StorageBackend string
const (
StorageModeFlat StorageMode = "flat"
StorageModeHierarchical StorageMode = "hierarchical"
)
const (
StorageBackendFS StorageBackend = "fs"
StorageBackendS3 StorageBackend = "s3"
)
type Config struct {
Server ServerConfig `yaml:"server"`
Database DatabaseConfig `yaml:"database"`
JWT JWTConfig `yaml:"jwt"`
Storage StorageConfig `yaml:"storage"`
}
type ServerConfig struct {
Port int `yaml:"port"`
}
type DatabaseConfig struct {
PostgresURL string `yaml:"postgres_url"`
}
type JWTConfig struct {
Issuer string `yaml:"issuer"`
Audience string `yaml:"audience"`
SecretKeyBase64 string `yaml:"secret_key_base64"`
SecretKeyPath string `yaml:"secret_key_path"`
SecretKey []byte `yaml:"-"`
}
type StorageConfig struct {
Mode StorageMode `yaml:"mode"`
Backend StorageBackend `yaml:"backend"`
RootPath string `yaml:"root_path"`
Bucket string `yaml:"bucket"`
}
// ConfigFromFile loads configuration from a YAML file.
// JWT secret key is loaded from JWT_SECRET_KEY env var (base64 encoded),
// falling back to the file path specified in jwt.secret_key_path.
func ConfigFromFile(path string) (*Config, error) {
data, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("config file not found: %s", path)
}
return nil, err
}
var config Config
if err := yaml.Unmarshal(data, &config); err != nil {
return nil, err
}
// Load JWT secret key (priority: env var > config base64 > config file path)
if envKey := os.Getenv("JWT_SECRET_KEY"); envKey != "" {
key, err := base64.StdEncoding.DecodeString(envKey)
if err != nil {
return nil, errors.New("JWT_SECRET_KEY env var is not valid base64")
}
config.JWT.SecretKey = key
} else if config.JWT.SecretKeyBase64 != "" {
key, err := base64.StdEncoding.DecodeString(config.JWT.SecretKeyBase64)
if err != nil {
return nil, errors.New("jwt.secret_key_base64 is not valid base64")
}
config.JWT.SecretKey = key
} else if config.JWT.SecretKeyPath != "" {
keyData, err := os.ReadFile(config.JWT.SecretKeyPath)
if err != nil {
return nil, err
}
key, err := base64.StdEncoding.DecodeString(string(keyData))
if err != nil {
return nil, errors.New("jwt.secret_key_path file content is not valid base64")
}
config.JWT.SecretKey = key
}
if errs := config.Validate(); len(errs) > 0 {
return nil, NewConfigError(errs...)
}
return &config, nil
}
// Validate checks for required configuration fields.
func (c *Config) Validate() []error {
var errs []error
// Server
if c.Server.Port == 0 {
errs = append(errs, errors.New("server.port is required"))
}
// Database
if c.Database.PostgresURL == "" {
errs = append(errs, errors.New("database.postgres_url is required"))
}
// JWT
if c.JWT.Issuer == "" {
errs = append(errs, errors.New("jwt.issuer is required"))
}
if c.JWT.Audience == "" {
errs = append(errs, errors.New("jwt.audience is required"))
}
if len(c.JWT.SecretKey) == 0 {
errs = append(errs, errors.New("jwt secret key is required (set JWT_SECRET_KEY env var, jwt.secret_key_base64, or jwt.secret_key_path)"))
}
// Storage
if c.Storage.Mode == "" {
errs = append(errs, errors.New("storage.mode is required"))
} else if c.Storage.Mode != StorageModeFlat && c.Storage.Mode != StorageModeHierarchical {
errs = append(errs, errors.New("storage.mode must be 'flat' or 'hierarchical'"))
}
if c.Storage.Backend == "" {
errs = append(errs, errors.New("storage.backend is required"))
} else if c.Storage.Backend != StorageBackendFS && c.Storage.Backend != StorageBackendS3 {
errs = append(errs, errors.New("storage.backend must be 'fs' or 's3'"))
}
if c.Storage.Backend == StorageBackendFS && c.Storage.RootPath == "" {
errs = append(errs, errors.New("storage.root_path is required when backend is 'fs'"))
}
if c.Storage.Backend == StorageBackendS3 {
if c.Storage.Bucket == "" {
errs = append(errs, errors.New("storage.bucket is required when backend is 's3'"))
}
if c.Storage.Mode == StorageModeHierarchical {
errs = append(errs, errors.New("storage.mode must be 'flat' when backend is 's3'"))
}
}
return errs
}

View File

@@ -0,0 +1,23 @@
package drexa
import (
"fmt"
"strings"
)
type ConfigError struct {
Errors []error
}
func NewConfigError(errs ...error) *ConfigError {
return &ConfigError{Errors: errs}
}
func (e *ConfigError) Error() string {
sb := strings.Builder{}
sb.WriteString("invalid config:\n")
for _, err := range e.Errors {
sb.WriteString(fmt.Sprintf(" - %s\n", err.Error()))
}
return sb.String()
}

View File

@@ -0,0 +1,114 @@
package drexa
import (
"context"
"fmt"
"github.com/get-drexa/drexa/internal/account"
"github.com/get-drexa/drexa/internal/auth"
"github.com/get-drexa/drexa/internal/blob"
"github.com/get-drexa/drexa/internal/database"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/get-drexa/drexa/internal/upload"
"github.com/get-drexa/drexa/internal/user"
"github.com/get-drexa/drexa/internal/virtualfs"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/logger"
"github.com/uptrace/bun"
"github.com/uptrace/bun/extra/bundebug"
)
type Server struct {
config Config
app *fiber.App
db *bun.DB
blobStore blob.Store
vfs *virtualfs.VirtualFS
userService *user.Service
authService *auth.Service
accountService *account.Service
uploadService *upload.Service
authMiddleware fiber.Handler
}
func NewServer(c Config) (*Server, error) {
app := fiber.New(fiber.Config{
ErrorHandler: httperr.ErrorHandler,
StreamRequestBody: true,
})
app.Use(logger.New())
db := database.NewFromPostgres(c.Database.PostgresURL)
db.AddQueryHook(bundebug.NewQueryHook(bundebug.WithVerbose(true)))
// Initialize blob store based on config
var blobStore blob.Store
switch c.Storage.Backend {
case StorageBackendFS:
blobStore = blob.NewFSStore(blob.FSStoreConfig{
Root: c.Storage.RootPath,
})
case StorageBackendS3:
return nil, fmt.Errorf("s3 storage backend not yet implemented")
default:
return nil, fmt.Errorf("unknown storage backend: %s", c.Storage.Backend)
}
err := blobStore.Initialize(context.Background())
if err != nil {
return nil, fmt.Errorf("failed to initialize blob store: %w", err)
}
// Initialize key resolver based on config
var keyResolver virtualfs.BlobKeyResolver
switch c.Storage.Mode {
case StorageModeFlat:
keyResolver = virtualfs.NewFlatKeyResolver()
case StorageModeHierarchical:
keyResolver = virtualfs.NewHierarchicalKeyResolver(db)
default:
return nil, fmt.Errorf("unknown storage mode: %s", c.Storage.Mode)
}
vfs, err := virtualfs.NewVirtualFS(blobStore, keyResolver)
if err != nil {
return nil, fmt.Errorf("failed to create virtual file system: %w", err)
}
userService := user.NewService()
authService := auth.NewService(userService, auth.TokenConfig{
Issuer: c.JWT.Issuer,
Audience: c.JWT.Audience,
SecretKey: c.JWT.SecretKey,
})
uploadService := upload.NewService(vfs, blobStore)
accountService := account.NewService(userService, vfs)
authMiddleware := auth.NewBearerAuthMiddleware(authService, db)
api := app.Group("/api")
accRouter := account.NewHTTPHandler(accountService, authService, db, authMiddleware).RegisterRoutes(api)
auth.NewHTTPHandler(authService, db).RegisterRoutes(api)
upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accRouter)
s := &Server{
config: c,
app: app,
db: db,
blobStore: blobStore,
vfs: vfs,
userService: userService,
authService: authService,
accountService: accountService,
uploadService: uploadService,
authMiddleware: authMiddleware,
}
return s, nil
}
func (s *Server) Start() error {
return s.app.Listen(fmt.Sprintf(":%d", s.config.Server.Port))
}

View File

@@ -0,0 +1,42 @@
package httperr
import (
"fmt"
"github.com/gofiber/fiber/v2"
)
// HTTPError represents an HTTP error with a status code and underlying error.
type HTTPError struct {
Code int
Message string
Err error
}
// Error implements the error interface.
func (e *HTTPError) Error() string {
if e.Err != nil {
return fmt.Sprintf("HTTP %d: %s: %v", e.Code, e.Message, e.Err)
}
return fmt.Sprintf("HTTP %d: %s", e.Code, e.Message)
}
// Unwrap returns the underlying error.
func (e *HTTPError) Unwrap() error {
return e.Err
}
// NewHTTPError creates a new HTTPError with the given status code, message, and underlying error.
func NewHTTPError(code int, message string, err error) *HTTPError {
return &HTTPError{
Code: code,
Message: message,
Err: err,
}
}
// Internal creates a new HTTPError with status 500.
func Internal(err error) *HTTPError {
return NewHTTPError(fiber.StatusInternalServerError, "Internal", err)
}

View File

@@ -0,0 +1,64 @@
package httperr
import (
"errors"
"log/slog"
"github.com/gofiber/fiber/v2"
)
// ErrorHandler is a global error handler for Fiber that logs errors and returns appropriate responses.
func ErrorHandler(c *fiber.Ctx, err error) error {
// Default status code
code := fiber.StatusInternalServerError
message := "Internal"
// Check if it's our custom HTTPError
var httpErr *HTTPError
if errors.As(err, &httpErr) {
code = httpErr.Code
message = httpErr.Message
// Log the error with underlying error details
if httpErr.Err != nil {
slog.Error("HTTP error",
"status", code,
"message", message,
"error", httpErr.Err.Error(),
"path", c.Path(),
"method", c.Method(),
)
} else {
slog.Warn("HTTP error",
"status", code,
"message", message,
"path", c.Path(),
"method", c.Method(),
)
}
} else {
// Check if it's a Fiber error
var fiberErr *fiber.Error
if errors.As(err, &fiberErr) {
code = fiberErr.Code
message = fiberErr.Message
} else {
// Generic error - log it
slog.Error("Unhandled error",
"status", code,
"error", err.Error(),
"path", c.Path(),
"method", c.Method(),
)
}
}
// Set Content-Type header
c.Set(fiber.HeaderContentType, fiber.MIMEApplicationJSONCharsetUTF8)
// Return JSON response
return c.Status(code).JSON(fiber.Map{
"error": message,
})
}

View File

@@ -0,0 +1,23 @@
package ioext
import "io"
type CountingReader struct {
reader io.Reader
count int64
}
func NewCountingReader(reader io.Reader) *CountingReader {
return &CountingReader{reader: reader}
}
func (r *CountingReader) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
r.count += int64(n)
return n, err
}
func (r *CountingReader) Count() int64 {
return r.count
}

View File

@@ -0,0 +1,24 @@
package ioext
import "io"
type LimitReadCloser struct {
reader io.ReadCloser
limitReader io.Reader
}
func NewLimitReadCloser(reader io.ReadCloser, length int64) *LimitReadCloser {
return &LimitReadCloser{
reader: reader,
limitReader: io.LimitReader(reader, length),
}
}
func (r *LimitReadCloser) Read(p []byte) (n int, err error) {
return r.limitReader.Read(p)
}
func (r *LimitReadCloser) Close() error {
return r.reader.Close()
}
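
FSStore.ReadRange pairs this with a Seek so a caller can stream part of a blob; usage, as a sketch (store, key, and dst are placeholders):

rc, err := store.ReadRange(ctx, key, 0, 1024) // first KiB of the blob
if err != nil {
	return err
}
defer rc.Close() // Close is forwarded to the underlying *os.File
_, err = io.Copy(dst, rc)
return err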

View File

@@ -0,0 +1,138 @@
package password
import (
"crypto/rand"
"crypto/subtle"
"encoding/base64"
"errors"
"fmt"
"strings"
"golang.org/x/crypto/argon2"
)
// Hashed represents a securely hashed password.
// This type ensures plaintext passwords cannot be accidentally stored.
type Hashed string
// argon2id parameters
const (
memory = 64 * 1024
iterations = 3
parallelism = 2
saltLength = 16
keyLength = 32
)
var (
ErrInvalidHash = errors.New("invalid hash format")
ErrIncompatibleHash = errors.New("incompatible hash algorithm")
ErrIncompatibleVersion = errors.New("incompatible argon2 version")
)
type argon2Hash struct {
memory uint32
iterations uint32
parallelism uint8
salt []byte
hash []byte
}
// Hash securely hashes a plaintext password using argon2id.
func Hash(plain string) (Hashed, error) {
salt := make([]byte, saltLength)
if _, err := rand.Read(salt); err != nil {
return "", fmt.Errorf("failed to generate salt: %w", err)
}
hash := argon2.IDKey(
[]byte(plain),
salt,
iterations,
memory,
parallelism,
keyLength,
)
b64Salt := base64.RawStdEncoding.EncodeToString(salt)
b64Hash := base64.RawStdEncoding.EncodeToString(hash)
encoded := fmt.Sprintf(
"$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s",
argon2.Version,
memory,
iterations,
parallelism,
b64Salt,
b64Hash,
)
return Hashed(encoded), nil
}
// Verify checks if a plaintext password matches a hashed password.
func Verify(plain string, hashed Hashed) (bool, error) {
h, err := decodeHash(string(hashed))
if err != nil {
return false, err
}
otherHash := argon2.IDKey(
[]byte(plain),
h.salt,
h.iterations,
h.memory,
h.parallelism,
uint32(len(h.hash)),
)
if subtle.ConstantTimeCompare(h.hash, otherHash) == 1 {
return true, nil
}
return false, nil
}
func decodeHash(encodedHash string) (*argon2Hash, error) {
parts := strings.Split(encodedHash, "$")
if len(parts) != 6 {
return nil, ErrInvalidHash
}
if parts[1] != "argon2id" {
return nil, ErrIncompatibleHash
}
var version int
if _, err := fmt.Sscanf(parts[2], "v=%d", &version); err != nil {
return nil, fmt.Errorf("failed to parse version: %w", err)
}
if version != argon2.Version {
return nil, ErrIncompatibleVersion
}
h := &argon2Hash{}
if _, err := fmt.Sscanf(
parts[3],
"m=%d,t=%d,p=%d",
&h.memory,
&h.iterations,
&h.parallelism,
); err != nil {
return nil, fmt.Errorf("failed to parse parameters: %w", err)
}
salt, err := base64.RawStdEncoding.DecodeString(parts[4])
if err != nil {
return nil, fmt.Errorf("failed to decode salt: %w", err)
}
h.salt = salt
hash, err := base64.RawStdEncoding.DecodeString(parts[5])
if err != nil {
return nil, fmt.Errorf("failed to decode hash: %w", err)
}
h.hash = hash
return h, nil
}
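
A round-trip sketch of the API above (plaintext illustrative). The encoded form embeds algorithm, version, parameters, salt, and hash, so Verify needs no state beyond the stored string:

package main

import (
	"fmt"

	"github.com/get-drexa/drexa/internal/password"
)

func main() {
	hashed, err := password.Hash("correct horse battery staple")
	if err != nil {
		panic(err)
	}
	ok, err := password.Verify("correct horse battery staple", hashed)
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true
}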

View File

@@ -0,0 +1,10 @@
package upload
import "errors"
var (
ErrNotFound = errors.New("not found")
ErrParentNotDirectory = errors.New("parent is not a directory")
ErrConflict = errors.New("node conflict")
ErrContentNotUploaded = errors.New("content has not been uploaded")
)

View File

@@ -0,0 +1,120 @@
package upload
import (
"errors"
"fmt"
"github.com/get-drexa/drexa/internal/account"
"github.com/get-drexa/drexa/internal/httperr"
"github.com/gofiber/fiber/v2"
"github.com/uptrace/bun"
)
type createUploadRequest struct {
ParentID string `json:"parentId"`
Name string `json:"name"`
}
type updateUploadRequest struct {
Status Status `json:"status"`
}
type HTTPHandler struct {
service *Service
db *bun.DB
}
func NewHTTPHandler(s *Service, db *bun.DB) *HTTPHandler {
return &HTTPHandler{service: s, db: db}
}
func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
upload := api.Group("/uploads")
upload.Post("/", h.Create)
upload.Put("/:uploadID/content", h.ReceiveContent)
upload.Patch("/:uploadID", h.Update)
}
func (h *HTTPHandler) Create(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
req := new(createUploadRequest)
if err := c.BodyParser(req); err != nil {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
}
upload, err := h.service.CreateUpload(c.Context(), h.db, account.ID, CreateUploadOptions{
ParentID: req.ParentID,
Name: req.Name,
})
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
if errors.Is(err, ErrParentNotDirectory) {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Parent is not a directory"})
}
if errors.Is(err, ErrConflict) {
return c.Status(fiber.StatusConflict).JSON(fiber.Map{"error": "A file with this name already exists"})
}
return httperr.Internal(err)
}
if upload.UploadURL == "" {
upload.UploadURL = fmt.Sprintf("%s%s/%s/content", c.BaseURL(), c.OriginalURL(), upload.ID)
}
return c.JSON(upload)
}
func (h *HTTPHandler) ReceiveContent(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
uploadID := c.Params("uploadID")
defer c.Context().Request.CloseBodyStream()
err := h.service.ReceiveUpload(c.Context(), h.db, account.ID, uploadID, c.Context().RequestBodyStream())
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
return httperr.Internal(err)
}
return c.SendStatus(fiber.StatusNoContent)
}
func (h *HTTPHandler) Update(c *fiber.Ctx) error {
account := account.CurrentAccount(c)
if account == nil {
return c.SendStatus(fiber.StatusUnauthorized)
}
req := new(updateUploadRequest)
if err := c.BodyParser(req); err != nil {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Invalid request"})
}
if req.Status == StatusCompleted {
upload, err := h.service.CompleteUpload(c.Context(), h.db, account.ID, c.Params("uploadID"))
if err != nil {
if errors.Is(err, ErrNotFound) {
return c.SendStatus(fiber.StatusNotFound)
}
if errors.Is(err, ErrContentNotUploaded) {
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Content has not been uploaded"})
}
return httperr.Internal(err)
}
return c.JSON(upload)
}
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Unsupported status"})
}
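
For context, a minimal wiring sketch; the port, prefix, and surrounding setup are assumptions, not taken from this diff. StreamRequestBody matters because ReceiveContent reads c.Context().RequestBodyStream():

package server

import (
	"github.com/get-drexa/drexa/internal/upload"
	"github.com/gofiber/fiber/v2"
	"github.com/uptrace/bun"
)

func run(svc *upload.Service, db *bun.DB) error {
	app := fiber.New(fiber.Config{
		// Without this, Fiber buffers the whole body instead of streaming it.
		StreamRequestBody: true,
	})
	api := app.Group("/api")
	upload.NewHTTPHandler(svc, db).RegisterRoutes(api)
	return app.Listen(":8080")
}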

View File

@@ -0,0 +1,144 @@
package upload
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/get-drexa/drexa/internal/blob"
"github.com/get-drexa/drexa/internal/virtualfs"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type Service struct {
vfs *virtualfs.VirtualFS
blobStore blob.Store
pendingUploads sync.Map
}
func NewService(vfs *virtualfs.VirtualFS, blobStore blob.Store) *Service {
return &Service{
vfs: vfs,
blobStore: blobStore,
pendingUploads: sync.Map{},
}
}
type CreateUploadOptions struct {
ParentID string
Name string
}
func (s *Service) CreateUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateUploadOptions) (*Upload, error) {
parentNode, err := s.vfs.FindNodeByPublicID(ctx, db, accountID, opts.ParentID)
if err != nil {
if errors.Is(err, virtualfs.ErrNodeNotFound) {
return nil, ErrNotFound
}
return nil, err
}
if parentNode.Kind != virtualfs.NodeKindDirectory {
return nil, ErrParentNotDirectory
}
node, err := s.vfs.CreateFile(ctx, db, accountID, virtualfs.CreateFileOptions{
ParentID: parentNode.ID,
Name: opts.Name,
})
if err != nil {
if errors.Is(err, virtualfs.ErrNodeConflict) {
return nil, ErrConflict
}
return nil, err
}
var uploadURL string
if s.blobStore.SupportsDirectUpload() {
uploadURL, err = s.blobStore.GenerateUploadURL(ctx, node.BlobKey, blob.UploadURLOptions{
Duration: 1 * time.Hour,
})
if err != nil {
// Best-effort cleanup: delete the pending row directly, since
// PermanentlyDeleteNode rejects nodes that are not yet ready.
_, _ = db.NewDelete().Model(node).WherePK().Exec(ctx)
return nil, err
}
}
upload := &Upload{
ID: node.PublicID,
Status: StatusPending,
TargetNode: node,
UploadURL: uploadURL,
}
s.pendingUploads.Store(upload.ID, upload)
return upload, nil
}
func (s *Service) ReceiveUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string, reader io.Reader) error {
n, ok := s.pendingUploads.Load(uploadID)
if !ok {
return ErrNotFound
}
upload, ok := n.(*Upload)
if !ok {
return ErrNotFound
}
if upload.TargetNode.AccountID != accountID {
return ErrNotFound
}
err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromReader(reader))
if err != nil {
return err
}
upload.Status = StatusCompleted
return nil
}
func (s *Service) CompleteUpload(ctx context.Context, db bun.IDB, accountID uuid.UUID, uploadID string) (*Upload, error) {
n, ok := s.pendingUploads.Load(uploadID)
if !ok {
return nil, ErrNotFound
}
upload, ok := n.(*Upload)
if !ok {
return nil, ErrNotFound
}
if upload.TargetNode.AccountID != accountID {
return nil, ErrNotFound
}
if upload.TargetNode.Status == virtualfs.NodeStatusReady && upload.Status == StatusCompleted {
return upload, nil
}
err := s.vfs.WriteFile(ctx, db, upload.TargetNode, virtualfs.FileContentFromBlobKey(upload.TargetNode.BlobKey))
if err != nil {
if errors.Is(err, blob.ErrNotFound) {
return nil, ErrContentNotUploaded
}
return nil, err
}
upload.Status = StatusCompleted
s.pendingUploads.Delete(uploadID)
return upload, nil
}
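
Two completion paths fall out of CreateUpload: with direct upload the client PUTs to the signed URL and then PATCHes the upload to completed (CompleteUpload verifies the blob exists); otherwise the client PUTs the bytes to the server and ReceiveUpload streams them into the VFS. Note that pendingUploads is in-process state, so a pending upload does not survive a restart. A sketch of the server-received path (names illustrative):

package example

import (
	"context"
	"io"

	"github.com/get-drexa/drexa/internal/upload"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

func serverSideUpload(ctx context.Context, db bun.IDB, svc *upload.Service,
	accountID uuid.UUID, parentPublicID, name string, body io.Reader) error {
	up, err := svc.CreateUpload(ctx, db, accountID, upload.CreateUploadOptions{
		ParentID: parentPublicID,
		Name:     name,
	})
	if err != nil {
		return err
	}
	// ReceiveUpload writes the blob, fills in size and MIME type via the VFS,
	// and marks the upload completed.
	return svc.ReceiveUpload(ctx, db, accountID, up.ID, body)
}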

View File

@@ -0,0 +1,18 @@
package upload
import "github.com/get-drexa/drexa/internal/virtualfs"
type Status string
const (
StatusPending Status = "pending"
StatusCompleted Status = "completed"
StatusFailed Status = "failed"
)
type Upload struct {
ID string `json:"id"`
Status Status `json:"status"`
TargetNode *virtualfs.Node `json:"-"`
UploadURL string `json:"uploadUrl"`
}

View File

@@ -0,0 +1,36 @@
package user
import (
"fmt"
"github.com/google/uuid"
)
type NotFoundError struct {
// id is the ID used to look up the user; uuid.Nil if the lookup was not by ID.
id uuid.UUID
// email is the email used to look up the user; empty if the lookup was not by email.
email string
}
func newNotFoundError(id uuid.UUID, email string) *NotFoundError {
return &NotFoundError{id, email}
}
func (e *NotFoundError) Error() string {
if e.id == uuid.Nil && e.email != "" {
return fmt.Sprintf("user not found: %s", e.email)
}
return fmt.Sprintf("user not found: %v", e.id)
}
type AlreadyExistsError struct {
// Email is the email that was used to try to create the user.
Email string
}
func newAlreadyExistsError(email string) *AlreadyExistsError {
return &AlreadyExistsError{email}
}
func (e *AlreadyExistsError) Error() string {
return fmt.Sprintf("user with email %s already exists", e.Email)
}
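
Callers match these typed errors with errors.As; a sketch (the invalid-credentials mapping is an illustrative choice, not part of this package):

package example

import (
	"context"
	"errors"

	"github.com/get-drexa/drexa/internal/user"
	"github.com/uptrace/bun"
)

func lookup(ctx context.Context, db bun.IDB, svc *user.Service, email string) (*user.User, error) {
	u, err := svc.UserByEmail(ctx, db, email)
	if err != nil {
		var notFound *user.NotFoundError
		if errors.As(err, &notFound) {
			// Collapse "no such user" into a generic failure so responses
			// don't reveal whether the email is registered.
			return nil, errors.New("invalid credentials")
		}
		return nil, err
	}
	return u, nil
}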

View File

@@ -0,0 +1,76 @@
package user
import (
"context"
"database/sql"
"errors"
"github.com/get-drexa/drexa/internal/database"
"github.com/get-drexa/drexa/internal/password"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type Service struct{}
type UserRegistrationOptions struct {
Email string
DisplayName string
Password password.Hashed
}
func NewService() *Service {
return &Service{}
}
func (s *Service) RegisterUser(ctx context.Context, db bun.IDB, opts UserRegistrationOptions) (*User, error) {
uid, err := newUserID()
if err != nil {
return nil, err
}
u := User{
ID: uid,
Email: opts.Email,
DisplayName: opts.DisplayName,
Password: opts.Password,
}
_, err = db.NewInsert().Model(&u).Returning("*").Exec(ctx)
if err != nil {
if database.IsUniqueViolation(err) {
return nil, newAlreadyExistsError(u.Email)
}
return nil, err
}
return &u, nil
}
func (s *Service) UserByID(ctx context.Context, db bun.IDB, id uuid.UUID) (*User, error) {
var user User
err := db.NewSelect().Model(&user).Where("id = ?", id).Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, newNotFoundError(id, "")
}
return nil, err
}
return &user, nil
}
func (s *Service) UserByEmail(ctx context.Context, db bun.IDB, email string) (*User, error) {
var user User
err := db.NewSelect().Model(&user).Where("email = ?", email).Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, newNotFoundError(uuid.Nil, email)
}
return nil, err
}
return &user, nil
}
func (s *Service) UserExistsByEmail(ctx context.Context, db bun.IDB, email string) (bool, error) {
return db.NewSelect().Model(&User{}).Where("email = ?", email).Exists(ctx)
}
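
Registration composes this service with the password package; a sketch with illustrative names:

package example

import (
	"context"
	"errors"

	"github.com/get-drexa/drexa/internal/password"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/uptrace/bun"
)

func register(ctx context.Context, db bun.IDB, svc *user.Service, email, displayName, plain string) (*user.User, error) {
	hashed, err := password.Hash(plain)
	if err != nil {
		return nil, err
	}
	u, err := svc.RegisterUser(ctx, db, user.UserRegistrationOptions{
		Email:       email,
		DisplayName: displayName,
		Password:    hashed,
	})
	var exists *user.AlreadyExistsError
	if errors.As(err, &exists) {
		// e.g. map to HTTP 409 at the handler layer.
		return nil, err
	}
	return u, err
}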

View File

@@ -0,0 +1,24 @@
package user
import (
"time"
"github.com/get-drexa/drexa/internal/password"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type User struct {
bun.BaseModel `bun:"users"`
ID uuid.UUID `bun:",pk,type:uuid" json:"id"`
DisplayName string `bun:"display_name" json:"displayName"`
Email string `bun:"email,unique,notnull" json:"email"`
Password password.Hashed `bun:"password,notnull" json:"-"`
CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}
func newUserID() (uuid.UUID, error) {
return uuid.NewV7()
}

View File

@@ -0,0 +1,9 @@
package virtualfs
import "errors"
var (
ErrNodeNotFound = errors.New("node not found")
ErrNodeConflict = errors.New("node conflict")
ErrUnsupportedOperation = errors.New("unsupported operation")
)

View File

@@ -0,0 +1,35 @@
package virtualfs
import (
"context"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
)
type FlatKeyResolver struct{}
var _ BlobKeyResolver = &FlatKeyResolver{}
func NewFlatKeyResolver() *FlatKeyResolver {
return &FlatKeyResolver{}
}
func (r *FlatKeyResolver) ShouldPersistKey() bool {
return true
}
func (r *FlatKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
if node.BlobKey == "" {
id, err := uuid.NewV7()
if err != nil {
return "", err
}
return blob.Key(id.String()), nil
}
return node.BlobKey, nil
}
func (r *FlatKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
return &DeletionPlan{Keys: allKeys}, nil
}

View File

@@ -0,0 +1,40 @@
package virtualfs
import (
"context"
"fmt"
"github.com/get-drexa/drexa/internal/blob"
"github.com/uptrace/bun"
)
type HierarchicalKeyResolver struct {
db *bun.DB
}
var _ BlobKeyResolver = &HierarchicalKeyResolver{}
func NewHierarchicalKeyResolver(db *bun.DB) *HierarchicalKeyResolver {
return &HierarchicalKeyResolver{db: db}
}
func (r *HierarchicalKeyResolver) ShouldPersistKey() bool {
return false
}
func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, node *Node) (blob.Key, error) {
path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
if err != nil {
return "", err
}
return blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path)), nil
}
func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
path, err := buildNodeAbsolutePath(ctx, r.db, node.ID)
if err != nil {
return nil, err
}
return &DeletionPlan{Prefix: blob.Key(path)}, nil
}

View File

@@ -0,0 +1,21 @@
package virtualfs
import (
"context"
"github.com/get-drexa/drexa/internal/blob"
)
type BlobKeyResolver interface {
// ShouldPersistKey returns true if the resolved key should be stored in node.BlobKey.
// Flat keys (e.g. UUIDs) return true - key is generated once and stored.
// Hierarchical keys return false - key is derived from path each time.
ShouldPersistKey() bool
Resolve(ctx context.Context, node *Node) (blob.Key, error)
ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error)
}
type DeletionPlan struct {
Prefix blob.Key
Keys []blob.Key
}
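
The trade-off between the two implementations: flat keys are stable across renames and moves (no blob copy needed) but opaque in the store; hierarchical keys mirror the directory tree in the store but must be re-derived, and moved, whenever the tree changes. A wiring sketch (the helper name is illustrative):

package example

import (
	"github.com/get-drexa/drexa/internal/blob"
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/uptrace/bun"
)

func newVFS(store blob.Store, db *bun.DB, hierarchical bool) (*virtualfs.VirtualFS, error) {
	var resolver virtualfs.BlobKeyResolver
	if hierarchical {
		// Keys look like "<accountID>/<path>" and are re-derived per operation.
		resolver = virtualfs.NewHierarchicalKeyResolver(db)
	} else {
		// Keys are UUIDs, generated once and persisted on the node.
		resolver = virtualfs.NewFlatKeyResolver()
	}
	return virtualfs.NewVirtualFS(store, resolver)
}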

View File

@@ -0,0 +1,53 @@
package virtualfs
import (
"time"
"github.com/get-drexa/drexa/internal/blob"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
type NodeKind string
const (
NodeKindFile NodeKind = "file"
NodeKindDirectory NodeKind = "directory"
)
type NodeStatus string
const (
NodeStatusPending NodeStatus = "pending"
NodeStatusReady NodeStatus = "ready"
)
type Node struct {
bun.BaseModel `bun:"vfs_nodes"`
ID uuid.UUID `bun:",pk,type:uuid"`
PublicID string `bun:"public_id,notnull"`
AccountID uuid.UUID `bun:"account_id,notnull,type:uuid"`
ParentID uuid.UUID `bun:"parent_id,nullzero"`
Kind NodeKind `bun:"kind,notnull"`
Status NodeStatus `bun:"status,notnull"`
Name string `bun:"name,notnull"`
BlobKey blob.Key `bun:"blob_key,nullzero"`
Size int64 `bun:"size"`
MimeType string `bun:"mime_type,nullzero"`
CreatedAt time.Time `bun:"created_at,notnull,nullzero"`
UpdatedAt time.Time `bun:"updated_at,notnull,nullzero"`
DeletedAt time.Time `bun:"deleted_at,nullzero"`
}
func newNodeID() (uuid.UUID, error) {
return uuid.NewV7()
}
// IsAccessible returns true if the node can be accessed.
// If the node is not ready or if it is soft deleted, it cannot be accessed.
func (n *Node) IsAccessible() bool {
return n.DeletedAt.IsZero() && n.Status == NodeStatusReady
}

View File

@@ -0,0 +1,42 @@
package virtualfs
import (
"context"
"database/sql"
"errors"
"strings"
"github.com/google/uuid"
"github.com/uptrace/bun"
)
const absolutePathQuery = `WITH RECURSIVE path AS (
SELECT id, parent_id, name, 1 as depth
FROM vfs_nodes WHERE id = ? AND deleted_at IS NULL
UNION ALL
SELECT n.id, n.parent_id, n.name, p.depth + 1
FROM vfs_nodes n
JOIN path p ON n.id = p.parent_id
WHERE n.deleted_at IS NULL
)
SELECT name FROM path
WHERE EXISTS (SELECT 1 FROM path WHERE parent_id IS NULL)
ORDER BY depth DESC;`
func JoinPath(parts ...string) string {
return strings.Join(parts, "/")
}
func buildNodeAbsolutePath(ctx context.Context, db bun.IDB, nodeID uuid.UUID) (string, error) {
var path []string
err := db.NewRaw(absolutePathQuery, nodeID).Scan(ctx, &path)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return "", ErrNodeNotFound
}
return "", err
}
return JoinPath(path...), nil
}
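
The CTE walks parent links upward from the node; the EXISTS guard discards the result unless the walk reached a parentless root, and ORDER BY depth DESC emits names root-first. So for a node nested two levels deep, the helper yields the same string JoinPath would build:

p := virtualfs.JoinPath("root", "docs", "report.pdf")
// p == "root/docs/report.pdf", the same shape buildNodeAbsolutePath returns.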

View File

@@ -0,0 +1,520 @@
package virtualfs
import (
"bytes"
"context"
"crypto/rand"
"database/sql"
"encoding/binary"
"errors"
"io"
"github.com/gabriel-vasile/mimetype"
"github.com/get-drexa/drexa/internal/blob"
"github.com/get-drexa/drexa/internal/database"
"github.com/get-drexa/drexa/internal/ioext"
"github.com/google/uuid"
"github.com/sqids/sqids-go"
"github.com/uptrace/bun"
)
type VirtualFS struct {
blobStore blob.Store
keyResolver BlobKeyResolver
sqid *sqids.Sqids
}
type CreateNodeOptions struct {
ParentID uuid.UUID
Kind NodeKind
Name string
}
type CreateFileOptions struct {
ParentID uuid.UUID
Name string
}
type FileContent struct {
reader io.Reader
blobKey blob.Key
}
const RootDirectoryName = "root"
func FileContentFromReader(reader io.Reader) FileContent {
return FileContent{reader: reader}
}
func FileContentFromBlobKey(blobKey blob.Key) FileContent {
return FileContent{blobKey: blobKey}
}
func NewVirtualFS(blobStore blob.Store, keyResolver BlobKeyResolver) (*VirtualFS, error) {
sqid, err := sqids.New()
if err != nil {
return nil, err
}
return &VirtualFS{
blobStore: blobStore,
keyResolver: keyResolver,
sqid: sqid,
}, nil
}
func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, accountID uuid.UUID, nodeID uuid.UUID) (*Node, error) {
var node Node
err := db.NewSelect().Model(&node).
Where("account_id = ?", accountID).
Where("id = ?", fileID).
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, ErrNodeNotFound
}
return nil, err
}
return &node, nil
}
func (vfs *VirtualFS) FindNodeByPublicID(ctx context.Context, db bun.IDB, accountID uuid.UUID, publicID string) (*Node, error) {
var node Node
err := db.NewSelect().Model(&node).
Where("account_id = ?", accountID).
Where("public_id = ?", publicID).
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, ErrNodeNotFound
}
return nil, err
}
return &node, nil
}
func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node) ([]*Node, error) {
if !node.IsAccessible() {
return nil, ErrNodeNotFound
}
var nodes []*Node
err := db.NewSelect().Model(&nodes).
Where("account_id = ?", node.AccountID).
Where("parent_id = ?", node.ID).
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Scan(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return make([]*Node, 0), nil
}
return nil, err
}
return nodes, nil
}
func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateFileOptions) (*Node, error) {
pid, err := vfs.generatePublicID()
if err != nil {
return nil, err
}
id, err := newNodeID()
if err != nil {
return nil, err
}
node := Node{
ID: id,
PublicID: pid,
AccountID: accountID,
ParentID: opts.ParentID,
Kind: NodeKindFile,
Status: NodeStatusPending,
Name: opts.Name,
}
if vfs.keyResolver.ShouldPersistKey() {
node.BlobKey, err = vfs.keyResolver.Resolve(ctx, &node)
if err != nil {
return nil, err
}
}
_, err = db.NewInsert().Model(&node).Returning("*").Exec(ctx)
if err != nil {
if database.IsUniqueViolation(err) {
return nil, ErrNodeConflict
}
return nil, err
}
return &node, nil
}
func (vfs *VirtualFS) WriteFile(ctx context.Context, db bun.IDB, node *Node, content FileContent) error {
if content.reader == nil && content.blobKey.IsNil() {
return blob.ErrInvalidFileContent
}
if !node.DeletedAt.IsZero() {
return ErrNodeNotFound
}
setCols := make([]string, 0, 4)
if content.reader != nil {
key, err := vfs.keyResolver.Resolve(ctx, node)
if err != nil {
return err
}
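// Sniff up to 3072 bytes first (mimetype's default detection limit), then
// stitch the sniffed prefix back in front of the remaining stream below.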
buf := make([]byte, 3072)
n, err := io.ReadFull(content.reader, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
buf = buf[:n]
mt := mimetype.Detect(buf)
cr := ioext.NewCountingReader(io.MultiReader(bytes.NewReader(buf), content.reader))
err = vfs.blobStore.Put(ctx, key, cr)
if err != nil {
return err
}
if vfs.keyResolver.ShouldPersistKey() {
node.BlobKey = key
setCols = append(setCols, "blob_key")
}
node.MimeType = mt.String()
node.Size = cr.Count()
node.Status = NodeStatusReady
setCols = append(setCols, "mime_type", "size", "status")
} else {
node.BlobKey = content.blobKey
b, err := vfs.blobStore.ReadRange(ctx, content.blobKey, 0, 3072)
if err != nil {
return err
}
defer b.Close()
buf := make([]byte, 3072)
n, err := io.ReadFull(b, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
buf = buf[:n]
mt := mimetype.Detect(buf)
node.MimeType = mt.String()
node.Status = NodeStatusReady
s, err := vfs.blobStore.ReadSize(ctx, content.blobKey)
if err != nil {
return err
}
node.Size = s
setCols = append(setCols, "mime_type", "blob_key", "size", "status")
}
_, err := db.NewUpdate().Model(node).
Column(setCols...).
WherePK().
Exec(ctx)
if err != nil {
return err
}
return nil
}
func (vfs *VirtualFS) CreateDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID, parentID uuid.UUID, name string) (*Node, error) {
pid, err := vfs.generatePublicID()
if err != nil {
return nil, err
}
id, err := newNodeID()
if err != nil {
return nil, err
}
node := Node{
ID: id,
PublicID: pid,
AccountID: accountID,
ParentID: parentID,
Kind: NodeKindDirectory,
Status: NodeStatusReady,
Name: name,
}
_, err = db.NewInsert().Model(&node).Returning("*").Exec(ctx)
if err != nil {
if database.IsUniqueViolation(err) {
return nil, ErrNodeConflict
}
return nil, err
}
return &node, nil
}
func (vfs *VirtualFS) SoftDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
if !node.IsAccessible() {
return ErrNodeNotFound
}
_, err := db.NewUpdate().Model(node).
WherePK().
Where("deleted_at IS NULL").
Where("status = ?", NodeStatusReady).
Set("deleted_at = NOW()").
Returning("deleted_at").
Exec(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrNodeNotFound
}
return err
}
return nil
}
func (vfs *VirtualFS) RestoreNode(ctx context.Context, db bun.IDB, node *Node) error {
if node.Status != NodeStatusReady {
return ErrNodeNotFound
}
_, err := db.NewUpdate().Model(node).
WherePK().
Where("deleted_at IS NOT NULL").
Set("deleted_at = NULL").
Returning("deleted_at").
Exec(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrNodeNotFound
}
return err
}
return nil
}
func (vfs *VirtualFS) RenameNode(ctx context.Context, db bun.IDB, node *Node, name string) error {
if !node.IsAccessible() {
return ErrNodeNotFound
}
_, err := db.NewUpdate().Model(node).
WherePK().
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Set("name = ?", name).
Returning("name, updated_at").
Exec(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrNodeNotFound
}
return err
}
return nil
}
func (vfs *VirtualFS) MoveNode(ctx context.Context, db bun.IDB, node *Node, parentID uuid.UUID) error {
if !node.IsAccessible() {
return ErrNodeNotFound
}
oldKey, err := vfs.keyResolver.Resolve(ctx, node)
if err != nil {
return err
}
_, err = db.NewUpdate().Model(node).
WherePK().
Where("status = ?", NodeStatusReady).
Where("deleted_at IS NULL").
Set("parent_id = ?", parentID).
Returning("parent_id, updated_at").
Exec(ctx)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrNodeNotFound
}
if database.IsUniqueViolation(err) {
return ErrNodeConflict
}
return err
}
newKey, err := vfs.keyResolver.Resolve(ctx, node)
if err != nil {
return err
}
err = vfs.blobStore.Move(ctx, oldKey, newKey)
if err != nil {
return err
}
if vfs.keyResolver.ShouldPersistKey() {
node.BlobKey = newKey
_, err = db.NewUpdate().Model(node).
WherePK().
Set("blob_key = ?", newKey).
Exec(ctx)
if err != nil {
return err
}
}
return nil
}
func (vfs *VirtualFS) AbsolutePath(ctx context.Context, db bun.IDB, node *Node) (string, error) {
if !node.IsAccessible() {
return "", ErrNodeNotFound
}
return buildNodeAbsolutePath(ctx, db, node.ID)
}
func (vfs *VirtualFS) PermanentlyDeleteNode(ctx context.Context, db bun.IDB, node *Node) error {
if !node.IsAccessible() {
return ErrNodeNotFound
}
switch node.Kind {
case NodeKindFile:
return vfs.permanentlyDeleteFileNode(ctx, db, node)
case NodeKindDirectory:
return vfs.permanentlyDeleteDirectoryNode(ctx, db, node)
default:
return ErrUnsupportedOperation
}
}
func (vfs *VirtualFS) permanentlyDeleteFileNode(ctx context.Context, db bun.IDB, node *Node) error {
err := vfs.blobStore.Delete(ctx, node.BlobKey)
if err != nil {
return err
}
_, err = db.NewDelete().Model(node).WherePK().Exec(ctx)
if err != nil {
return err
}
return nil
}
func (vfs *VirtualFS) permanentlyDeleteDirectoryNode(ctx context.Context, db bun.IDB, node *Node) error {
const descendantsQuery = `WITH RECURSIVE descendants AS (
SELECT id, blob_key FROM vfs_nodes WHERE id = ?
UNION ALL
SELECT n.id, n.blob_key FROM vfs_nodes n
JOIN descendants d ON n.parent_id = d.id
)
SELECT id, blob_key FROM descendants`
type nodeRecord struct {
ID uuid.UUID `bun:"id"`
BlobKey blob.Key `bun:"blob_key"`
}
// If db is already a transaction, use it directly; otherwise start a new transaction
var tx bun.IDB
var startedTx *bun.Tx
switch v := db.(type) {
case *bun.DB:
newTx, err := v.BeginTx(ctx, nil)
if err != nil {
return err
}
startedTx = &newTx
tx = newTx
defer func() {
if startedTx != nil {
(*startedTx).Rollback()
}
}()
default:
// Assume it's already a transaction
tx = db
}
var records []nodeRecord
err := tx.NewRaw(descendantsQuery, node.ID).Scan(ctx, &records)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ErrNodeNotFound
}
return err
}
if len(records) == 0 {
return ErrNodeNotFound
}
nodeIDs := make([]uuid.UUID, 0, len(records))
blobKeys := make([]blob.Key, 0, len(records))
for _, r := range records {
nodeIDs = append(nodeIDs, r.ID)
if !r.BlobKey.IsNil() {
blobKeys = append(blobKeys, r.BlobKey)
}
}
plan, err := vfs.keyResolver.ResolveDeletionKeys(ctx, node, blobKeys)
if err != nil {
return err
}
_, err = tx.NewDelete().
Model((*Node)(nil)).
Where("id IN (?)", bun.In(nodeIDs)).
Exec(ctx)
if err != nil {
return err
}
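// Blob deletion is best-effort by design: errors are ignored so the row
// deletion still commits even if the store cleanup fails.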
if !plan.Prefix.IsNil() {
_ = vfs.blobStore.DeletePrefix(ctx, plan.Prefix)
} else {
for _, key := range plan.Keys {
_ = vfs.blobStore.Delete(ctx, key)
}
}
// Only commit if we started the transaction
if startedTx != nil {
err := (*startedTx).Commit()
startedTx = nil // Prevent defer from rolling back
return err
}
return nil
}
func (vfs *VirtualFS) generatePublicID() (string, error) {
var b [8]byte
_, err := rand.Read(b[:])
if err != nil {
return "", err
}
n := binary.BigEndian.Uint64(b[:])
return vfs.sqid.Encode([]uint64{n})
}
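
Putting the pieces together, a sketch of creating a directory and writing a small file into it (names and the root ID are illustrative):

package example

import (
	"context"
	"strings"

	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

func addReadme(ctx context.Context, db bun.IDB, vfs *virtualfs.VirtualFS, accountID, rootID uuid.UUID) error {
	dir, err := vfs.CreateDirectory(ctx, db, accountID, rootID, "docs")
	if err != nil {
		return err
	}
	file, err := vfs.CreateFile(ctx, db, accountID, virtualfs.CreateFileOptions{
		ParentID: dir.ID,
		Name:     "README.txt",
	})
	if err != nil {
		return err
	}
	// WriteFile stores the blob, sniffs the MIME type, records the size,
	// and flips the node from pending to ready.
	return vfs.WriteFile(ctx, db, file,
		virtualfs.FileContentFromReader(strings.NewReader("hello")))
}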

View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Install VS Code extensions from devcontainer.json
# This script reads extensions from the single source of truth
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DEVCONTAINER_JSON="$SCRIPT_DIR/../.devcontainer/devcontainer.json"
echo "Installing VS Code extensions from devcontainer.json..."
if [ ! -f "$DEVCONTAINER_JSON" ]; then
echo "Error: devcontainer.json not found at $DEVCONTAINER_JSON"
exit 1
fi
# Use Bun to parse JSON and extract extensions
EXTENSIONS=$(bun -e "
const config = await Bun.file('$DEVCONTAINER_JSON').json();
const extensions = config?.customizations?.vscode?.extensions ?? [];
console.log(extensions.join('\n'));
")
if [ -z "$EXTENSIONS" ]; then
echo "No extensions found in devcontainer.json"
exit 0
fi
# Determine which CLI to use (code-server, cursor, or code)
if command -v code-server &> /dev/null; then
CLI="code-server"
elif command -v cursor &> /dev/null; then
CLI="cursor"
elif command -v code &> /dev/null; then
CLI="code"
else
echo "Warning: No VS Code CLI found (code-server, cursor, or code)"
echo "Extensions to install:"
echo "$EXTENSIONS"
exit 0
fi
echo "Using $CLI to install extensions..."
# Install each extension
echo "$EXTENSIONS" | while read -r ext; do
if [ -n "$ext" ]; then
echo "Installing: $ext"
$CLI --install-extension "$ext" 2>/dev/null || true
fi
done
echo "Extension installation complete!"