Mirror of https://github.com/get-drexa/drive.git (synced 2026-02-02 12:01:17 +00:00)
refactor: account model overhaul
@@ -8,6 +8,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/sqids/sqids-go v0.4.1
 	github.com/swaggo/swag v1.16.6
+	github.com/testcontainers/testcontainers-go v0.39.0
 	github.com/uptrace/bun v1.2.16
 	github.com/uptrace/bun/extra/bundebug v1.2.16
 	golang.org/x/crypto v0.45.0
@@ -15,17 +16,62 @@ require (
 )
 
 require (
+	dario.cat/mergo v1.0.2 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/KyleBanks/depth v1.2.1 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
+	github.com/containerd/log v0.1.0 // indirect
+	github.com/containerd/platforms v0.2.1 // indirect
+	github.com/cpuguy83/dockercfg v0.3.2 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
+	github.com/docker/docker v28.3.3+incompatible // indirect
+	github.com/docker/go-connections v0.6.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
+	github.com/ebitengine/purego v0.8.4 // indirect
 	github.com/fatih/color v1.18.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.6 // indirect
 	github.com/go-openapi/spec v0.20.4 // indirect
 	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/magiconair/properties v1.8.10 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
+	github.com/moby/go-archive v0.1.0 // indirect
+	github.com/moby/patternmatcher v0.6.0 // indirect
+	github.com/moby/sys/sequential v0.6.0 // indirect
+	github.com/moby/sys/user v0.4.0 // indirect
+	github.com/moby/sys/userns v0.1.0 // indirect
+	github.com/moby/term v0.5.0 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.1.1 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+	github.com/shirou/gopsutil/v4 v4.25.6 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/stretchr/testify v1.11.1 // indirect
+	github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
+	github.com/tklauser/numcpus v0.6.1 // indirect
+	github.com/yusufpapurcu/wmi v1.2.4 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
 	go.opentelemetry.io/otel v1.38.0 // indirect
+	go.opentelemetry.io/otel/metric v1.38.0 // indirect
 	go.opentelemetry.io/otel/trace v1.38.0 // indirect
 	golang.org/x/mod v0.29.0 // indirect
 	golang.org/x/net v0.47.0 // indirect
@@ -40,7 +86,7 @@ require (
 	github.com/andybalholm/brotli v1.1.0 // indirect
 	github.com/golang-jwt/jwt/v5 v5.3.0
 	github.com/jinzhu/inflection v1.0.0 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
@@ -1,19 +1,56 @@
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
 github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
+github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
+github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
 github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
 github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
 github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -26,8 +63,11 @@ github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyr
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/gofiber/fiber/v2 v2.52.9 h1:YjKl5DOiyP3j0mO61u3NTmK7or8GzzWzCFzkboyP5cw=
 github.com/gofiber/fiber/v2 v2.52.9/go.mod h1:YEcBbO/FB+5M1IZNBP9FO3J9281zgPAreiI1oqg8nDw=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
 github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -36,13 +76,21 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
 github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
 github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
+github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
@@ -53,23 +101,60 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
+github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
+github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
+github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
+github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
+github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
 github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs=
+github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sqids/sqids-go v0.4.1 h1:eQKYzmAZbLlRwHeHYPF35QhgxwZHLnlmVj9AkIj/rrw=
 github.com/sqids/sqids-go v0.4.1/go.mod h1:EMwHuPQgSNFS0A49jESTfIQS+066XQTVhukrzEPScl8=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
 github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
 github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
+github.com/testcontainers/testcontainers-go v0.39.0 h1:uCUJ5tA+fcxbFAB0uP3pIK3EJ2IjjDUHFSZ1H1UxAts=
+github.com/testcontainers/testcontainers-go v0.39.0/go.mod h1:qmHpkG7H5uPf/EvOORKvS6EuDkBUPE3zpVGaH9NL7f8=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 h1:REJz+XwNpGC/dCgTfYvM4SKqobNqDBfvhq74s2oHTUM=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0/go.mod h1:4K2OhtHEeT+JSIFX4V8DkGKsyLa96Y2vLdd3xsxD5HE=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo=
 github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs=
 github.com/uptrace/bun v1.2.16 h1:QlObi6ZIK5Ao7kAALnh91HWYNZUBbVwye52fmlQM9kc=
@@ -90,32 +175,72 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU
 github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
+github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
 go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
 go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
 go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
 go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
 golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
 golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
 golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
 golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
 golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
 golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
 golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
@@ -7,30 +7,37 @@ import (
 	"github.com/uptrace/bun"
 )
 
-// Account represents a storage account with quota information
-// @Description Storage account with usage and quota details
+type Role string
+
+const (
+	RoleAdmin Role = "admin"
+	RoleMember Role = "member"
+)
+
+type Status string
+
+const (
+	StatusInvited Status = "invited"
+	StatusActive Status = "active"
+	StatusSuspended Status = "suspended"
+)
+
+// Account represents a user's identity within an organization (principal / membership).
 type Account struct {
 	bun.BaseModel `bun:"accounts" swaggerignore:"true"`
 
-	// Unique account identifier
-	ID uuid.UUID `bun:",pk,type:uuid" json:"id" example:"550e8400-e29b-41d4-a716-446655440000"`
-
-	// ID of the user who owns this account
-	UserID uuid.UUID `bun:"user_id,notnull,type:uuid" json:"userId" example:"550e8400-e29b-41d4-a716-446655440001"`
-
-	// Current storage usage in bytes
-	StorageUsageBytes int64 `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes" example:"1073741824"`
-
-	// Maximum storage quota in bytes
-	StorageQuotaBytes int64 `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes" example:"10737418240"`
-
-	// When the account was created (ISO 8601)
-	CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt" example:"2024-12-13T15:04:05Z"`
-
-	// When the account was last updated (ISO 8601)
-	UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt" example:"2024-12-13T16:30:00Z"`
+	ID uuid.UUID `bun:",pk,type:uuid" json:"id"`
+	OrgID uuid.UUID `bun:"org_id,notnull,type:uuid" json:"orgId"`
+	UserID uuid.UUID `bun:"user_id,notnull,type:uuid" json:"userId"`
+
+	Role Role `bun:"role,notnull" json:"role" example:"member"`
+	Status Status `bun:"status,notnull" json:"status" example:"active"`
+
+	CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
+	UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
 }
 
 func newAccountID() (uuid.UUID, error) {
 	return uuid.NewV7()
 }
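Note (added for orientation, not part of the diff): a minimal sketch of how calling code might build the reworked model. The import of the internal account package from a main program and the ID values are illustrative assumptions only.

// Hedged sketch: constructing an org-scoped Account with the new Role and
// Status types. Internal packages are only importable inside this module.
package main

import (
	"fmt"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV7() // same generator newAccountID() wraps
	if err != nil {
		panic(err)
	}

	acc := &account.Account{
		ID:     id,
		OrgID:  uuid.New(), // hypothetical organization ID
		UserID: uuid.New(), // hypothetical user ID
		Role:   account.RoleMember,   // "member"
		Status: account.StatusActive, // "active"
	}
	fmt.Println(acc.Role, acc.Status)
}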
@@ -3,113 +3,33 @@ package account
 import (
 	"errors"
 
-	"github.com/get-drexa/drexa/internal/auth"
 	"github.com/get-drexa/drexa/internal/httperr"
 	"github.com/get-drexa/drexa/internal/reqctx"
 	"github.com/get-drexa/drexa/internal/user"
-	"github.com/get-drexa/drexa/internal/virtualfs"
 	"github.com/gofiber/fiber/v2"
 	"github.com/google/uuid"
 	"github.com/uptrace/bun"
 )
 
 type HTTPHandler struct {
 	accountService *Service
-	authService *auth.Service
-	vfs *virtualfs.VirtualFS
 	db *bun.DB
 	authMiddleware fiber.Handler
-	cookieConfig auth.CookieConfig
 }
 
-// registerAccountRequest represents a new account registration
-// @Description Request to create a new account and user
-type registerAccountRequest struct {
-	// Email address for the new account
-	Email string `json:"email" example:"newuser@example.com"`
-	// Password for the new account (min 8 characters)
-	Password string `json:"password" example:"securepassword123"`
-	// Display name for the user
-	DisplayName string `json:"displayName" example:"Jane Doe"`
-	// How to deliver tokens: "cookie" (set HTTP-only cookies) or "body" (include in response)
-	TokenDelivery string `json:"tokenDelivery" example:"body" enums:"cookie,body"`
-}
-
-// registerAccountResponse represents a successful registration
-// @Description Response after successful account registration
-type registerAccountResponse struct {
-	// The created account
-	Account *Account `json:"account"`
-	// The created user
-	User *user.User `json:"user"`
-	// JWT access token for immediate authentication (only included when tokenDelivery is "body")
-	AccessToken string `json:"accessToken,omitempty" example:"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI1NTBlODQwMC1lMjliLTQxZDQtYTcxNi00NDY2NTU0NDAwMDAifQ.signature"`
-	// Base64 URL encoded refresh token (only included when tokenDelivery is "body")
-	RefreshToken string `json:"refreshToken,omitempty" example:"dR4nD0mUu1DkZXlCeXRlc0FuZFJhbmRvbURhdGFIZXJlMTIzNDU2Nzg5MGFi"`
-}
-
-func NewHTTPHandler(accountService *Service, authService *auth.Service, vfs *virtualfs.VirtualFS, db *bun.DB, authMiddleware fiber.Handler, cookieConfig auth.CookieConfig) *HTTPHandler {
-	return &HTTPHandler{accountService: accountService, authService: authService, db: db, authMiddleware: authMiddleware, cookieConfig: cookieConfig}
+func NewHTTPHandler(accountService *Service, db *bun.DB, authMiddleware fiber.Handler) *HTTPHandler {
+	return &HTTPHandler{
+		accountService: accountService,
+		db: db,
+		authMiddleware: authMiddleware,
+	}
 }
 
-func (h *HTTPHandler) RegisterRoutes(api fiber.Router) *ScopedRouter {
+func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
 	api.Get("/accounts", h.authMiddleware, h.listAccounts)
-	api.Post("/accounts", h.registerAccount)
-
-	account := api.Group("/accounts/:accountID")
-	account.Use(h.authMiddleware)
-	account.Use(h.accountMiddleware)
-
-	account.Get("/", h.getAccount)
-
-	return &ScopedRouter{virtualfs.ScopedRouter{account}}
+	api.Get("/accounts/:accountID", h.authMiddleware, h.getAccount)
 }
 
-func (h *HTTPHandler) accountMiddleware(c *fiber.Ctx) error {
-	u := reqctx.AuthenticatedUser(c).(*user.User)
-
-	accountID, err := uuid.Parse(c.Params("accountID"))
-	if err != nil {
-		return c.SendStatus(fiber.StatusNotFound)
-	}
-
-	account, err := h.accountService.AccountByID(c.Context(), h.db, u.ID, accountID)
-	if err != nil {
-		if errors.Is(err, ErrAccountNotFound) {
-			return c.SendStatus(fiber.StatusNotFound)
-		}
-		return httperr.Internal(err)
-	}
-
-	root, err := h.vfs.FindRootDirectory(c.Context(), h.db, account.ID)
-	if err != nil {
-		return httperr.Internal(err)
-	}
-
-	scope := &virtualfs.Scope{
-		AccountID: account.ID,
-		RootNodeID: root.ID,
-		AllowedOps: virtualfs.AllAllowedOps,
-		AllowedNodes: nil,
-		ActorKind: virtualfs.ScopeActorAccount,
-		ActorID: u.ID,
-	}
-
-	reqctx.SetVFSAccessScope(c, scope)
-	reqctx.SetCurrentAccount(c, account)
-
-	return c.Next()
-}
-
-// listAccounts lists all accounts for the authenticated user
-// @Summary List accounts
-// @Description Retrieve all accounts for the authenticated user
-// @Tags accounts
-// @Produce json
-// @Security BearerAuth
-// @Success 200 {array} Account "List of accounts for the authenticated user"
-// @Failure 401 {string} string "Not authenticated"
-// @Router /accounts [get]
 func (h *HTTPHandler) listAccounts(c *fiber.Ctx) error {
 	u := reqctx.AuthenticatedUser(c).(*user.User)
 	accounts, err := h.accountService.ListAccounts(c.Context(), h.db, u.ID)
@@ -119,91 +39,19 @@ func (h *HTTPHandler) listAccounts(c *fiber.Ctx) error {
 	return c.JSON(accounts)
 }
 
-// getAccount retrieves account information
-// @Summary Get account
-// @Description Retrieve account details including storage usage and quota
-// @Tags accounts
-// @Produce json
-// @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
-// @Success 200 {object} Account "Account details"
-// @Failure 401 {string} string "Not authenticated"
-// @Failure 404 {string} string "Account not found"
-// @Router /accounts/{accountID} [get]
 func (h *HTTPHandler) getAccount(c *fiber.Ctx) error {
-	account, ok := reqctx.CurrentAccount(c).(*Account)
-	if !ok || account == nil {
+	u := reqctx.AuthenticatedUser(c).(*user.User)
+	accountID, err := uuid.Parse(c.Params("accountID"))
+	if err != nil {
 		return c.SendStatus(fiber.StatusNotFound)
 	}
-	return c.JSON(account)
-}
 
-// registerAccount creates a new account and user
-// @Summary Register new account
-// @Description Create a new user account with email and password. Returns the account, user, and authentication tokens. Tokens can be delivered via HTTP-only cookies or in the response body based on the tokenDelivery field.
-// @Tags accounts
-// @Accept json
-// @Produce json
-// @Param request body registerAccountRequest true "Registration details"
-// @Success 200 {object} registerAccountResponse "Account created successfully"
-// @Failure 400 {string} string "Invalid request body or token delivery method"
-// @Failure 409 {string} string "Email already registered"
-// @Router /accounts [post]
-func (h *HTTPHandler) registerAccount(c *fiber.Ctx) error {
-	req := new(registerAccountRequest)
-	if err := c.BodyParser(req); err != nil {
-		return c.SendStatus(fiber.StatusBadRequest)
-	}
-
-	tx, err := h.db.BeginTx(c.Context(), nil)
+	acc, err := h.accountService.AccountByID(c.Context(), h.db, u.ID, accountID)
 	if err != nil {
-		return httperr.Internal(err)
-	}
-	defer tx.Rollback()
-
-	acc, u, err := h.accountService.Register(c.Context(), tx, RegisterOptions{
-		Email: req.Email,
-		Password: req.Password,
-		DisplayName: req.DisplayName,
-	})
-	if err != nil {
-		var ae *user.AlreadyExistsError
-		if errors.As(err, &ae) {
-			return c.SendStatus(fiber.StatusConflict)
-		}
-		if errors.Is(err, ErrAccountAlreadyExists) {
-			return c.SendStatus(fiber.StatusConflict)
+		if errors.Is(err, ErrAccountNotFound) {
+			return c.SendStatus(fiber.StatusNotFound)
 		}
 		return httperr.Internal(err)
 	}
-
-	result, err := h.authService.GrantForUser(c.Context(), tx, u)
-	if err != nil {
-		return httperr.Internal(err)
-	}
-
-	err = tx.Commit()
-	if err != nil {
-		return httperr.Internal(err)
-	}
-
-	switch req.TokenDelivery {
-	default:
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "invalid token delivery method"})
-
-	case auth.TokenDeliveryCookie:
-		auth.SetAuthCookies(c, result.AccessToken, result.RefreshToken, h.cookieConfig)
-		return c.JSON(registerAccountResponse{
-			Account: acc,
-			User: u,
-		})
-
-	case auth.TokenDeliveryBody:
-		return c.JSON(registerAccountResponse{
-			Account: acc,
-			User: u,
-			AccessToken: result.AccessToken,
-			RefreshToken: result.RefreshToken,
-		})
-	}
+	return c.JSON(acc)
 }
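Note (added for orientation, not part of the diff): one way the slimmed-down handler might be wired into a Fiber app after this change. The route prefix, the surrounding function, and how db and authMiddleware are built are assumptions; only NewService, NewHTTPHandler, and RegisterRoutes mirror the commit.

// Hedged wiring sketch: registerAccount and the auth/vfs/cookie wiring were
// removed from this handler, so only the read endpoints are mounted here.
package server

import (
	"github.com/get-drexa/drexa/internal/account"
	"github.com/gofiber/fiber/v2"
	"github.com/uptrace/bun"
)

func mountAccountRoutes(app *fiber.App, db *bun.DB, authMiddleware fiber.Handler) {
	api := app.Group("/api") // hypothetical prefix

	h := account.NewHTTPHandler(account.NewService(), db, authMiddleware)

	// After the refactor this registers:
	//   GET /api/accounts            -> listAccounts
	//   GET /api/accounts/:accountID -> getAccount
	h.RegisterRoutes(api)
}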
@@ -1,21 +0,0 @@
-package account
-
-import "github.com/get-drexa/drexa/internal/virtualfs"
-
-// ScopedRouter is a router with auth + account middleware applied.
-// Routes registered on this router have access to:
-// - The authenticated user via reqctx.AuthenticatedUser()
-// - The current account via reqctx.CurrentAccount()
-// - The VFS scope via reqctx.VFSAccessScope()
-//
-// This embeds virtualfs.ScopedRouter, so it can be passed to functions
-// that only require VFS scope by calling VFSRouter().
-type ScopedRouter struct {
-	virtualfs.ScopedRouter
-}
-
-// VFSRouter returns the embedded virtualfs.ScopedRouter for use with
-// functions that only require VFS scope access.
-func (r *ScopedRouter) VFSRouter() *virtualfs.ScopedRouter {
-	return &r.ScopedRouter
-}
@@ -6,88 +6,38 @@ import (
 	"errors"
 
 	"github.com/get-drexa/drexa/internal/database"
-	"github.com/get-drexa/drexa/internal/password"
-	"github.com/get-drexa/drexa/internal/user"
-	"github.com/get-drexa/drexa/internal/virtualfs"
 	"github.com/google/uuid"
 	"github.com/uptrace/bun"
 )
 
-type Service struct {
-	userService user.Service
-	vfs *virtualfs.VirtualFS
-}
-
-type RegisterOptions struct {
-	Email string
-	Password string
-	DisplayName string
-}
-
-type CreateAccountOptions struct {
-	OrganizationID uuid.UUID
-	QuotaBytes int64
-}
-
-func NewService(userService *user.Service, vfs *virtualfs.VirtualFS) *Service {
-	return &Service{
-		userService: *userService,
-		vfs: vfs,
-	}
-}
-
-func (s *Service) Register(ctx context.Context, db bun.IDB, opts RegisterOptions) (*Account, *user.User, error) {
-	hashed, err := password.HashString(opts.Password)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	u, err := s.userService.RegisterUser(ctx, db, user.UserRegistrationOptions{
-		Email: opts.Email,
-		Password: hashed,
-		DisplayName: opts.DisplayName,
-	})
-	if err != nil {
-		return nil, nil, err
-	}
-
-	acc, err := s.CreateAccount(ctx, db, u.ID, CreateAccountOptions{
-		// TODO: make quota configurable
-		QuotaBytes: 1024 * 1024 * 1024, // 1GB
-	})
-	if err != nil {
-		return nil, nil, err
-	}
-
-	_, err = s.vfs.CreateRootDirectory(ctx, db, acc.ID)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return acc, u, nil
-}
-
-func (s *Service) CreateAccount(ctx context.Context, db bun.IDB, userID uuid.UUID, opts CreateAccountOptions) (*Account, error) {
+type Service struct{}
+
+func NewService() *Service {
+	return &Service{}
+}
+
+func (s *Service) CreateAccount(ctx context.Context, db bun.IDB, orgID uuid.UUID, userID uuid.UUID, role Role, status Status) (*Account, error) {
 	id, err := newAccountID()
 	if err != nil {
 		return nil, err
 	}
 
-	account := &Account{
+	acc := &Account{
 		ID: id,
-		UserID: userID,
-		StorageQuotaBytes: opts.QuotaBytes,
+		OrgID: orgID,
+		UserID: userID,
+		Role: role,
+		Status: status,
 	}
 
-	_, err = db.NewInsert().Model(account).Returning("*").Exec(ctx)
+	_, err = db.NewInsert().Model(acc).Returning("*").Exec(ctx)
 	if err != nil {
 		if database.IsUniqueViolation(err) {
 			return nil, ErrAccountAlreadyExists
 		}
 		return nil, err
 	}
-
-	return account, nil
+	return acc, nil
 }
 
 func (s *Service) ListAccounts(ctx context.Context, db bun.IDB, userID uuid.UUID) ([]*Account, error) {
@@ -102,26 +52,29 @@ func (s *Service) ListAccounts(ctx context.Context, db bun.IDB, userID uuid.UUID
 	return accounts, nil
 }

-func (s *Service) AccountByUserID(ctx context.Context, db bun.IDB, userID uuid.UUID) (*Account, error) {
-	var account Account
-	err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Scan(ctx)
+func (s *Service) AccountByID(ctx context.Context, db bun.IDB, userID uuid.UUID, id uuid.UUID) (*Account, error) {
+	var acc Account
+	err := db.NewSelect().Model(&acc).Where("user_id = ?", userID).Where("id = ?", id).Scan(ctx)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, ErrAccountNotFound
 		}
 		return nil, err
 	}
-	return &account, nil
+	return &acc, nil
 }

-func (s *Service) AccountByID(ctx context.Context, db bun.IDB, userID uuid.UUID, id uuid.UUID) (*Account, error) {
-	var account Account
-	err := db.NewSelect().Model(&account).Where("user_id = ?", userID).Where("id = ?", id).Scan(ctx)
+func (s *Service) FindUserAccountInOrg(ctx context.Context, db bun.IDB, orgID uuid.UUID, userID uuid.UUID) (*Account, error) {
+	var acc Account
+	err := db.NewSelect().Model(&acc).
+		Where("org_id = ?", orgID).
+		Where("user_id = ?", userID).
+		Scan(ctx)
 	if err != nil {
 		if errors.Is(err, sql.ErrNoRows) {
 			return nil, ErrAccountNotFound
 		}
 		return nil, err
 	}
-	return &account, nil
+	return &acc, nil
 }
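For orientation, a minimal sketch of how the reworked lookups might be used together when resolving a caller's membership in an organization. Only FindUserAccountInOrg, ErrAccountNotFound and the account types come from this commit; the wrapper function and its error mapping are assumptions for illustration.

package example

import (
	"context"
	"errors"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

// resolveMembership looks up the caller's account (membership) inside an org,
// mirroring how driveMiddleware later in this commit uses FindUserAccountInOrg.
// The wrapper itself is hypothetical.
func resolveMembership(ctx context.Context, svc *account.Service, db bun.IDB, orgID, userID uuid.UUID) (*account.Account, error) {
	acc, err := svc.FindUserAccountInOrg(ctx, db, orgID, userID)
	if errors.Is(err, account.ErrAccountNotFound) {
		return nil, err // callers typically map this to a 404
	}
	if err != nil {
		return nil, err
	}
	return acc, nil
}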
@@ -147,14 +147,14 @@ func includeParam(c *fiber.Ctx) []string {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param request body createDirectoryRequest true "Directory details"
 // @Param include query string false "Include additional fields" Enums(path)
 // @Success 200 {object} DirectoryInfo "Created directory"
 // @Failure 400 {object} map[string]string "Parent not found or not a directory"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 409 {object} map[string]string "Directory already exists"
-// @Router /accounts/{accountID}/directories [post]
+// @Router /drives/{driveID}/directories [post]
 func (h *HTTPHandler) createDirectory(c *fiber.Ctx) error {
 	scope, ok := scopeFromCtx(c)
 	if !ok {
@@ -230,13 +230,13 @@ func (h *HTTPHandler) createDirectory(c *fiber.Ctx) error {
 // @Tags directories
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Directory ID"
 // @Param include query string false "Include additional fields" Enums(path)
 // @Success 200 {object} DirectoryInfo "Directory metadata"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Directory not found"
-// @Router /accounts/{accountID}/directories/{directoryID} [get]
+// @Router /drives/{driveID}/directories/{directoryID} [get]
 func (h *HTTPHandler) fetchDirectory(c *fiber.Ctx) error {
 	node := mustCurrentDirectoryNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -274,7 +274,7 @@ func (h *HTTPHandler) fetchDirectory(c *fiber.Ctx) error {
 // @Tags directories
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Directory ID (use 'root' for the root directory)"
 // @Param orderBy query string false "Sort field: name, createdAt, or updatedAt" Enums(name,createdAt,updatedAt)
 // @Param dir query string false "Sort direction: asc or desc" Enums(asc,desc)
@@ -284,7 +284,7 @@ func (h *HTTPHandler) fetchDirectory(c *fiber.Ctx) error {
 // @Failure 400 {object} map[string]string "Invalid limit or cursor"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Directory not found"
-// @Router /accounts/{accountID}/directories/{directoryID}/content [get]
+// @Router /drives/{driveID}/directories/{directoryID}/content [get]
 func (h *HTTPHandler) listDirectory(c *fiber.Ctx) error {
 	node := mustCurrentDirectoryNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -405,14 +405,14 @@ func (h *HTTPHandler) listDirectory(c *fiber.Ctx) error {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Directory ID"
 // @Param request body patchDirectoryRequest true "Directory update"
 // @Success 200 {object} DirectoryInfo "Updated directory metadata"
 // @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Directory not found"
-// @Router /accounts/{accountID}/directories/{directoryID} [patch]
+// @Router /drives/{driveID}/directories/{directoryID} [patch]
 func (h *HTTPHandler) patchDirectory(c *fiber.Ctx) error {
 	node := mustCurrentDirectoryNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -464,14 +464,14 @@ func (h *HTTPHandler) patchDirectory(c *fiber.Ctx) error {
 // @Description Delete a directory permanently or move it to trash. Deleting a directory also affects all its contents.
 // @Tags directories
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Directory ID"
 // @Param trash query bool false "Move to trash instead of permanent delete" default(false)
 // @Success 200 {object} DirectoryInfo "Trashed directory info (when trash=true)"
 // @Success 204 {string} string "Directory deleted"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Directory not found"
-// @Router /accounts/{accountID}/directories/{directoryID} [delete]
+// @Router /drives/{driveID}/directories/{directoryID} [delete]
 func (h *HTTPHandler) deleteDirectory(c *fiber.Ctx) error {
 	node := mustCurrentDirectoryNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -524,14 +524,14 @@ func (h *HTTPHandler) deleteDirectory(c *fiber.Ctx) error {
 // @Description Delete multiple directories permanently or move them to trash. Deleting directories also affects all their contents. All items must be directories.
 // @Tags directories
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param id query string true "Comma-separated list of directory IDs to delete" example:"kRp2XYTq9A55,xYz123AbC456"
 // @Param trash query bool false "Move to trash instead of permanent delete" default(false)
 // @Success 200 {array} DirectoryInfo "Trashed directories (when trash=true)"
 // @Success 204 {string} string "Directories deleted"
 // @Failure 400 {object} map[string]string "All items must be directories"
 // @Failure 401 {string} string "Not authenticated"
-// @Router /accounts/{accountID}/directories [delete]
+// @Router /drives/{driveID}/directories [delete]
 func (h *HTTPHandler) deleteDirectories(c *fiber.Ctx) error {
 	scope, ok := scopeFromCtx(c)
 	if !ok {
@@ -619,14 +619,14 @@ func (h *HTTPHandler) deleteDirectories(c *fiber.Ctx) error {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Target directory ID"
 // @Param request body postDirectoryContentRequest true "Items to move"
 // @Success 200 {object} moveItemsToDirectoryResponse "Move operation results with moved, conflict, and error states"
 // @Failure 400 {object} map[string]string "Invalid request or items not in same directory"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {object} map[string]string "One or more items not found"
-// @Router /accounts/{accountID}/directories/{directoryID}/content [post]
+// @Router /drives/{driveID}/directories/{directoryID}/content [post]
 func (h *HTTPHandler) moveItemsToDirectory(c *fiber.Ctx) error {
 	scope, ok := scopeFromCtx(c)
 	if !ok {
@@ -769,19 +769,19 @@ func decodeListChildrenCursor(s string) (*decodedListChildrenCursor, error) {
 // @Description Get all share links that include this directory
 // @Tags directories
 // @Produce json
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param directoryID path string true "Directory ID"
 // @Success 200 {array} sharing.Share "Array of shares"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Directory not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/directories/{directoryID}/shares [get]
+// @Router /drives/{driveID}/directories/{directoryID}/shares [get]
 func (h *HTTPHandler) listDirectoryShares(c *fiber.Ctx) error {
 	node := mustCurrentDirectoryNode(c)

 	includesExpired := c.Query("includesExpired") == "true"

-	shares, err := h.sharingService.ListShares(c.Context(), h.db, node.AccountID, sharing.ListSharesOptions{
+	shares, err := h.sharingService.ListShares(c.Context(), h.db, node.DriveID, sharing.ListSharesOptions{
 		Items:           []*virtualfs.Node{node},
 		IncludesExpired: includesExpired,
 	})
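The route rename from /accounts/{accountID}/... to /drives/{driveID}/... is mechanical on the server side, but it changes every client URL. A small, hypothetical client sketch of the new shape; the base URL and token handling are assumptions, while the path itself follows the @Router annotations above and the /api prefix used by the integration test in this commit.

package example

import (
	"context"
	"fmt"
	"net/http"
)

// listDirectoryContent calls the renamed endpoint
// GET /api/drives/{driveID}/directories/{directoryID}/content.
func listDirectoryContent(ctx context.Context, baseURL, token, driveID, directoryID string) (*http.Response, error) {
	url := fmt.Sprintf("%s/api/drives/%s/directories/%s/content?limit=100", baseURL, driveID, directoryID)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	return http.DefaultClient.Do(req)
}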
@@ -64,12 +64,12 @@ func (h *HTTPHandler) currentFileMiddleware(c *fiber.Ctx) error {
 // @Tags files
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param fileID path string true "File ID"
 // @Success 200 {object} FileInfo "File metadata"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "File not found"
-// @Router /accounts/{accountID}/files/{fileID} [get]
+// @Router /drives/{driveID}/files/{fileID} [get]
 func (h *HTTPHandler) fetchFile(c *fiber.Ctx) error {
 	node := mustCurrentFileNode(c)
 	i := FileInfo{
@@ -91,13 +91,13 @@ func (h *HTTPHandler) fetchFile(c *fiber.Ctx) error {
 // @Tags files
 // @Produce application/octet-stream
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param fileID path string true "File ID"
 // @Success 200 {file} binary "File content stream"
 // @Success 307 {string} string "Redirect to download URL"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "File not found"
-// @Router /accounts/{accountID}/files/{fileID}/content [get]
+// @Router /drives/{driveID}/files/{fileID}/content [get]
 func (h *HTTPHandler) downloadFile(c *fiber.Ctx) error {
 	node := mustCurrentFileNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -140,14 +140,14 @@ func (h *HTTPHandler) downloadFile(c *fiber.Ctx) error {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param fileID path string true "File ID"
 // @Param request body patchFileRequest true "File update"
 // @Success 200 {object} FileInfo "Updated file metadata"
 // @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "File not found"
-// @Router /accounts/{accountID}/files/{fileID} [patch]
+// @Router /drives/{driveID}/files/{fileID} [patch]
 func (h *HTTPHandler) patchFile(c *fiber.Ctx) error {
 	node := mustCurrentFileNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -201,14 +201,14 @@ func (h *HTTPHandler) patchFile(c *fiber.Ctx) error {
 // @Tags files
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param fileID path string true "File ID"
 // @Param trash query bool false "Move to trash instead of permanent delete" default(false)
 // @Success 200 {object} FileInfo "Trashed file info (when trash=true)"
 // @Success 204 {string} string "Permanently deleted (when trash=false)"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "File not found"
-// @Router /accounts/{accountID}/files/{fileID} [delete]
+// @Router /drives/{driveID}/files/{fileID} [delete]
 func (h *HTTPHandler) deleteFile(c *fiber.Ctx) error {
 	node := mustCurrentFileNode(c)
 	scope, ok := scopeFromCtx(c)
@@ -264,14 +264,14 @@ func (h *HTTPHandler) deleteFile(c *fiber.Ctx) error {
 // @Description Delete multiple files permanently or move them to trash. All items must be files.
 // @Tags files
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param id query string true "Comma-separated list of file IDs to delete" example:"mElnUNCm8F22,kRp2XYTq9A55"
 // @Param trash query bool false "Move to trash instead of permanent delete" default(false)
 // @Success 200 {array} FileInfo "Trashed files (when trash=true)"
 // @Success 204 {string} string "Files deleted"
 // @Failure 400 {object} map[string]string "All items must be files"
 // @Failure 401 {string} string "Not authenticated"
-// @Router /accounts/{accountID}/files [delete]
+// @Router /drives/{driveID}/files [delete]
 func (h *HTTPHandler) deleteFiles(c *fiber.Ctx) error {
 	scope, ok := scopeFromCtx(c)
 	if !ok {
@@ -352,19 +352,19 @@ func (h *HTTPHandler) deleteFiles(c *fiber.Ctx) error {
 // @Description Get all share links that include this file
 // @Tags files
 // @Produce json
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param fileID path string true "File ID"
 // @Success 200 {array} sharing.Share "Array of shares"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "File not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/files/{fileID}/shares [get]
+// @Router /drives/{driveID}/files/{fileID}/shares [get]
 func (h *HTTPHandler) listFileShares(c *fiber.Ctx) error {
 	node := mustCurrentFileNode(c)

 	includesExpired := c.Query("includesExpired") == "true"

-	shares, err := h.sharingService.ListShares(c.Context(), h.db, node.AccountID, sharing.ListSharesOptions{
+	shares, err := h.sharingService.ListShares(c.Context(), h.db, node.DriveID, sharing.ListSharesOptions{
 		Items:           []*virtualfs.Node{node},
 		IncludesExpired: includesExpired,
 	})
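The download endpoint above documents both a streamed body and a 307 redirect to a download URL. A small consumer sketch that surfaces the redirect instead of following it; whether the backend redirects or streams depends on the storage backend, and this client is illustrative rather than part of the commit.

package example

import "net/http"

// newDownloadClient returns an http.Client that stops at the documented 307
// for GET /drives/{driveID}/files/{fileID}/content, so the caller can read the
// Location header and hand the URL to something else (e.g. a browser).
func newDownloadClient() *http.Client {
	return &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse // expose the 307 and its Location
		},
	}
}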
@@ -13,16 +13,48 @@ CREATE TABLE IF NOT EXISTS users (

 CREATE INDEX idx_users_email ON users(email);

+CREATE TABLE IF NOT EXISTS organizations (
+    id UUID PRIMARY KEY,
+    kind TEXT NOT NULL CHECK (kind IN ('personal', 'team')),
+    name TEXT NOT NULL,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX idx_organizations_kind ON organizations(kind);
+
+-- Accounts represent a user's identity within an organization (membership / principal).
 CREATE TABLE IF NOT EXISTS accounts (
     id UUID PRIMARY KEY,
+    org_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE,
     user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    role TEXT NOT NULL DEFAULT 'member' CHECK (role IN ('admin', 'member')),
+    status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('invited', 'active', 'suspended')),
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE UNIQUE INDEX idx_accounts_org_user_id ON accounts(org_id, user_id);
+CREATE INDEX idx_accounts_user_id ON accounts(user_id);
+CREATE INDEX idx_accounts_org_id ON accounts(org_id);
+
+-- Drives are the storage tenants; VFS is partitioned by drive_id.
+CREATE TABLE IF NOT EXISTS drives (
+    id UUID PRIMARY KEY,
+    public_id TEXT NOT NULL UNIQUE,
+    org_id UUID NOT NULL REFERENCES organizations(id) ON DELETE CASCADE,
+    owner_account_id UUID REFERENCES accounts(id) ON DELETE SET NULL, -- NULL = shared/org-owned drive
+    name TEXT NOT NULL,
     storage_usage_bytes BIGINT NOT NULL DEFAULT 0,
     storage_quota_bytes BIGINT NOT NULL,
     created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
     updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
 );

-CREATE INDEX idx_accounts_user_id ON accounts(user_id);
+CREATE INDEX idx_drives_org_id ON drives(org_id);
+CREATE INDEX idx_drives_owner_account_id ON drives(owner_account_id) WHERE owner_account_id IS NOT NULL;
+CREATE UNIQUE INDEX idx_drives_org_owner_account_id ON drives(org_id, owner_account_id) WHERE owner_account_id IS NOT NULL;
+CREATE INDEX idx_drives_public_id ON drives(public_id);

 CREATE TABLE IF NOT EXISTS grants (
     id UUID PRIMARY KEY,
@@ -49,7 +81,7 @@ CREATE INDEX idx_refresh_tokens_expires_at ON refresh_tokens(expires_at);
 CREATE TABLE IF NOT EXISTS vfs_nodes (
     id UUID PRIMARY KEY,
     public_id TEXT NOT NULL UNIQUE, -- opaque ID for external API (no timestamp leak)
-    account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
+    drive_id UUID NOT NULL REFERENCES drives(id) ON DELETE CASCADE,
     parent_id UUID REFERENCES vfs_nodes(id) ON DELETE CASCADE, -- NULL = root directory
     kind TEXT NOT NULL CHECK (kind IN ('file', 'directory')),
     status TEXT NOT NULL DEFAULT 'ready' CHECK (status IN ('pending', 'ready')),
@@ -64,23 +96,25 @@ CREATE TABLE IF NOT EXISTS vfs_nodes (
     updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
     deleted_at TIMESTAMPTZ, -- soft delete for trash

-    -- No duplicate names in same parent (per account, excluding deleted)
-    CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (account_id, parent_id, name, deleted_at)
+    -- No duplicate names in same parent (per drive, excluding deleted)
+    CONSTRAINT unique_node_name UNIQUE NULLS NOT DISTINCT (drive_id, parent_id, name, deleted_at)
 );

-CREATE INDEX idx_vfs_nodes_account_id ON vfs_nodes(account_id) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_drive_id ON vfs_nodes(drive_id) WHERE deleted_at IS NULL;
 CREATE INDEX idx_vfs_nodes_parent_id ON vfs_nodes(parent_id) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_account_parent ON vfs_nodes(account_id, parent_id) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(account_id, kind) WHERE deleted_at IS NULL;
-CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(account_id, deleted_at) WHERE deleted_at IS NOT NULL;
+CREATE INDEX idx_vfs_nodes_drive_parent ON vfs_nodes(drive_id, parent_id) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_kind ON vfs_nodes(drive_id, kind) WHERE deleted_at IS NULL;
+CREATE INDEX idx_vfs_nodes_deleted ON vfs_nodes(drive_id, deleted_at) WHERE deleted_at IS NOT NULL;
 CREATE INDEX idx_vfs_nodes_public_id ON vfs_nodes(public_id);
-CREATE UNIQUE INDEX idx_vfs_nodes_account_root ON vfs_nodes(account_id) WHERE parent_id IS NULL; -- one root per account
+CREATE UNIQUE INDEX idx_vfs_nodes_drive_root ON vfs_nodes(drive_id) WHERE parent_id IS NULL; -- one root per drive
 CREATE INDEX idx_vfs_nodes_pending ON vfs_nodes(created_at) WHERE status = 'pending'; -- for cleanup job

 CREATE TABLE IF NOT EXISTS node_shares (
     id UUID PRIMARY KEY,
-    -- the account that owns the share
-    account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
+    -- storage tenant that owns the shared content
+    drive_id UUID NOT NULL REFERENCES drives(id) ON DELETE CASCADE,
+    -- principal that created/managed the share record
+    created_by_account_id UUID NOT NULL REFERENCES accounts(id) ON DELETE RESTRICT,
     public_id TEXT NOT NULL UNIQUE, -- opaque ID for external API (no timestamp leak)
     -- parent directory of the items in this share
     shared_directory_id UUID NOT NULL REFERENCES vfs_nodes(id) ON DELETE CASCADE,
@@ -93,6 +127,8 @@ CREATE TABLE IF NOT EXISTS node_shares (
 CREATE INDEX idx_node_shares_public_id ON node_shares(public_id);
 CREATE INDEX idx_node_shares_shared_directory_id ON node_shares(shared_directory_id);
 CREATE INDEX idx_node_shares_expires_at ON node_shares(expires_at) WHERE expires_at IS NOT NULL;
+CREATE INDEX idx_node_shares_drive_id ON node_shares(drive_id);
+CREATE INDEX idx_node_shares_created_by_account_id ON node_shares(created_by_account_id);

 CREATE TABLE IF NOT EXISTS share_permissions (
     id UUID PRIMARY KEY,
@@ -139,6 +175,15 @@ $$ LANGUAGE plpgsql;
 CREATE TRIGGER update_users_updated_at BEFORE UPDATE ON users
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

+CREATE TRIGGER update_organizations_updated_at BEFORE UPDATE ON organizations
+    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE TRIGGER update_accounts_updated_at BEFORE UPDATE ON accounts
+    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE TRIGGER update_drives_updated_at BEFORE UPDATE ON drives
+    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
 CREATE TRIGGER update_vfs_nodes_updated_at BEFORE UPDATE ON vfs_nodes
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

@@ -151,8 +196,5 @@ CREATE TRIGGER update_share_permissions_updated_at BEFORE UPDATE ON share_permis
 CREATE TRIGGER update_share_items_updated_at BEFORE UPDATE ON share_items
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

-CREATE TRIGGER update_accounts_updated_at BEFORE UPDATE ON accounts
-    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
-
 CREATE TRIGGER update_grants_updated_at BEFORE UPDATE ON grants
     FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
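One consequence of the new partial unique index idx_drives_org_owner_account_id is that an account can hold at most one personal drive per organization, while org-owned drives (owner_account_id IS NULL) are not limited. A hedged sketch of how that would surface through the Go services added later in this commit; mapping the unique violation to a friendly error is an assumption, since CreateDrive itself returns the raw insert error.

package example

import (
	"context"
	"errors"

	"github.com/get-drexa/drexa/internal/database"
	"github.com/get-drexa/drexa/internal/drive"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

// createPersonalDrive creates the caller's personal drive; a second attempt in
// the same org would trip the partial unique index on (org_id, owner_account_id).
func createPersonalDrive(ctx context.Context, db bun.IDB, svc *drive.Service, orgID, ownerAccountID uuid.UUID) (*drive.Drive, error) {
	d, err := svc.CreateDrive(ctx, db, drive.CreateDriveOptions{
		OrgID:          orgID,
		OwnerAccountID: &ownerAccountID,
		Name:           "Personal",
		QuotaBytes:     1 << 30, // 1 GiB, mirroring the old hard-coded quota
	})
	if err != nil && database.IsUniqueViolation(err) {
		return nil, errors.New("this account already has a personal drive in this org")
	}
	return d, err
}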
apps/backend/internal/drexa/api_integration_test.go (new file)
@@ -0,0 +1,221 @@
//go:build integration

package drexa

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"github.com/get-drexa/drexa/internal/database"
	"github.com/gofiber/fiber/v2"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
)

func TestRegistrationFlow(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	pg, err := runPostgres(ctx)
	if err != nil {
		t.Skipf("postgres testcontainer unavailable (docker not running/configured?): %v", err)
	}
	t.Cleanup(func() { _ = pg.Terminate(ctx) })

	postgresURL, err := pg.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		t.Fatalf("postgres connection string: %v", err)
	}

	blobRoot, err := os.MkdirTemp("", "drexa-blobs-*")
	if err != nil {
		t.Fatalf("temp blob dir: %v", err)
	}
	t.Cleanup(func() { _ = os.RemoveAll(blobRoot) })

	s, err := NewServer(Config{
		Server: ServerConfig{Port: 8080},
		Database: DatabaseConfig{
			PostgresURL: postgresURL,
		},
		JWT: JWTConfig{
			Issuer:    "drexa-test",
			Audience:  "drexa-test",
			SecretKey: []byte("drexa-test-secret"),
		},
		Storage: StorageConfig{
			Mode:     StorageModeFlat,
			Backend:  StorageBackendFS,
			RootPath: blobRoot,
		},
	})
	if err != nil {
		t.Fatalf("NewServer: %v", err)
	}

	if err := database.RunMigrations(ctx, s.db); err != nil {
		t.Fatalf("RunMigrations: %v", err)
	}

	type registerResponse struct {
		Account struct {
			ID     string `json:"id"`
			OrgID  string `json:"orgId"`
			UserID string `json:"userId"`
			Role   string `json:"role"`
			Status string `json:"status"`
		} `json:"account"`
		User struct {
			ID          string `json:"id"`
			DisplayName string `json:"displayName"`
			Email       string `json:"email"`
		} `json:"user"`
		Drive struct {
			ID string `json:"id"`
		} `json:"drive"`
		AccessToken  string `json:"accessToken"`
		RefreshToken string `json:"refreshToken"`
	}

	registerBody := map[string]any{
		"email":         "alice@example.com",
		"password":      "password123",
		"displayName":   "Alice",
		"tokenDelivery": "body",
	}

	var reg registerResponse
	doJSON(t, s.app, http.MethodPost, "/api/accounts", "", registerBody, http.StatusOK, &reg)
	if reg.AccessToken == "" {
		t.Fatalf("expected access token in registration response")
	}
	if reg.User.Email != "alice@example.com" {
		t.Fatalf("unexpected registered user email: %q", reg.User.Email)
	}
	if reg.Account.ID == "" || reg.Drive.ID == "" {
		t.Fatalf("expected account.id and drive.id to be set")
	}

	t.Run("users/me", func(t *testing.T) {
		var me struct {
			ID          string `json:"id"`
			DisplayName string `json:"displayName"`
			Email       string `json:"email"`
		}
		doJSON(t, s.app, http.MethodGet, "/api/users/me", reg.AccessToken, nil, http.StatusOK, &me)
		if me.ID != reg.User.ID {
			t.Fatalf("unexpected user id: got %q want %q", me.ID, reg.User.ID)
		}
		if me.Email != reg.User.Email {
			t.Fatalf("unexpected user email: got %q want %q", me.Email, reg.User.Email)
		}
	})

	t.Run("accounts/:id", func(t *testing.T) {
		var got struct {
			ID     string `json:"id"`
			OrgID  string `json:"orgId"`
			UserID string `json:"userId"`
			Role   string `json:"role"`
			Status string `json:"status"`
		}
		doJSON(t, s.app, http.MethodGet, fmt.Sprintf("/api/accounts/%s", reg.Account.ID), reg.AccessToken, nil, http.StatusOK, &got)
		if got.ID != reg.Account.ID {
			t.Fatalf("unexpected account id: got %q want %q", got.ID, reg.Account.ID)
		}
		if got.UserID != reg.User.ID {
			t.Fatalf("unexpected account userId: got %q want %q", got.UserID, reg.User.ID)
		}
	})

	t.Run("root directory empty", func(t *testing.T) {
		var resp struct {
			Items []any `json:"items"`
		}
		doJSON(
			t,
			s.app,
			http.MethodGet,
			fmt.Sprintf("/api/drives/%s/directories/root/content?limit=100", reg.Drive.ID),
			reg.AccessToken,
			nil,
			http.StatusOK,
			&resp,
		)
		if len(resp.Items) != 0 {
			t.Fatalf("expected empty root directory, got %d items", len(resp.Items))
		}
	})
}

func runPostgres(ctx context.Context) (_ *postgres.PostgresContainer, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("testcontainers panic: %v", r)
		}
	}()

	return postgres.Run(
		ctx,
		"postgres:16-alpine",
		postgres.WithDatabase("drexa"),
		postgres.WithUsername("drexa"),
		postgres.WithPassword("drexa"),
		postgres.BasicWaitStrategies(),
	)
}

func doJSON(
	t *testing.T,
	app *fiber.App,
	method string,
	path string,
	accessToken string,
	body any,
	wantStatus int,
	out any,
) {
	t.Helper()

	var reqBody *bytes.Reader
	if body == nil {
		reqBody = bytes.NewReader(nil)
	} else {
		b, err := json.Marshal(body)
		if err != nil {
			t.Fatalf("json marshal: %v", err)
		}
		reqBody = bytes.NewReader(b)
	}

	req := httptest.NewRequest(method, path, reqBody)
	req.Header.Set("Content-Type", "application/json")
	if accessToken != "" {
		req.Header.Set("Authorization", "Bearer "+accessToken)
	}

	res, err := app.Test(req, 10_000)
	if err != nil {
		t.Fatalf("%s %s: %v", method, path, err)
	}
	defer res.Body.Close()

	if res.StatusCode != wantStatus {
		b, _ := io.ReadAll(res.Body)
		t.Fatalf("%s %s: status %d want %d body=%s", method, path, res.StatusCode, wantStatus, string(b))
	}

	if out == nil {
		return
	}
	if err := json.NewDecoder(res.Body).Decode(out); err != nil {
		t.Fatalf("%s %s: decode response: %v", method, path, err)
	}
}
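The doJSON helper above is easy to reuse for further steps of the flow. A hypothetical follow-up helper that would sit in the same test file; the createDirectoryRequest field names ("name", "parentId") are assumptions, not confirmed by this commit.

// createDirectory creates a directory in a drive via the new /api/drives routes
// and returns its ID; it relies on doJSON and the imports already in this file.
func createDirectory(t *testing.T, app *fiber.App, token, driveID, name string) string {
	t.Helper()
	var dir struct {
		ID string `json:"id"`
	}
	body := map[string]any{"name": name, "parentId": "root"} // assumed request shape
	doJSON(t, app, http.MethodPost,
		fmt.Sprintf("/api/drives/%s/directories", driveID),
		token, body, http.StatusOK, &dir)
	return dir.ID
}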
@@ -10,7 +10,10 @@ import (
 	"github.com/get-drexa/drexa/internal/blob"
 	"github.com/get-drexa/drexa/internal/catalog"
 	"github.com/get-drexa/drexa/internal/database"
+	"github.com/get-drexa/drexa/internal/drive"
 	"github.com/get-drexa/drexa/internal/httperr"
+	"github.com/get-drexa/drexa/internal/organization"
+	"github.com/get-drexa/drexa/internal/registration"
 	"github.com/get-drexa/drexa/internal/sharing"
 	"github.com/get-drexa/drexa/internal/upload"
 	"github.com/get-drexa/drexa/internal/user"
@@ -100,13 +103,16 @@ func NewServer(c Config) (*Server, error) {
 	}

 	userService := user.NewService()
+	organizationService := organization.NewService()
+	accountService := account.NewService()
+	driveService := drive.NewService()
 	authService := auth.NewService(userService, auth.TokenConfig{
 		Issuer:    c.JWT.Issuer,
 		Audience:  c.JWT.Audience,
 		SecretKey: c.JWT.SecretKey,
 	})
 	uploadService := upload.NewService(vfs, blobStore)
-	accountService := account.NewService(userService, vfs)
+	registrationService := registration.NewService(userService, organizationService, accountService, driveService, vfs)

 	cookieConfig := auth.CookieConfig{
 		Domain: c.Cookie.Domain,
@@ -119,15 +125,18 @@ func NewServer(c Config) (*Server, error) {
 	auth.NewHTTPHandler(authService, db, cookieConfig).RegisterRoutes(api)
 	user.NewHTTPHandler(userService, db, authMiddleware).RegisterRoutes(api)

-	accountRouter := account.NewHTTPHandler(accountService, authService, vfs, db, authMiddleware, cookieConfig).RegisterRoutes(api)
-	upload.NewHTTPHandler(uploadService, db).RegisterRoutes(accountRouter.VFSRouter())
+	account.NewHTTPHandler(accountService, db, authMiddleware).RegisterRoutes(api)
+	registration.NewHTTPHandler(registrationService, authService, db, cookieConfig).RegisterRoutes(api)

-	shareHTTP := sharing.NewHTTPHandler(sharingService, accountService, vfs, db, optionalAuthMiddleware)
+	driveRouter := drive.NewHTTPHandler(driveService, accountService, vfs, db, authMiddleware).RegisterRoutes(api)
+	upload.NewHTTPHandler(uploadService, db).RegisterRoutes(driveRouter)
+
+	shareHTTP := sharing.NewHTTPHandler(sharingService, accountService, driveService, vfs, db, optionalAuthMiddleware)
 	shareRoutes := shareHTTP.RegisterShareConsumeRoutes(api)
-	shareHTTP.RegisterShareManagementRoutes(accountRouter)
+	shareHTTP.RegisterShareManagementRoutes(driveRouter)

 	catalogHTTP := catalog.NewHTTPHandler(sharingService, vfs, db)
-	catalogHTTP.RegisterRoutes(accountRouter.VFSRouter())
+	catalogHTTP.RegisterRoutes(driveRouter)
 	catalogHTTP.RegisterRoutes(shareRoutes)

 	s := &Server{
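After this rewiring, every VFS-scoped module hangs off the *virtualfs.ScopedRouter returned by drive.RegisterRoutes, the way upload, sharing and catalog do above. A hypothetical sketch of what adding one more drive-scoped module would look like; the "search" handler is made up purely to show the shape.

package example

import (
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/gofiber/fiber/v2"
)

// registerSearch attaches an extra route under /drives/:driveID. By the time a
// handler here runs, driveMiddleware has already resolved the drive, the
// caller's account and the VFS access scope.
func registerSearch(driveRouter *virtualfs.ScopedRouter) {
	driveRouter.Router.Get("/search", func(c *fiber.Ctx) error {
		return c.SendString("not implemented")
	})
}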
apps/backend/internal/drive/drive.go (new file)
@@ -0,0 +1,29 @@
package drive

import (
	"time"

	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Drive struct {
	bun.BaseModel `bun:"drives" swaggerignore:"true"`

	ID       uuid.UUID `bun:",pk,type:uuid" json:"id"`
	PublicID string    `bun:"public_id,notnull" json:"publicId"`
	OrgID    uuid.UUID `bun:"org_id,notnull,type:uuid" json:"orgId"`
	Name     string    `bun:"name,notnull" json:"name"`

	OwnerAccountID *uuid.UUID `bun:"owner_account_id,type:uuid" json:"ownerAccountId,omitempty"`

	StorageUsageBytes int64 `bun:"storage_usage_bytes,notnull" json:"storageUsageBytes"`
	StorageQuotaBytes int64 `bun:"storage_quota_bytes,notnull" json:"storageQuotaBytes"`

	CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
	UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}

func newDriveID() (uuid.UUID, error) {
	return uuid.NewV7()
}
apps/backend/internal/drive/err.go (new file)
@@ -0,0 +1,9 @@
package drive

import "errors"

var (
	ErrDriveNotFound   = errors.New("drive not found")
	ErrDriveForbidden  = errors.New("drive forbidden")
	ErrDriveNotAllowed = errors.New("drive not allowed")
)
apps/backend/internal/drive/http.go (new file)
@@ -0,0 +1,114 @@
package drive

import (
	"errors"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/get-drexa/drexa/internal/httperr"
	"github.com/get-drexa/drexa/internal/reqctx"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/gofiber/fiber/v2"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type HTTPHandler struct {
	driveService   *Service
	accountService *account.Service
	vfs            *virtualfs.VirtualFS
	db             *bun.DB
	authMiddleware fiber.Handler
}

func NewHTTPHandler(driveService *Service, accountService *account.Service, vfs *virtualfs.VirtualFS, db *bun.DB, authMiddleware fiber.Handler) *HTTPHandler {
	return &HTTPHandler{
		driveService:   driveService,
		accountService: accountService,
		vfs:            vfs,
		db:             db,
		authMiddleware: authMiddleware,
	}
}

func (h *HTTPHandler) RegisterRoutes(api fiber.Router) *virtualfs.ScopedRouter {
	api.Get("/drives", h.authMiddleware, h.listDrives)

	drive := api.Group("/drives/:driveID")
	drive.Use(h.authMiddleware)
	drive.Use(h.driveMiddleware)

	drive.Get("/", h.getDrive)

	return &virtualfs.ScopedRouter{Router: drive}
}

func (h *HTTPHandler) listDrives(c *fiber.Ctx) error {
	u := reqctx.AuthenticatedUser(c).(*user.User)

	drives, err := h.driveService.ListDrivesForUser(c.Context(), h.db, u.ID)
	if err != nil {
		return httperr.Internal(err)
	}
	return c.JSON(drives)
}

func (h *HTTPHandler) getDrive(c *fiber.Ctx) error {
	drive, ok := reqctx.CurrentDrive(c).(*Drive)
	if !ok || drive == nil {
		return c.SendStatus(fiber.StatusNotFound)
	}
	return c.JSON(drive)
}

func (h *HTTPHandler) driveMiddleware(c *fiber.Ctx) error {
	u := reqctx.AuthenticatedUser(c).(*user.User)

	driveID, err := uuid.Parse(c.Params("driveID"))
	if err != nil {
		return c.SendStatus(fiber.StatusNotFound)
	}

	drive, err := h.driveService.DriveByID(c.Context(), h.db, driveID)
	if err != nil {
		if errors.Is(err, ErrDriveNotFound) {
			return c.SendStatus(fiber.StatusNotFound)
		}
		return httperr.Internal(err)
	}

	acc, err := h.accountService.FindUserAccountInOrg(c.Context(), h.db, drive.OrgID, u.ID)
	if err != nil {
		if errors.Is(err, account.ErrAccountNotFound) {
			return c.SendStatus(fiber.StatusNotFound)
		}
		return httperr.Internal(err)
	}
	if acc.Status != account.StatusActive {
		return c.SendStatus(fiber.StatusNotFound)
	}

	if !h.driveService.CanAccessDrive(drive, acc.OrgID, acc.ID) {
		return c.SendStatus(fiber.StatusNotFound)
	}

	root, err := h.vfs.FindRootDirectory(c.Context(), h.db, drive.ID)
	if err != nil {
		return httperr.Internal(err)
	}

	scope := &virtualfs.Scope{
		DriveID:      drive.ID,
		RootNodeID:   root.ID,
		AllowedOps:   virtualfs.AllAllowedOps,
		AllowedNodes: nil,
		ActorKind:    virtualfs.ScopeActorAccount,
		ActorID:      acc.ID,
	}

	reqctx.SetCurrentDrive(c, drive)
	reqctx.SetCurrentAccount(c, acc)
	reqctx.SetVFSAccessScope(c, scope)

	return c.Next()
}
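A short sketch of a handler running behind driveMiddleware: by the time it executes, the current drive, the caller's account and the VFS access scope are already on the request context, retrieved with the same reqctx accessors used above. The quota endpoint itself is hypothetical and not part of this commit.

// driveUsage would live in the same drive package; it only reads the model
// fields introduced in drive.go above.
func (h *HTTPHandler) driveUsage(c *fiber.Ctx) error {
	d, ok := reqctx.CurrentDrive(c).(*Drive)
	if !ok || d == nil {
		return c.SendStatus(fiber.StatusNotFound)
	}
	return c.JSON(fiber.Map{
		"usedBytes":  d.StorageUsageBytes,
		"quotaBytes": d.StorageQuotaBytes,
		"remaining":  d.StorageQuotaBytes - d.StorageUsageBytes,
	})
}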
apps/backend/internal/drive/service.go (new file)
@@ -0,0 +1,130 @@
package drive

import (
	"context"
	"crypto/rand"
	"database/sql"
	"encoding/binary"
	"errors"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/google/uuid"
	"github.com/sqids/sqids-go"
	"github.com/uptrace/bun"
)

type Service struct{}

type CreateDriveOptions struct {
	OrgID          uuid.UUID
	OwnerAccountID *uuid.UUID
	Name           string
	QuotaBytes     int64
}

func NewService() *Service {
	return &Service{}
}

func (s *Service) CreateDrive(ctx context.Context, db bun.IDB, opts CreateDriveOptions) (*Drive, error) {
	publicID, err := generatePublicID()
	if err != nil {
		return nil, err
	}

	id, err := newDriveID()
	if err != nil {
		return nil, err
	}

	drive := &Drive{
		ID:                id,
		PublicID:          publicID,
		OrgID:             opts.OrgID,
		OwnerAccountID:    opts.OwnerAccountID,
		Name:              opts.Name,
		StorageQuotaBytes: opts.QuotaBytes,
	}

	_, err = db.NewInsert().Model(drive).Returning("*").Exec(ctx)
	if err != nil {
		return nil, err
	}
	return drive, nil
}

func (s *Service) DriveByID(ctx context.Context, db bun.IDB, id uuid.UUID) (*Drive, error) {
	var drive Drive
	err := db.NewSelect().Model(&drive).Where("id = ?", id).Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, ErrDriveNotFound
		}
		return nil, err
	}
	return &drive, nil
}

// ListAccessibleDrives returns drives a principal account can access:
//   - personal drives: owner_account_id = account.ID
//   - shared drives: owner_account_id IS NULL (future)
func (s *Service) ListAccessibleDrives(ctx context.Context, db bun.IDB, orgID uuid.UUID, accountID uuid.UUID) ([]*Drive, error) {
	var drives []*Drive
	err := db.NewSelect().Model(&drives).
		Where("org_id = ?", orgID).
		Where("owner_account_id IS NULL OR owner_account_id = ?", accountID).
		Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return make([]*Drive, 0), nil
		}
		return nil, err
	}
	return drives, nil
}

// ListDrivesForUser returns all drives the user can access across orgs.
func (s *Service) ListDrivesForUser(ctx context.Context, db bun.IDB, userID uuid.UUID) ([]*Drive, error) {
	var drives []*Drive
	err := db.NewSelect().Model(&drives).
		Join("JOIN accounts a ON a.org_id = drives.org_id").
		Where("a.user_id = ?", userID).
		Where("a.status = ?", account.StatusActive).
		Where("drives.owner_account_id IS NULL OR drives.owner_account_id = a.id").
		Scan(ctx)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return make([]*Drive, 0), nil
		}
		return nil, err
	}
	return drives, nil
}

func (s *Service) CanAccessDrive(drive *Drive, orgID uuid.UUID, accountID uuid.UUID) bool {
	if drive == nil {
		return false
	}
	if drive.OrgID != orgID {
		return false
	}
	if drive.OwnerAccountID == nil {
		return true
	}
	return *drive.OwnerAccountID == accountID
}

func generatePublicID() (string, error) {
	sqid, err := sqids.New()
	if err != nil {
		return "", err
	}

	var b [8]byte
	_, err = rand.Read(b[:])
	if err != nil {
		return "", err
	}
	n := binary.BigEndian.Uint64(b[:])
	return sqid.Encode([]uint64{n})
}
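To make the access rules above concrete: org-owned drives (OwnerAccountID nil) are visible to any active account in the org, personal drives only to their owner. A small example-test sketch that would live in a _test.go file in the same package; the UUIDs are placeholders.

func ExampleService_CanAccessDrive() {
	svc := NewService()
	orgID := uuid.MustParse("018f0000-0000-7000-8000-000000000001")
	owner := uuid.MustParse("018f0000-0000-7000-8000-000000000002")
	other := uuid.MustParse("018f0000-0000-7000-8000-000000000003")

	shared := &Drive{OrgID: orgID}                            // org-owned drive
	personal := &Drive{OrgID: orgID, OwnerAccountID: &owner}  // personal drive

	fmt.Println(svc.CanAccessDrive(shared, orgID, other))
	fmt.Println(svc.CanAccessDrive(personal, orgID, other))
	fmt.Println(svc.CanAccessDrive(personal, orgID, owner))
	// Output:
	// true
	// false
	// true
}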
apps/backend/internal/organization/organization.go (new file)
@@ -0,0 +1,31 @@
package organization

import (
	"time"

	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Kind string

const (
	KindPersonal Kind = "personal"
	KindTeam     Kind = "team"
)

type Organization struct {
	bun.BaseModel `bun:"organizations" swaggerignore:"true"`

	ID   uuid.UUID `bun:",pk,type:uuid" json:"id"`
	Kind Kind      `bun:"kind,notnull" json:"kind" example:"personal"`
	Name string    `bun:"name,notnull" json:"name" example:"Personal"`

	CreatedAt time.Time `bun:"created_at,notnull,nullzero" json:"createdAt"`
	UpdatedAt time.Time `bun:"updated_at,notnull,nullzero" json:"updatedAt"`
}

func newOrganizationID() (uuid.UUID, error) {
	return uuid.NewV7()
}
apps/backend/internal/organization/service.go (new file)
@@ -0,0 +1,44 @@
package organization

import (
	"context"

	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

type Service struct{}

func NewService() *Service {
	return &Service{}
}

func (s *Service) CreatePersonalOrganization(ctx context.Context, db bun.IDB, name string) (*Organization, error) {
	id, err := newOrganizationID()
	if err != nil {
		return nil, err
	}

	org := &Organization{
		ID:   id,
		Kind: KindPersonal,
		Name: name,
	}

	_, err = db.NewInsert().Model(org).Returning("*").Exec(ctx)
	if err != nil {
		return nil, err
	}

	return org, nil
}

func (s *Service) OrganizationByID(ctx context.Context, db bun.IDB, id uuid.UUID) (*Organization, error) {
	var org Organization
	err := db.NewSelect().Model(&org).Where("id = ?", id).Scan(ctx)
	if err != nil {
		return nil, err
	}
	return &org, nil
}
apps/backend/internal/registration/http.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package registration

import (
	"errors"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/get-drexa/drexa/internal/auth"
	"github.com/get-drexa/drexa/internal/drive"
	"github.com/get-drexa/drexa/internal/httperr"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/gofiber/fiber/v2"
	"github.com/uptrace/bun"
)

type HTTPHandler struct {
	service *Service
	authService *auth.Service
	db *bun.DB
	cookieConfig auth.CookieConfig
}

type registerAccountRequest struct {
	Email string `json:"email"`
	Password string `json:"password"`
	DisplayName string `json:"displayName"`
	TokenDelivery string `json:"tokenDelivery" enums:"cookie,body"`
}

type registerAccountResponse struct {
	Account *account.Account `json:"account"`
	User *user.User `json:"user"`
	Drive *drive.Drive `json:"drive"`

	AccessToken string `json:"accessToken,omitempty"`
	RefreshToken string `json:"refreshToken,omitempty"`
}

func NewHTTPHandler(service *Service, authService *auth.Service, db *bun.DB, cookieConfig auth.CookieConfig) *HTTPHandler {
	return &HTTPHandler{
		service: service,
		authService: authService,
		db: db,
		cookieConfig: cookieConfig,
	}
}

func (h *HTTPHandler) RegisterRoutes(api fiber.Router) {
	api.Post("/accounts", h.registerAccount)
}

func (h *HTTPHandler) registerAccount(c *fiber.Ctx) error {
	req := new(registerAccountRequest)
	if err := c.BodyParser(req); err != nil {
		return c.SendStatus(fiber.StatusBadRequest)
	}

	tx, err := h.db.BeginTx(c.Context(), nil)
	if err != nil {
		return httperr.Internal(err)
	}
	defer tx.Rollback()

	result, err := h.service.Register(c.Context(), tx, RegisterOptions{
		Email: req.Email,
		Password: req.Password,
		DisplayName: req.DisplayName,
	})
	if err != nil {
		var ae *user.AlreadyExistsError
		if errors.As(err, &ae) {
			return c.SendStatus(fiber.StatusConflict)
		}
		if errors.Is(err, account.ErrAccountAlreadyExists) {
			return c.SendStatus(fiber.StatusConflict)
		}
		return httperr.Internal(err)
	}

	grant, err := h.authService.GrantForUser(c.Context(), tx, result.User)
	if err != nil {
		return httperr.Internal(err)
	}

	if err := tx.Commit(); err != nil {
		return httperr.Internal(err)
	}

	resp := registerAccountResponse{
		Account: result.Account,
		User: result.User,
		Drive: result.Drive,
	}

	switch req.TokenDelivery {
	default:
		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "invalid token delivery method"})

	case auth.TokenDeliveryCookie:
		auth.SetAuthCookies(c, grant.AccessToken, grant.RefreshToken, h.cookieConfig)
		return c.JSON(resp)

	case auth.TokenDeliveryBody:
		resp.AccessToken = grant.AccessToken
		resp.RefreshToken = grant.RefreshToken
		return c.JSON(resp)
	}
}
apps/backend/internal/registration/service.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package registration

import (
	"context"

	"github.com/get-drexa/drexa/internal/account"
	"github.com/get-drexa/drexa/internal/organization"
	"github.com/get-drexa/drexa/internal/password"
	"github.com/get-drexa/drexa/internal/user"
	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/get-drexa/drexa/internal/drive"
	"github.com/uptrace/bun"
)

type Service struct {
	userService user.Service
	organizationService organization.Service
	accountService account.Service
	driveService drive.Service
	vfs *virtualfs.VirtualFS
}

type RegisterOptions struct {
	Email string
	Password string
	DisplayName string
}

type RegisterResult struct {
	Account *account.Account
	User *user.User
	Drive *drive.Drive
}

func NewService(userService *user.Service, organizationService *organization.Service, accountService *account.Service, driveService *drive.Service, vfs *virtualfs.VirtualFS) *Service {
	return &Service{
		userService: *userService,
		organizationService: *organizationService,
		accountService: *accountService,
		driveService: *driveService,
		vfs: vfs,
	}
}

func (s *Service) Register(ctx context.Context, db bun.IDB, opts RegisterOptions) (*RegisterResult, error) {
	hashed, err := password.HashString(opts.Password)
	if err != nil {
		return nil, err
	}

	u, err := s.userService.RegisterUser(ctx, db, user.UserRegistrationOptions{
		Email: opts.Email,
		Password: hashed,
		DisplayName: opts.DisplayName,
	})
	if err != nil {
		return nil, err
	}

	org, err := s.organizationService.CreatePersonalOrganization(ctx, db, "Personal")
	if err != nil {
		return nil, err
	}

	acc, err := s.accountService.CreateAccount(ctx, db, org.ID, u.ID, account.RoleAdmin, account.StatusActive)
	if err != nil {
		return nil, err
	}

	drv, err := s.driveService.CreateDrive(ctx, db, drive.CreateDriveOptions{
		OrgID: org.ID,
		OwnerAccountID: &acc.ID,
		Name: "My Drive",
		QuotaBytes: 1024 * 1024 * 1024, // 1GB
	})
	if err != nil {
		return nil, err
	}

	_, err = s.vfs.CreateRootDirectory(ctx, db, drv.ID)
	if err != nil {
		return nil, err
	}

	return &RegisterResult{
		Account: acc,
		User: u,
		Drive: drv,
	}, nil
}
@@ -9,6 +9,7 @@ import (
 const authenticatedUserKey = "authenticatedUser"
 const vfsAccessScope = "vfsAccessScope"
 const currentAccountKey = "currentAccount"
+const currentDriveKey = "currentDrive"
 
 var ErrUnauthenticatedRequest = errors.New("unauthenticated request")
 
@@ -29,6 +30,11 @@ func SetCurrentAccount(c *fiber.Ctx, account any) {
 	c.Locals(currentAccountKey, account)
 }
 
+// SetCurrentDrive sets the current drive in the fiber context.
+func SetCurrentDrive(c *fiber.Ctx, drive any) {
+	c.Locals(currentDriveKey, drive)
+}
+
 // SetVFSAccessScope sets the VFS access scope in the fiber context.
 func SetVFSAccessScope(c *fiber.Ctx, scope any) {
 	c.Locals(vfsAccessScope, scope)
@@ -39,6 +45,11 @@ func CurrentAccount(c *fiber.Ctx) any {
 	return c.Locals(currentAccountKey)
 }
 
+// CurrentDrive returns the current drive from the given fiber context.
+func CurrentDrive(c *fiber.Ctx) any {
+	return c.Locals(currentDriveKey)
+}
+
 // VFSAccessScope returns the VFS access scope from the given fiber context.
 func VFSAccessScope(c *fiber.Ctx) any {
 	return c.Locals(vfsAccessScope)
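The new drive helpers mirror the existing account ones: values are stored as `any`, so readers type-assert and must handle a missing value. A minimal sketch of the round trip (illustrative, not part of this commit; only the wrapper function and package name are made up):

package reqctxsketch // illustrative package, not in this commit

import (
	"github.com/get-drexa/drexa/internal/drive"
	"github.com/get-drexa/drexa/internal/reqctx"
	"github.com/gofiber/fiber/v2"
)

// storeAndReadDrive shows the intended contract: SetCurrentDrive stores the
// value untyped, CurrentDrive returns `any`, and a failed assertion yields a
// nil *drive.Drive that callers must check (the handlers later in this diff
// do exactly that).
func storeAndReadDrive(c *fiber.Ctx, d *drive.Drive) *drive.Drive {
	reqctx.SetCurrentDrive(c, d)

	got, _ := reqctx.CurrentDrive(c).(*drive.Drive)
	return got // nil when nothing (or a value of another type) was stored
}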
@@ -5,21 +5,22 @@ import (
 	"time"
 
 	"github.com/get-drexa/drexa/internal/account"
+	"github.com/get-drexa/drexa/internal/drive"
 	"github.com/get-drexa/drexa/internal/httperr"
 	"github.com/get-drexa/drexa/internal/nullable"
 	"github.com/get-drexa/drexa/internal/reqctx"
 	"github.com/get-drexa/drexa/internal/user"
 	"github.com/get-drexa/drexa/internal/virtualfs"
 	"github.com/gofiber/fiber/v2"
-	"github.com/google/uuid"
 	"github.com/uptrace/bun"
 )
 
 type HTTPHandler struct {
 	sharingService *Service
 	accountService *account.Service
-	vfs *virtualfs.VirtualFS
-	db *bun.DB
+	driveService *drive.Service
+	vfs *virtualfs.VirtualFS
+	db *bun.DB
 	optionalAuthMiddleware fiber.Handler
 }
 
@@ -39,12 +40,13 @@ type patchShareRequest struct {
 	ExpiresAt nullable.Time `json:"expiresAt" example:"2025-01-15T00:00:00Z"`
 }
 
-func NewHTTPHandler(sharingService *Service, accountService *account.Service, vfs *virtualfs.VirtualFS, db *bun.DB, optionalAuthMiddleware fiber.Handler) *HTTPHandler {
+func NewHTTPHandler(sharingService *Service, accountService *account.Service, driveService *drive.Service, vfs *virtualfs.VirtualFS, db *bun.DB, optionalAuthMiddleware fiber.Handler) *HTTPHandler {
 	return &HTTPHandler{
 		sharingService: sharingService,
 		accountService: accountService,
-		vfs: vfs,
-		db: db,
+		driveService: driveService,
+		vfs: vfs,
+		db: db,
 		optionalAuthMiddleware: optionalAuthMiddleware,
 	}
 }
@@ -57,7 +59,7 @@ func (h *HTTPHandler) RegisterShareConsumeRoutes(r fiber.Router) *virtualfs.Scop
 	return &virtualfs.ScopedRouter{Router: g}
 }
 
-func (h *HTTPHandler) RegisterShareManagementRoutes(api *account.ScopedRouter) {
+func (h *HTTPHandler) RegisterShareManagementRoutes(api *virtualfs.ScopedRouter) {
 	g := api.Group("/shares")
 	g.Post("/", h.createShare)
 	g.Get("/:shareID", h.getShare)
@@ -76,33 +78,23 @@ func (h *HTTPHandler) shareMiddleware(c *fiber.Ctx) error {
 		return httperr.Internal(err)
 	}
 
-	// a share can be public or shared to specific accounts
-	// if latter, the accountId query param is expected and the route should be authenticated
-	// then the correct account is found using the authenticated user and the accountId query param
-	// finally, the account scope is resolved for the share
-	// otherwise, consumerAccount will be nil to attempt to resolve a public scope for the share
-
 	var consumerAccount *account.Account
-	qAccountID := c.Query("accountId")
-	if qAccountID != "" {
-		consumerAccountID, err := uuid.Parse(qAccountID)
+	u, _ := reqctx.AuthenticatedUser(c).(*user.User)
+	if u != nil {
+		drive, err := h.driveService.DriveByID(c.Context(), h.db, share.DriveID)
 		if err != nil {
-			return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
-				"error": "invalid account ID",
-			})
+			return httperr.Internal(err)
 		}
 
-		u, _ := reqctx.AuthenticatedUser(c).(*user.User)
-		if u == nil {
-			return c.SendStatus(fiber.StatusUnauthorized)
-		}
-		consumerAccount, err = h.accountService.AccountByID(c.Context(), h.db, u.ID, consumerAccountID)
+		consumerAccount, err = h.accountService.FindUserAccountInOrg(c.Context(), h.db, drive.OrgID, u.ID)
 		if err != nil {
 			if errors.Is(err, account.ErrAccountNotFound) {
-				return c.SendStatus(fiber.StatusNotFound)
+				consumerAccount = nil
+			} else {
+				return httperr.Internal(err)
 			}
-			return httperr.Internal(err)
+		} else if consumerAccount.Status != account.StatusActive {
+			consumerAccount = nil
 		}
 	}
 
@@ -132,19 +124,28 @@ func (h *HTTPHandler) shareMiddleware(c *fiber.Ctx) error {
 // @Tags shares
 // @Accept json
 // @Produce json
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param shareID path string true "Share ID"
 // @Success 200 {object} Share "Share details"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Share not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/shares/{shareID} [get]
+// @Router /drives/{driveID}/shares/{shareID} [get]
 func (h *HTTPHandler) getShare(c *fiber.Ctx) error {
 	shareID := c.Params("shareID")
 	share, err := h.sharingService.FindShareByPublicID(c.Context(), h.db, shareID)
 	if err != nil {
+		if errors.Is(err, ErrShareNotFound) {
+			return c.SendStatus(fiber.StatusNotFound)
+		}
 		return httperr.Internal(err)
 	}
 
+	drive, _ := reqctx.CurrentDrive(c).(*drive.Drive)
+	if drive == nil || share.DriveID != drive.ID {
+		return c.SendStatus(fiber.StatusNotFound)
+	}
+
 	return c.JSON(share)
 }
 
@@ -154,14 +155,14 @@ func (h *HTTPHandler) getShare(c *fiber.Ctx) error {
 // @Tags shares
 // @Accept json
 // @Produce json
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param request body createShareRequest true "Share details"
 // @Success 200 {object} Share "Created share"
 // @Failure 400 {object} map[string]string "Invalid request, items not in same directory, or root directory cannot be shared"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {object} map[string]string "One or more items not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/shares [post]
+// @Router /drives/{driveID}/shares [post]
 func (h *HTTPHandler) createShare(c *fiber.Ctx) error {
 	scope, ok := scopeFromCtx(c)
 	if !ok {
@@ -173,6 +174,11 @@ func (h *HTTPHandler) createShare(c *fiber.Ctx) error {
 		return c.SendStatus(fiber.StatusUnauthorized)
 	}
 
+	drive, _ := reqctx.CurrentDrive(c).(*drive.Drive)
+	if drive == nil {
+		return c.SendStatus(fiber.StatusUnauthorized)
+	}
+
 	var req createShareRequest
 	if err := c.BodyParser(&req); err != nil {
 		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
@@ -207,7 +213,7 @@ func (h *HTTPHandler) createShare(c *fiber.Ctx) error {
 		opts.ExpiresAt = *req.ExpiresAt
 	}
 
-	share, err := h.sharingService.CreateShare(c.Context(), tx, acc.ID, opts)
+	share, err := h.sharingService.CreateShare(c.Context(), tx, drive.ID, acc.ID, opts)
 	if err != nil {
 		if errors.Is(err, ErrNotSameParent) {
 			return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "items must be in the same directory"})
@@ -232,7 +238,7 @@ func (h *HTTPHandler) createShare(c *fiber.Ctx) error {
 // @Tags shares
 // @Accept json
 // @Produce json
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param shareID path string true "Share ID"
 // @Param request body patchShareRequest true "Share details"
 // @Success 200 {object} Share "Updated share"
@@ -240,7 +246,7 @@ func (h *HTTPHandler) createShare(c *fiber.Ctx) error {
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Share not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/shares/{shareID} [patch]
+// @Router /drives/{driveID}/shares/{shareID} [patch]
 func (h *HTTPHandler) updateShare(c *fiber.Ctx) error {
 	shareID := c.Params("shareID")
 
@@ -252,6 +258,16 @@ func (h *HTTPHandler) updateShare(c *fiber.Ctx) error {
 		return httperr.Internal(err)
 	}
 
+	drive, _ := reqctx.CurrentDrive(c).(*drive.Drive)
+	if drive == nil || share.DriveID != drive.ID {
+		return c.SendStatus(fiber.StatusNotFound)
+	}
+
+	acc, _ := reqctx.CurrentAccount(c).(*account.Account)
+	if acc == nil || (acc.Role != account.RoleAdmin && share.CreatedByAccountID != acc.ID) {
+		return c.SendStatus(fiber.StatusNotFound)
+	}
+
 	var req patchShareRequest
 	if err := c.BodyParser(&req); err != nil {
 		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
@@ -291,16 +307,34 @@ func (h *HTTPHandler) updateShare(c *fiber.Ctx) error {
 // @Summary Delete share
 // @Description Delete a share link, revoking access for all users
 // @Tags shares
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param shareID path string true "Share ID"
 // @Success 204 {string} string "Share deleted"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Share not found"
 // @Security BearerAuth
-// @Router /accounts/{accountID}/shares/{shareID} [delete]
+// @Router /drives/{driveID}/shares/{shareID} [delete]
 func (h *HTTPHandler) deleteShare(c *fiber.Ctx) error {
 	shareID := c.Params("shareID")
 
+	share, err := h.sharingService.FindShareByPublicID(c.Context(), h.db, shareID)
+	if err != nil {
+		if errors.Is(err, ErrShareNotFound) {
+			return c.SendStatus(fiber.StatusNotFound)
+		}
+		return httperr.Internal(err)
+	}
+
+	drive, _ := reqctx.CurrentDrive(c).(*drive.Drive)
+	if drive == nil || share.DriveID != drive.ID {
+		return c.SendStatus(fiber.StatusNotFound)
+	}
+
+	acc, _ := reqctx.CurrentAccount(c).(*account.Account)
+	if acc == nil || (acc.Role != account.RoleAdmin && share.CreatedByAccountID != acc.ID) {
+		return c.SendStatus(fiber.StatusNotFound)
+	}
+
 	tx, err := h.db.BeginTx(c.Context(), nil)
 	if err != nil {
 		return httperr.Internal(err)
@@ -59,7 +59,7 @@ func NewService(vfs *virtualfs.VirtualFS) (*Service, error) {
 
 // CreateShare creates a share record for its allowed items.
 // A share is a partial share of a directory: the share root is always the common parent directory of all items.
-func (s *Service) CreateShare(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts CreateShareOptions) (*Share, error) {
+func (s *Service) CreateShare(ctx context.Context, db bun.IDB, driveID uuid.UUID, createdByAccountID uuid.UUID, opts CreateShareOptions) (*Share, error) {
 	if len(opts.Items) == 0 {
 		return nil, ErrShareNoItems
 	}
@@ -87,12 +87,13 @@ func (s *Service) CreateShare(ctx context.Context, db bun.IDB, accountID uuid.UU
 
 	now := time.Now()
 	sh := &Share{
 		ID: id,
-		AccountID: accountID,
-		PublicID: pid,
-		SharedDirectoryID: sharedDirectoryID,
-		CreatedAt: now,
-		UpdatedAt: now,
+		DriveID: driveID,
+		CreatedByAccountID: createdByAccountID,
+		PublicID: pid,
+		SharedDirectoryID: sharedDirectoryID,
+		CreatedAt: now,
+		UpdatedAt: now,
 	}
 
 	if !opts.ExpiresAt.IsZero() {
@@ -165,11 +166,11 @@ func (s *Service) FindShareByPublicID(ctx context.Context, db bun.IDB, publicID
 	return sh, nil
 }
 
-func (s *Service) ListShares(ctx context.Context, db bun.IDB, accountID uuid.UUID, opts ListSharesOptions) ([]Share, error) {
+func (s *Service) ListShares(ctx context.Context, db bun.IDB, driveID uuid.UUID, opts ListSharesOptions) ([]Share, error) {
 	var shares []Share
 
 	q := db.NewSelect().Model(&shares).
-		Where("account_id = ?", accountID)
+		Where("drive_id = ?", driveID)
 
 	if !opts.IncludesExpired {
 		q = q.Where("expires_at IS NULL OR expires_at > NOW()")
@@ -260,7 +261,7 @@ func (s *Service) ResolveScopeForShare(ctx context.Context, db bun.IDB, consumer
 	}
 
 	scope := &virtualfs.Scope{
-		AccountID: share.AccountID,
+		DriveID: share.DriveID,
 		RootNodeID: share.SharedDirectoryID,
 	}
 
@@ -12,8 +12,9 @@ import (
 type Share struct {
 	bun.BaseModel `bun:"node_shares"`
 
 	ID uuid.UUID `bun:",pk,type:uuid" json:"-"`
-	AccountID uuid.UUID `bun:"account_id,notnull,type:uuid" json:"-"`
+	DriveID uuid.UUID `bun:"drive_id,notnull,type:uuid" json:"-"`
+	CreatedByAccountID uuid.UUID `bun:"created_by_account_id,notnull,type:uuid" json:"-"`
 	// Unique share identifier (public ID)
 	PublicID string `bun:"public_id,notnull" json:"id" example:"kRp2XYTq9A55"`
 	SharedDirectoryID uuid.UUID `bun:"shared_directory_id,notnull,type:uuid" json:"-"`
@@ -51,14 +51,14 @@ func (h *HTTPHandler) RegisterRoutes(api *virtualfs.ScopedRouter) {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param request body createUploadRequest true "Upload details"
 // @Success 200 {object} Upload "Upload session created"
 // @Failure 400 {object} map[string]string "Parent is not a directory"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Parent directory not found"
 // @Failure 409 {object} map[string]string "File with this name already exists"
-// @Router /accounts/{accountID}/uploads [post]
+// @Router /drives/{driveID}/uploads [post]
 func (h *HTTPHandler) Create(c *fiber.Ctx) error {
 	scopeAny := reqctx.VFSAccessScope(c)
 	scope, ok := scopeAny.(*virtualfs.Scope)
@@ -107,13 +107,13 @@ func (h *HTTPHandler) Create(c *fiber.Ctx) error {
 // @Tags uploads
 // @Accept application/octet-stream
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param uploadID path string true "Upload session ID"
 // @Param file body []byte true "File content (binary)"
 // @Success 204 {string} string "Content received successfully"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Upload session not found"
-// @Router /accounts/{accountID}/uploads/{uploadID}/content [put]
+// @Router /drives/{driveID}/uploads/{uploadID}/content [put]
 func (h *HTTPHandler) ReceiveContent(c *fiber.Ctx) error {
 	scopeAny := reqctx.VFSAccessScope(c)
 	scope, ok := scopeAny.(*virtualfs.Scope)
@@ -148,14 +148,14 @@ func (h *HTTPHandler) ReceiveContent(c *fiber.Ctx) error {
 // @Accept json
 // @Produce json
 // @Security BearerAuth
-// @Param accountID path string true "Account ID" format(uuid)
+// @Param driveID path string true "Drive ID" format(uuid)
 // @Param uploadID path string true "Upload session ID"
 // @Param request body updateUploadRequest true "Status update"
 // @Success 200 {object} Upload "Upload completed"
 // @Failure 400 {object} map[string]string "Content not uploaded yet or invalid status"
 // @Failure 401 {string} string "Not authenticated"
 // @Failure 404 {string} string "Upload session not found"
-// @Router /accounts/{accountID}/uploads/{uploadID} [patch]
+// @Router /drives/{driveID}/uploads/{uploadID} [patch]
 func (h *HTTPHandler) Update(c *fiber.Ctx) error {
 	scopeAny := reqctx.VFSAccessScope(c)
 	scope, ok := scopeAny.(*virtualfs.Scope)
@@ -103,7 +103,7 @@ func (s *Service) ReceiveUpload(ctx context.Context, db bun.IDB, uploadID string
 		return ErrUnauthorized
 	}
 
-	if upload.TargetNode.AccountID != scope.AccountID {
+	if upload.TargetNode.DriveID != scope.DriveID {
 		return ErrNotFound
 	}
 
@@ -132,7 +132,7 @@ func (s *Service) CompleteUpload(ctx context.Context, db bun.IDB, uploadID strin
 		return nil, ErrUnauthorized
 	}
 
-	if upload.TargetNode.AccountID != scope.AccountID {
+	if upload.TargetNode.DriveID != scope.DriveID {
 		return nil, ErrNotFound
 	}
 
@@ -25,5 +25,5 @@ type Upload struct {
 	// Internal target node reference
 	TargetNode *virtualfs.Node `json:"-" swaggerignore:"true"`
 	// URL to upload file content to
-	UploadURL string `json:"uploadUrl" example:"https://api.example.com/api/accounts/550e8400-e29b-41d4-a716-446655440000/uploads/xNq5RVBt3K88/content"`
+	UploadURL string `json:"uploadUrl" example:"https://api.example.com/api/drives/550e8400-e29b-41d4-a716-446655440000/uploads/xNq5RVBt3K88/content"`
 }
@@ -29,7 +29,7 @@ func (r *HierarchicalKeyResolver) Resolve(ctx context.Context, db bun.IDB, node
 		return "", err
 	}
 
-	return blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path)), nil
+	return blob.Key(fmt.Sprintf("%s/%s", node.DriveID, path)), nil
 }
 
 func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node *Node, allKeys []blob.Key) (*DeletionPlan, error) {
@@ -37,7 +37,7 @@ func (r *HierarchicalKeyResolver) ResolveDeletionKeys(ctx context.Context, node
 	if err != nil {
 		return nil, err
 	}
-	return &DeletionPlan{Prefix: blob.Key(fmt.Sprintf("%s/%s", node.AccountID, path))}, nil
+	return &DeletionPlan{Prefix: blob.Key(fmt.Sprintf("%s/%s", node.DriveID, path))}, nil
 }
 
 // ResolveBulkMoveOps computes blob move operations for nodes being moved to a new parent.
@@ -48,7 +48,7 @@ func (r *HierarchicalKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun
 		return nil, nil
 	}
 
-	accountID := nodes[0].AccountID
+	driveID := nodes[0].DriveID
 	oldParentID := nodes[0].ParentID
 
 	for _, node := range nodes[1:] {
@@ -70,8 +70,8 @@ func (r *HierarchicalKeyResolver) ResolveBulkMoveOps(ctx context.Context, db bun
 	// For each node, construct old and new keys using the precomputed parent paths
 	ops := make([]BlobMoveOp, len(nodes))
 	for i, node := range nodes {
-		oldKey := blob.Key(fmt.Sprintf("%s/%s/%s", accountID, oldParentPath, node.Name))
-		newKey := blob.Key(fmt.Sprintf("%s/%s/%s", accountID, newParentPath, node.Name))
+		oldKey := blob.Key(fmt.Sprintf("%s/%s/%s", driveID, oldParentPath, node.Name))
+		newKey := blob.Key(fmt.Sprintf("%s/%s/%s", driveID, newParentPath, node.Name))
 		ops[i] = BlobMoveOp{Node: node, OldKey: oldKey, NewKey: newKey}
 	}
 
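Net effect of this group of hunks: blob keys are namespaced by drive instead of account, so a file at path docs/report.pdf now lives under <driveID>/docs/report.pdf. A small, self-contained illustration of the key shape; the UUID and path below are made up:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// Illustration only: reproduces the key layout the resolver builds after this
// change (drive ID prefix followed by the node path).
func main() {
	driveID := uuid.MustParse("550e8400-e29b-41d4-a716-446655440000")
	path := "docs/report.pdf"
	fmt.Printf("%s/%s\n", driveID, path)
	// Output: 550e8400-e29b-41d4-a716-446655440000/docs/report.pdf
}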
@@ -25,13 +25,13 @@ const (
 type Node struct {
 	bun.BaseModel `bun:"vfs_nodes"`
 
 	ID uuid.UUID `bun:",pk,type:uuid"`
 	PublicID string `bun:"public_id,notnull"`
-	AccountID uuid.UUID `bun:"account_id,notnull,type:uuid"`
+	DriveID uuid.UUID `bun:"drive_id,notnull,type:uuid"`
 	ParentID uuid.UUID `bun:"parent_id,nullzero"`
 	Kind NodeKind `bun:"kind,notnull"`
 	Status NodeStatus `bun:"status,notnull"`
 	Name string `bun:"name,notnull"`
 
 	BlobKey blob.Key `bun:"blob_key,nullzero"`
 	Size int64 `bun:"size"`
@@ -5,8 +5,8 @@ import "github.com/google/uuid"
 // Scope defines the bounded view of the virtual filesystem that a caller is allowed to operate on.
 // It is populated by higher layers (account/share middleware) and enforced by VFS methods.
 type Scope struct {
-	// AccountID is the owner of the storage. It stays constant even when a share actor accesses it.
-	AccountID uuid.UUID
+	// DriveID is the owner of the storage (the tenant). It stays constant even when a share actor accesses it.
+	DriveID uuid.UUID
 
 	// RootNodeID is the top-most node the caller is allowed to traverse; all accesses must stay under it.
 	// It must be set for all VFS access operations.
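With this change, a full-drive scope pairs the drive ID with the drive's root node. A sketch of that wiring, using FindRootDirectory from later in this diff; the wrapper function and package name are illustrative, not part of this commit:

package scopesketch // illustrative package, not in this commit

import (
	"context"

	"github.com/get-drexa/drexa/internal/virtualfs"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
)

// scopeForDrive builds a Scope that covers an entire drive: DriveID pins the
// tenant, RootNodeID pins traversal to the drive's root directory.
func scopeForDrive(ctx context.Context, db bun.IDB, vfs *virtualfs.VirtualFS, driveID uuid.UUID) (*virtualfs.Scope, error) {
	root, err := vfs.FindRootDirectory(ctx, db, driveID)
	if err != nil {
		return nil, err
	}
	return &virtualfs.Scope{
		DriveID:    driveID,
		RootNodeID: root.ID,
	}, nil
}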
@@ -81,7 +81,7 @@ FROM node_paths
 WHERE id = ?;`
 
 func isScopeSet(scope *Scope) bool {
-	return scope != nil && scope.AccountID != uuid.Nil && scope.RootNodeID != uuid.Nil
+	return scope != nil && scope.DriveID != uuid.Nil && scope.RootNodeID != uuid.Nil
 }
 
 // canAccessNode checks if the scope permits the operation and allows access to the node.
@@ -6,7 +6,7 @@ import "github.com/gofiber/fiber/v2"
 // returns a valid *Scope for all registered routes.
 //
 // This is the base type for routers that provide VFS access scope.
-// More specific router types (like account.ScopedRouter) may embed this
+// More specific router types may embed this
 // to provide additional guarantees.
 type ScopedRouter struct {
 	fiber.Router
@@ -94,7 +94,7 @@ func (vfs *VirtualFS) FindNode(ctx context.Context, db bun.IDB, fileID string, s
 
 	var node Node
 	err := db.NewSelect().Model(&node).
-		Where("account_id = ?", scope.AccountID).
+		Where("drive_id = ?", scope.DriveID).
 		Where("id = ?", fileID).
 		Where("status = ?", NodeStatusReady).
 		Where("deleted_at IS NULL").
@@ -135,7 +135,7 @@ func (vfs *VirtualFS) FindNodesByPublicID(ctx context.Context, db bun.IDB, publi
 
 	var nodes []*Node
 	err := db.NewSelect().Model(&nodes).
-		Where("account_id = ?", scope.AccountID).
+		Where("drive_id = ?", scope.DriveID).
 		Where("public_id IN (?)", bun.In(publicIDs)).
 		Where("status = ?", NodeStatusReady).
 		Scan(ctx)
@@ -146,11 +146,11 @@ func (vfs *VirtualFS) FindNodesByPublicID(ctx context.Context, db bun.IDB, publi
 	return vfs.filterNodesByScope(ctx, db, scope, nodes)
 }
 
-func (vfs *VirtualFS) FindRootDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID) (*Node, error) {
+func (vfs *VirtualFS) FindRootDirectory(ctx context.Context, db bun.IDB, driveID uuid.UUID) (*Node, error) {
 	root := new(Node)
 
 	err := db.NewSelect().Model(root).
-		Where("account_id = ?", accountID).
+		Where("drive_id = ?", driveID).
 		Where("parent_id IS NULL").
 		Where("status = ?", NodeStatusReady).
 		Where("deleted_at IS NULL").
@@ -166,8 +166,8 @@ func (vfs *VirtualFS) FindRootDirectory(ctx context.Context, db bun.IDB, account
 	return root, nil
 }
 
-// CreateRootDirectory creates the account root directory node.
-func (vfs *VirtualFS) CreateRootDirectory(ctx context.Context, db bun.IDB, accountID uuid.UUID) (*Node, error) {
+// CreateRootDirectory creates the drive root directory node.
+func (vfs *VirtualFS) CreateRootDirectory(ctx context.Context, db bun.IDB, driveID uuid.UUID) (*Node, error) {
 	pid, err := vfs.generatePublicID()
 	if err != nil {
 		return nil, err
@@ -181,7 +181,7 @@ func (vfs *VirtualFS) CreateRootDirectory(ctx context.Context, db bun.IDB, accou
 	node := &Node{
 		ID: id,
 		PublicID: pid,
-		AccountID: accountID,
+		DriveID: driveID,
 		ParentID: uuid.Nil,
 		Kind: NodeKindDirectory,
 		Status: NodeStatusReady,
@@ -212,7 +212,7 @@ func (vfs *VirtualFS) ListChildren(ctx context.Context, db bun.IDB, node *Node,
 
 	var nodes []*Node
 	q := db.NewSelect().Model(&nodes).
-		Where("account_id = ?", node.AccountID).
+		Where("drive_id = ?", node.DriveID).
 		Where("parent_id = ?", node.ID).
 		Where("status = ?", NodeStatusReady).
 		Where("deleted_at IS NULL")
@@ -326,13 +326,13 @@ func (vfs *VirtualFS) CreateFile(ctx context.Context, db bun.IDB, opts CreateFil
 	}
 
 	node := Node{
 		ID: id,
 		PublicID: pid,
-		AccountID: scope.AccountID,
+		DriveID: scope.DriveID,
 		ParentID: opts.ParentID,
 		Kind: NodeKindFile,
 		Status: NodeStatusPending,
 		Name: opts.Name,
 	}
 
 	if vfs.keyResolver.ShouldPersistKey() {
@@ -492,13 +492,13 @@ func (vfs *VirtualFS) CreateDirectory(ctx context.Context, db bun.IDB, parentID
 	}
 
 	node := &Node{
 		ID: id,
 		PublicID: pid,
-		AccountID: scope.AccountID,
+		DriveID: scope.DriveID,
 		ParentID: parentID,
 		Kind: NodeKindDirectory,
 		Status: NodeStatusReady,
 		Name: name,
 	}
 
 	_, err = db.NewInsert().Model(node).Exec(ctx)
@@ -739,13 +739,13 @@ func (vfs *VirtualFS) MoveNodesInSameDirectory(ctx context.Context, db bun.IDB,
 		nodeNames[i] = node.Name
 	}
 
 	var destinationConflicts []*Node
 	err = db.NewSelect().Model(&destinationConflicts).
-		Where("account_id = ?", allowedNodes[0].AccountID).
+		Where("drive_id = ?", allowedNodes[0].DriveID).
 		Where("parent_id = ?", newParentID).
 		Where("deleted_at IS NULL").
 		Where("name IN (?)", bun.In(nodeNames)).
 		Scan(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -0,0 +1,9 @@
import { createFileRoute } from '@tanstack/react-router'

export const Route = createFileRoute('/_authenticated')({
  component: RouteComponent,
})

function RouteComponent() {
  return <div>Hello "/_authenticated"!</div>
}
@@ -3,11 +3,12 @@ import { atom } from "jotai"
 
 export const Account = type({
   id: "string",
+  orgId: "string",
   userId: "string",
+  role: "'admin'|'member'",
+  status: "'invited'|'active'|'suspended'",
   createdAt: "string.date.iso.parse",
   updatedAt: "string.date.iso.parse",
-  storageUsageBytes: "number",
-  storageQuotaBytes: "number",
 })
 export type Account = typeof Account.infer
 
@@ -5,6 +5,8 @@ import { accountsQuery } from "@/account/api"
 import { fetchApi } from "@/lib/api"
 import { currentUserQuery } from "@/user/api"
 import { User } from "@/user/user"
+import { drivesQuery } from "@/drive/api"
+import { Drive } from "@/drive/drive"
 
 const LoginResponseSchema = type({
   user: User,
@@ -13,6 +15,7 @@ const LoginResponseSchema = type({
 const SignUpResponse = type({
   account: Account,
   user: User,
+  drive: Drive,
 })
 
 export const loginMutation = mutationOptions({
@@ -29,6 +32,7 @@ export const loginMutation = mutationOptions({
   onSuccess: (data, _, __, context) => {
     context.client.setQueryData(currentUserQuery.queryKey, data.user)
     context.client.invalidateQueries(accountsQuery)
+    context.client.invalidateQueries(drivesQuery)
   },
 })
 
@@ -50,5 +54,6 @@ export const signUpMutation = mutationOptions({
   onSuccess: (data, _, __, context) => {
     context.client.setQueryData(currentUserQuery.queryKey, data.user)
     context.client.setQueryData(accountsQuery.queryKey, [data.account])
+    context.client.setQueryData(drivesQuery.queryKey, [data.drive])
   },
 })
apps/drive-web/src/drive/api.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
import { queryOptions } from "@tanstack/react-query"
import { fetchApi } from "@/lib/api"
import { Drive } from "./drive"

export const drivesQuery = queryOptions({
  queryKey: ["drives"],
  queryFn: async () =>
    fetchApi("GET", "/drives", {
      returns: Drive.array(),
    }).then(([_, result]) => result),
})
apps/drive-web/src/drive/drive.ts (new file, 17 lines)
@@ -0,0 +1,17 @@
import { type } from "arktype"
import { atom } from "jotai"

export const Drive = type({
  id: "string",
  publicId: "string",
  orgId: "string",
  "ownerAccountId?": "string",
  name: "string",
  createdAt: "string.date.iso.parse",
  updatedAt: "string.date.iso.parse",
  storageUsageBytes: "number",
  storageQuotaBytes: "number",
})
export type Drive = typeof Drive.infer

export const currentDriveAtom = atom<Drive | null>(null)
@@ -31,7 +31,7 @@ import {
 import { formatError } from "@/lib/error"
 import { directoryContentQueryKey } from "@/vfs/api"
 import type { DirectoryInfoWithPath } from "@/vfs/vfs"
-import { currentAccountAtom } from "../account/account"
+import { currentDriveAtom } from "@/drive/drive"
 import {
   clearAllFileUploadStatusesAtom,
   clearFileUploadStatusesAtom,
@@ -68,12 +68,12 @@ function useUploadFilesAtom({
     () =>
       mutationOptions({
         mutationFn: async (files: PickedFile[]) => {
-          const account = store.get(currentAccountAtom)
-          if (!account) throw new Error("No account selected")
+          const drive = store.get(currentDriveAtom)
+          if (!drive) throw new Error("No drive selected")
 
           const promises = files.map((pickedFile) =>
             uploadFile({
-              account,
+              drive,
               file: pickedFile.file,
               targetDirectory,
               onStart: () => {
@@ -136,11 +136,11 @@ function useUploadFilesAtom({
           }
 
           // Invalidate all queries for the target directory (with any params)
-          const account = store.get(currentAccountAtom)
-          if (account) {
+          const drive = store.get(currentDriveAtom)
+          if (drive) {
             client.invalidateQueries({
               queryKey: directoryContentQueryKey(
-                account.id,
+                drive.id,
                 targetDirectory.id,
               ),
             })
@@ -1,6 +1,6 @@
 import { type } from "arktype"
-import type { Account } from "@/account/account"
 import { ApiError, fetchApi } from "@/lib/api"
+import type { Drive } from "@/drive/drive"
 import type { DirectoryInfoWithPath } from "@/vfs/vfs"
 
 export const UploadStatus = type.enumerated("pending", "completed", "failed")
@@ -14,13 +14,13 @@ export const Upload = type({
 export type Upload = typeof Upload.infer
 
 export async function uploadFile({
-  account,
+  drive,
   file,
   targetDirectory,
   onStart,
   onProgress,
 }: {
-  account: Account
+  drive: Drive
   file: File
   targetDirectory: DirectoryInfoWithPath
   onStart: (xhr: XMLHttpRequest) => void
@@ -28,7 +28,7 @@ export async function uploadFile({
 }) {
   const [, upload] = await fetchApi(
     "POST",
-    `/accounts/${account.id}/uploads`,
+    `/drives/${drive.id}/uploads`,
     {
       body: JSON.stringify({
         name: file.name,
@@ -45,7 +45,7 @@ export async function uploadFile({
     onProgress,
   })
 
-  await fetchApi("PATCH", `/accounts/${account.id}/uploads/${upload.id}`, {
+  await fetchApi("PATCH", `/drives/${drive.id}/uploads/${upload.id}`, {
     body: JSON.stringify({
       status: "completed",
     }),
@@ -5,15 +5,21 @@ export type ApiRoute =
   | "/auth/tokens"
   | "/accounts"
   | `/accounts/${string}`
-  | `/accounts/${string}/uploads`
-  | `/accounts/${string}/uploads/${string}/content`
-  | `/accounts/${string}/uploads/${string}`
-  | `/accounts/${string}/files${string}`
-  | `/accounts/${string}/files/${string}`
-  | `/accounts/${string}/files/${string}/content`
-  | `/accounts/${string}/directories`
-  | `/accounts/${string}/directories/${string}`
-  | `/accounts/${string}/directories/${string}/content`
+  | "/drives"
+  | `/drives/${string}`
+  | `/drives/${string}/uploads`
+  | `/drives/${string}/uploads/${string}/content`
+  | `/drives/${string}/uploads/${string}`
+  | `/drives/${string}/files${string}`
+  | `/drives/${string}/files/${string}`
+  | `/drives/${string}/files/${string}/content`
+  | `/drives/${string}/files/${string}/shares${string}`
+  | `/drives/${string}/directories`
+  | `/drives/${string}/directories/${string}`
+  | `/drives/${string}/directories/${string}/content`
+  | `/drives/${string}/directories/${string}/shares${string}`
+  | `/drives/${string}/shares`
+  | `/drives/${string}/shares/${string}`
   | `/shares/${string}`
   | `/shares/${string}/directories${string}`
   | `/shares/${string}/files${string}`
@@ -2,30 +2,30 @@ import { createFileRoute, Navigate, Outlet } from "@tanstack/react-router"
 import { useAtomValue } from "jotai"
 import { atomEffect } from "jotai-effect"
 import { atomWithQuery } from "jotai-tanstack-query"
-import { accountsQuery } from "@/account/api"
 import { LoadingSpinner } from "@/components/ui/loading-spinner"
-import { currentAccountAtom } from "../account/account"
+import { drivesQuery } from "@/drive/api"
+import { currentDriveAtom } from "@/drive/drive"
 
 export const Route = createFileRoute("/_authenticated")({
   component: AuthenticatedLayout,
 })
 
-const accountsAtom = atomWithQuery(() => accountsQuery)
-const selectFirstAccountEffect = atomEffect((get, set) => {
-  const { data: accounts } = get(accountsAtom)
-  const firstAccount = accounts?.[0]
-  if (firstAccount && get.peek(currentAccountAtom) === null) {
-    set(currentAccountAtom, firstAccount)
+const drivesAtom = atomWithQuery(() => drivesQuery)
+const selectFirstDriveEffect = atomEffect((get, set) => {
+  const { data: drives } = get(drivesAtom)
+  const firstDrive = drives?.[0]
+  if (firstDrive && get.peek(currentDriveAtom) === null) {
+    set(currentDriveAtom, firstDrive)
   }
 })
 
 function AuthenticatedLayout() {
-  const { data: accounts, isLoading: isLoadingAccounts } =
-    useAtomValue(accountsAtom)
+  const { data: drives, isLoading: isLoadingDrives } =
+    useAtomValue(drivesAtom)
 
-  useAtomValue(selectFirstAccountEffect)
+  useAtomValue(selectFirstDriveEffect)
 
-  if (isLoadingAccounts) {
+  if (isLoadingDrives) {
     return (
       <div className="flex h-screen w-full items-center justify-center">
         <LoadingSpinner className="size-10" />
@@ -33,7 +33,7 @@ function AuthenticatedLayout() {
     )
   }
 
-  if (!accounts) {
+  if (!drives) {
     return <Navigate replace to="/login" />
   }
 
@@ -14,7 +14,7 @@ import {
|
|||||||
} from "lucide-react"
|
} from "lucide-react"
|
||||||
import { lazy, Suspense, useCallback, useContext } from "react"
|
import { lazy, Suspense, useCallback, useContext } from "react"
|
||||||
import { toast } from "sonner"
|
import { toast } from "sonner"
|
||||||
import { currentAccountAtom } from "@/account/account"
|
import { currentDriveAtom } from "@/drive/drive"
|
||||||
import { DirectoryIcon } from "@/components/icons/directory-icon"
|
import { DirectoryIcon } from "@/components/icons/directory-icon"
|
||||||
import { TextFileIcon } from "@/components/icons/text-file-icon"
|
import { TextFileIcon } from "@/components/icons/text-file-icon"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
@@ -378,7 +378,7 @@ function DirectoryContentContextMenu({
|
|||||||
const [target, setTarget] = useAtom(contextMenuTargetItemsAtom)
|
const [target, setTarget] = useAtom(contextMenuTargetItemsAtom)
|
||||||
const setBackgroundTaskProgress = useSetAtom(backgroundTaskProgressAtom)
|
const setBackgroundTaskProgress = useSetAtom(backgroundTaskProgressAtom)
|
||||||
const setCutItems = useSetAtom(cutItemsAtom)
|
const setCutItems = useSetAtom(cutItemsAtom)
|
||||||
const account = useAtomValue(currentAccountAtom)
|
const drive = useAtomValue(currentDriveAtom)
|
||||||
const { directory } = useContext(DirectoryPageContext)
|
const { directory } = useContext(DirectoryPageContext)
|
||||||
const search = Route.useSearch()
|
const search = Route.useSearch()
|
||||||
const setActiveDialogData = useSetAtom(activeDialogDataAtom)
|
const setActiveDialogData = useSetAtom(activeDialogDataAtom)
|
||||||
@@ -390,11 +390,11 @@ function DirectoryContentContextMenu({
|
|||||||
setBackgroundTaskProgress({
|
setBackgroundTaskProgress({
|
||||||
label: "Moving items to trash…",
|
label: "Moving items to trash…",
|
||||||
})
|
})
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
return optimisticallyRemoveDirectoryItems(client, {
|
return optimisticallyRemoveDirectoryItems(client, {
|
||||||
queryKey: directoryContentQueryKey(account.id, directory.id, {
|
queryKey: directoryContentQueryKey(drive.id, directory.id, {
|
||||||
orderBy: search.orderBy,
|
orderBy: search.orderBy,
|
||||||
direction: search.direction,
|
direction: search.direction,
|
||||||
}),
|
}),
|
||||||
|
|||||||
@@ -1,9 +1,8 @@
|
|||||||
import { useQuery } from "@tanstack/react-query"
|
import { useQuery } from "@tanstack/react-query"
|
||||||
import { createFileRoute } from "@tanstack/react-router"
|
import { createFileRoute } from "@tanstack/react-router"
|
||||||
import { type } from "arktype"
|
import { type } from "arktype"
|
||||||
import { atom, useAtomValue } from "jotai"
|
import { atom } from "jotai"
|
||||||
import { useCallback, useMemo } from "react"
|
import { useCallback, useMemo } from "react"
|
||||||
import { currentAccountAtom } from "@/account/account"
|
|
||||||
import { DirectoryPageContext } from "@/directories/directory-page/context"
|
import { DirectoryPageContext } from "@/directories/directory-page/context"
|
||||||
import {
|
import {
|
||||||
DEFAULT_DIRECTORY_CONTENT_ORDER_BY,
|
DEFAULT_DIRECTORY_CONTENT_ORDER_BY,
|
||||||
@@ -48,11 +47,9 @@ function RouteComponent() {
|
|||||||
const { shareId, directoryId } = Route.useParams()
|
const { shareId, directoryId } = Route.useParams()
|
||||||
const search = Route.useSearch()
|
const search = Route.useSearch()
|
||||||
const navigate = Route.useNavigate()
|
const navigate = Route.useNavigate()
|
||||||
const account = useAtomValue(currentAccountAtom)
|
|
||||||
const accountId = account?.id
|
|
||||||
|
|
||||||
const { data: directoryInfo, isLoading: isLoadingDirectoryInfo } = useQuery(
|
const { data: directoryInfo, isLoading: isLoadingDirectoryInfo } = useQuery(
|
||||||
shareDirectoryInfoQuery({ shareId, directoryId, accountId }),
|
shareDirectoryInfoQuery({ shareId, directoryId }),
|
||||||
)
|
)
|
||||||
|
|
||||||
const directoryUrlById = useCallback(
|
const directoryUrlById = useCallback(
|
||||||
@@ -74,9 +71,8 @@ function RouteComponent() {
|
|||||||
orderBy: search.orderBy,
|
orderBy: search.orderBy,
|
||||||
direction: search.direction,
|
direction: search.direction,
|
||||||
limit: 100,
|
limit: 100,
|
||||||
accountId,
|
|
||||||
}),
|
}),
|
||||||
[shareId, directoryId, search.orderBy, search.direction, accountId],
|
[shareId, directoryId, search.orderBy, search.direction],
|
||||||
)
|
)
|
||||||
|
|
||||||
const applySorting = useCallback(
|
const applySorting = useCallback(
|
||||||
@@ -99,11 +95,10 @@ function RouteComponent() {
|
|||||||
const url = shareFileContentUrl({
|
const url = shareFileContentUrl({
|
||||||
shareId,
|
shareId,
|
||||||
fileId: file.id,
|
fileId: file.id,
|
||||||
accountId,
|
|
||||||
})
|
})
|
||||||
window.open(url, "_blank", "noopener,noreferrer")
|
window.open(url, "_blank", "noopener,noreferrer")
|
||||||
},
|
},
|
||||||
[shareId, accountId],
|
[shareId],
|
||||||
)
|
)
|
||||||
|
|
||||||
if (isLoadingDirectoryInfo) {
|
if (isLoadingDirectoryInfo) {
|
||||||
|
|||||||
@@ -33,34 +33,29 @@ function buildQueryString(params: Record<string, string | undefined>): string {
|
|||||||
type ShareDirectoryInfoQueryParams = {
|
type ShareDirectoryInfoQueryParams = {
|
||||||
shareId: string
|
shareId: string
|
||||||
directoryId: string
|
directoryId: string
|
||||||
accountId?: string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export const shareDirectoryInfoQueryKey = (
|
export const shareDirectoryInfoQueryKey = (
|
||||||
shareId: string,
|
shareId: string,
|
||||||
directoryId: string,
|
directoryId: string,
|
||||||
accountId?: string,
|
|
||||||
): readonly unknown[] => [
|
): readonly unknown[] => [
|
||||||
"shares",
|
"shares",
|
||||||
shareId,
|
shareId,
|
||||||
"directories",
|
"directories",
|
||||||
directoryId,
|
directoryId,
|
||||||
"info",
|
"info",
|
||||||
accountId ?? "public",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
export function shareDirectoryInfoQuery({
|
export function shareDirectoryInfoQuery({
|
||||||
shareId,
|
shareId,
|
||||||
directoryId,
|
directoryId,
|
||||||
accountId,
|
|
||||||
}: ShareDirectoryInfoQueryParams) {
|
}: ShareDirectoryInfoQueryParams) {
|
||||||
const queryString = buildQueryString({
|
const queryString = buildQueryString({
|
||||||
include: "path",
|
include: "path",
|
||||||
accountId,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
return queryOptions({
|
return queryOptions({
|
||||||
queryKey: shareDirectoryInfoQueryKey(shareId, directoryId, accountId),
|
queryKey: shareDirectoryInfoQueryKey(shareId, directoryId),
|
||||||
queryFn: () =>
|
queryFn: () =>
|
||||||
fetchApi(
|
fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
@@ -76,7 +71,6 @@ type ShareDirectoryContentQueryParams = {
|
|||||||
orderBy: DirectoryContentOrderBy
|
orderBy: DirectoryContentOrderBy
|
||||||
direction: DirectoryContentOrderDirection
|
direction: DirectoryContentOrderDirection
|
||||||
limit: number
|
limit: number
|
||||||
accountId?: string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export const shareDirectoryContentQueryKey = (
|
export const shareDirectoryContentQueryKey = (
|
||||||
@@ -85,7 +79,6 @@ export const shareDirectoryContentQueryKey = (
|
|||||||
params?: {
|
params?: {
|
||||||
orderBy?: DirectoryContentOrderBy
|
orderBy?: DirectoryContentOrderBy
|
||||||
direction?: DirectoryContentOrderDirection
|
direction?: DirectoryContentOrderDirection
|
||||||
accountId?: string
|
|
||||||
},
|
},
|
||||||
): readonly unknown[] => [
|
): readonly unknown[] => [
|
||||||
"shares",
|
"shares",
|
||||||
@@ -98,7 +91,6 @@ export const shareDirectoryContentQueryKey = (
|
|||||||
{
|
{
|
||||||
orderBy: params.orderBy,
|
orderBy: params.orderBy,
|
||||||
direction: params.direction,
|
direction: params.direction,
|
||||||
accountId: params.accountId,
|
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
: []),
|
: []),
|
||||||
@@ -117,13 +109,11 @@ export function shareDirectoryContentQuery({
|
|||||||
orderBy,
|
orderBy,
|
||||||
direction,
|
direction,
|
||||||
limit,
|
limit,
|
||||||
accountId,
|
|
||||||
}: ShareDirectoryContentQueryParams) {
|
}: ShareDirectoryContentQueryParams) {
|
||||||
return infiniteQueryOptions({
|
return infiniteQueryOptions({
|
||||||
queryKey: shareDirectoryContentQueryKey(shareId, directoryId, {
|
queryKey: shareDirectoryContentQueryKey(shareId, directoryId, {
|
||||||
orderBy,
|
orderBy,
|
||||||
direction,
|
direction,
|
||||||
accountId,
|
|
||||||
}),
|
}),
|
||||||
initialPageParam: {
|
initialPageParam: {
|
||||||
orderBy,
|
orderBy,
|
||||||
@@ -137,7 +127,6 @@ export function shareDirectoryContentQuery({
|
|||||||
dir: pageParam.direction,
|
dir: pageParam.direction,
|
||||||
limit: String(pageParam.limit),
|
limit: String(pageParam.limit),
|
||||||
cursor: pageParam.cursor || undefined,
|
cursor: pageParam.cursor || undefined,
|
||||||
accountId,
|
|
||||||
})
|
})
|
||||||
return fetchApi(
|
return fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
@@ -158,17 +147,12 @@ export function shareDirectoryContentQuery({
|
|||||||
type ShareFileContentUrlParams = {
|
type ShareFileContentUrlParams = {
|
||||||
shareId: string
|
shareId: string
|
||||||
fileId: string
|
fileId: string
|
||||||
accountId?: string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export function shareFileContentUrl({
|
export function shareFileContentUrl({
|
||||||
shareId,
|
shareId,
|
||||||
fileId,
|
fileId,
|
||||||
accountId,
|
|
||||||
}: ShareFileContentUrlParams): string {
|
}: ShareFileContentUrlParams): string {
|
||||||
const url = buildShareApiUrl(`/shares/${shareId}/files/${fileId}/content`)
|
const url = buildShareApiUrl(`/shares/${shareId}/files/${fileId}/content`)
|
||||||
if (accountId) {
|
|
||||||
url.searchParams.set("accountId", accountId)
|
|
||||||
}
|
|
||||||
return url.toString()
|
return url.toString()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,20 +1,20 @@
|
|||||||
import { mutationOptions, queryOptions, skipToken } from "@tanstack/react-query"
|
import { mutationOptions, queryOptions, skipToken } from "@tanstack/react-query"
|
||||||
import { atom } from "jotai"
|
import { atom } from "jotai"
|
||||||
import { atomFamily } from "jotai/utils"
|
import { atomFamily } from "jotai/utils"
|
||||||
import { currentAccountAtom } from "@/account/account"
|
|
||||||
import { fetchApi, Nothing } from "@/lib/api"
|
import { fetchApi, Nothing } from "@/lib/api"
|
||||||
|
import { currentDriveAtom } from "@/drive/drive"
|
||||||
import { Share } from "./share"
|
import { Share } from "./share"
|
||||||
|
|
||||||
export const fileSharesQueryAtom = atomFamily((fileId: string) =>
|
export const fileSharesQueryAtom = atomFamily((fileId: string) =>
|
||||||
atom((get) => {
|
atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return queryOptions({
|
return queryOptions({
|
||||||
queryKey: ["accounts", account?.id, "shares", { fileId }],
|
queryKey: ["drives", drive?.id, "shares", { fileId }],
|
||||||
queryFn: account
|
queryFn: drive
|
||||||
? () =>
|
? () =>
|
||||||
fetchApi(
|
fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
`/accounts/${account.id}/files/${fileId}/shares?includesExpired=true`,
|
`/drives/${drive.id}/files/${fileId}/shares?includesExpired=true`,
|
||||||
{ returns: Share.array() },
|
{ returns: Share.array() },
|
||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
: skipToken,
|
: skipToken,
|
||||||
@@ -24,14 +24,14 @@ export const fileSharesQueryAtom = atomFamily((fileId: string) =>
|
|||||||
|
|
||||||
export const directorySharesQueryAtom = atomFamily((directoryId: string) =>
|
export const directorySharesQueryAtom = atomFamily((directoryId: string) =>
|
||||||
atom((get) => {
|
atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return queryOptions({
|
return queryOptions({
|
||||||
queryKey: ["accounts", account?.id, "shares", { directoryId }],
|
queryKey: ["drives", drive?.id, "shares", { directoryId }],
|
||||||
queryFn: account
|
queryFn: drive
|
||||||
? () =>
|
? () =>
|
||||||
fetchApi(
|
fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
`/accounts/${account.id}/directories/${directoryId}/shares?includesExpired=true`,
|
`/drives/${drive.id}/directories/${directoryId}/shares?includesExpired=true`,
|
||||||
{ returns: Share.array() },
|
{ returns: Share.array() },
|
||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
: skipToken,
|
: skipToken,
|
||||||
@@ -42,12 +42,12 @@ export const directorySharesQueryAtom = atomFamily((directoryId: string) =>
|
|||||||
export const createShareMutationAtom = atom((get) =>
|
export const createShareMutationAtom = atom((get) =>
|
||||||
mutationOptions({
|
mutationOptions({
|
||||||
mutationFn: async ({ items }: { items: string[] }) => {
|
mutationFn: async ({ items }: { items: string[] }) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) throw new Error("No account selected")
|
if (!drive) throw new Error("No drive selected")
|
||||||
|
|
||||||
const [_, result] = await fetchApi(
|
const [_, result] = await fetchApi(
|
||||||
"POST",
|
"POST",
|
||||||
`/accounts/${account.id}/shares`,
|
`/drives/${drive.id}/shares`,
|
||||||
{
|
{
|
||||||
body: JSON.stringify({ items }),
|
body: JSON.stringify({ items }),
|
||||||
returns: Share,
|
returns: Share,
|
||||||
@@ -62,12 +62,12 @@ export const createShareMutationAtom = atom((get) =>
|
|||||||
export const deleteShareMutationAtom = atom((get) =>
|
export const deleteShareMutationAtom = atom((get) =>
|
||||||
mutationOptions({
|
mutationOptions({
|
||||||
mutationFn: async ({ shareId }: { shareId: string }) => {
|
mutationFn: async ({ shareId }: { shareId: string }) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) throw new Error("No account selected")
|
if (!drive) throw new Error("No drive selected")
|
||||||
|
|
||||||
await fetchApi(
|
await fetchApi(
|
||||||
"DELETE",
|
"DELETE",
|
||||||
`/accounts/${account.id}/shares/${shareId}`,
|
`/drives/${drive.id}/shares/${shareId}`,
|
||||||
{ returns: Nothing },
|
{ returns: Nothing },
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
@@ -83,12 +83,12 @@ export const updateShareMutationAtom = atom((get) =>
|
|||||||
shareId: string
|
shareId: string
|
||||||
expiresAt?: Date | null
|
expiresAt?: Date | null
|
||||||
}) => {
|
}) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) throw new Error("No account selected")
|
if (!drive) throw new Error("No drive selected")
|
||||||
|
|
||||||
await fetchApi(
|
await fetchApi(
|
||||||
"PATCH",
|
"PATCH",
|
||||||
`/accounts/${account.id}/shares/${shareId}`,
|
`/drives/${drive.id}/shares/${shareId}`,
|
||||||
{ body: JSON.stringify({ expiresAt }), returns: Share },
|
{ body: JSON.stringify({ expiresAt }), returns: Share },
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import {
|
|||||||
import { type } from "arktype"
|
import { type } from "arktype"
|
||||||
import { atom } from "jotai"
|
import { atom } from "jotai"
|
||||||
import { atomFamily } from "jotai/utils"
|
import { atomFamily } from "jotai/utils"
|
||||||
import { currentAccountAtom } from "@/account/account"
|
|
||||||
import { fetchApi } from "@/lib/api"
|
import { fetchApi } from "@/lib/api"
|
||||||
|
import { currentDriveAtom } from "@/drive/drive"
|
||||||
import {
|
import {
|
||||||
DirectoryContent,
|
DirectoryContent,
|
||||||
DirectoryInfo,
|
DirectoryInfo,
|
||||||
@@ -30,23 +30,23 @@ export type DirectoryContentResponseType = typeof DirectoryContentResponse.infer
|
|||||||
*/
|
*/
|
||||||
export const fileUrlAtom = atomFamily((fileId: string) =>
|
export const fileUrlAtom = atomFamily((fileId: string) =>
|
||||||
atom((get) => {
|
atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return `${import.meta.env.VITE_API_URL}/accounts/${account.id}/files/${fileId}/content`
|
return `${import.meta.env.VITE_API_URL}/drives/${drive.id}/files/${fileId}/content`
|
||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
export const rootDirectoryQueryAtom = atom((get) => {
|
export const rootDirectoryQueryAtom = atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return queryOptions({
|
return queryOptions({
|
||||||
queryKey: ["accounts", account?.id, "directories", "root"],
|
queryKey: ["drives", drive?.id, "directories", "root"],
|
||||||
queryFn: account
|
queryFn: drive
|
||||||
? () =>
|
? () =>
|
||||||
fetchApi(
|
fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
`/accounts/${account.id}/directories/root?include=path`,
|
`/drives/${drive.id}/directories/root?include=path`,
|
||||||
{ returns: DirectoryInfoWithPath },
|
{ returns: DirectoryInfoWithPath },
|
||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
: skipToken,
|
: skipToken,
|
||||||
@@ -55,14 +55,14 @@ export const rootDirectoryQueryAtom = atom((get) => {
|
|||||||
|
|
||||||
export const directoryInfoQueryAtom = atomFamily((directoryId: string) =>
|
export const directoryInfoQueryAtom = atomFamily((directoryId: string) =>
|
||||||
atom((get) => {
|
atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return queryOptions({
|
return queryOptions({
|
||||||
queryKey: ["accounts", account?.id, "directories", directoryId],
|
queryKey: ["drives", drive?.id, "directories", directoryId],
|
||||||
queryFn: account
|
queryFn: drive
|
||||||
? () =>
|
? () =>
|
||||||
fetchApi(
|
fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
`/accounts/${account.id}/directories/${directoryId}?include=path`,
|
`/drives/${drive.id}/directories/${directoryId}?include=path`,
|
||||||
{ returns: DirectoryInfoWithPath },
|
{ returns: DirectoryInfoWithPath },
|
||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
: skipToken,
|
: skipToken,
|
||||||
@@ -100,15 +100,15 @@ type DirectoryContentPageParam = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export const directoryContentQueryKey = (
|
export const directoryContentQueryKey = (
|
||||||
accountId: string | undefined,
|
driveId: string | undefined,
|
||||||
directoryId: string,
|
directoryId: string,
|
||||||
params?: {
|
params?: {
|
||||||
orderBy?: DirectoryContentOrderBy
|
orderBy?: DirectoryContentOrderBy
|
||||||
direction?: DirectoryContentOrderDirection
|
direction?: DirectoryContentOrderDirection
|
||||||
},
|
},
|
||||||
): readonly unknown[] => [
|
): readonly unknown[] => [
|
||||||
"accounts",
|
"drives",
|
||||||
accountId,
|
driveId,
|
||||||
"directories",
|
"directories",
|
||||||
directoryId,
|
directoryId,
|
||||||
"content",
|
"content",
|
||||||
@@ -126,9 +126,9 @@ export type DirectoryContentQuery = ReturnType<
|
|||||||
export const directoryContentQueryAtom = atomFamily(
|
export const directoryContentQueryAtom = atomFamily(
|
||||||
({ directoryId, orderBy, direction, limit }: DirectoryContentQueryParams) =>
|
({ directoryId, orderBy, direction, limit }: DirectoryContentQueryParams) =>
|
||||||
atom((get) => {
|
atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return infiniteQueryOptions({
|
return infiniteQueryOptions({
|
||||||
queryKey: directoryContentQueryKey(account?.id, directoryId, {
|
queryKey: directoryContentQueryKey(drive?.id, directoryId, {
|
||||||
orderBy,
|
orderBy,
|
||||||
direction,
|
direction,
|
||||||
}),
|
}),
|
||||||
@@ -139,13 +139,13 @@ export const directoryContentQueryAtom = atomFamily(
|
|||||||
cursor: "",
|
cursor: "",
|
||||||
},
|
},
|
||||||
queryFn: ({ pageParam }) =>
|
queryFn: ({ pageParam }) =>
|
||||||
account
|
drive
|
||||||
? fetchApi(
|
? fetchApi(
|
||||||
"GET",
|
"GET",
|
||||||
`/accounts/${account.id}/directories/${directoryId}/content?orderBy=${pageParam.orderBy}&dir=${pageParam.direction}&limit=${pageParam.limit}${pageParam.cursor ? `&cursor=${pageParam.cursor}` : ""}`,
|
`/drives/${drive.id}/directories/${directoryId}/content?orderBy=${pageParam.orderBy}&dir=${pageParam.direction}&limit=${pageParam.limit}${pageParam.cursor ? `&cursor=${pageParam.cursor}` : ""}`,
|
||||||
{ returns: DirectoryContentResponse },
|
{ returns: DirectoryContentResponse },
|
||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
: Promise.reject(new Error("No account selected")),
|
: Promise.reject(new Error("No drive selected")),
|
||||||
getNextPageParam: (lastPage, _pages, lastPageParam) =>
|
getNextPageParam: (lastPage, _pages, lastPageParam) =>
|
||||||
lastPage.nextCursor
|
lastPage.nextCursor
|
||||||
? {
|
? {
|
||||||
@@ -163,13 +163,13 @@ export const directoryContentQueryAtom = atomFamily(
|
|||||||
)
|
)
|
||||||
|
|
||||||
export const createDirectoryMutationAtom = atom((get) => {
|
export const createDirectoryMutationAtom = atom((get) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
return mutationOptions({
|
return mutationOptions({
|
||||||
mutationFn: async (data: { name: string; parentId: string }) => {
|
mutationFn: async (data: { name: string; parentId: string }) => {
|
||||||
if (!account) throw new Error("No account selected")
|
if (!drive) throw new Error("No drive selected")
|
||||||
return fetchApi(
|
return fetchApi(
|
||||||
"POST",
|
"POST",
|
||||||
`/accounts/${account.id}/directories?include=path`,
|
`/drives/${drive.id}/directories?include=path`,
|
||||||
{
|
{
|
||||||
body: JSON.stringify({
|
body: JSON.stringify({
|
||||||
name: data.name,
|
name: data.name,
|
||||||
@@ -180,9 +180,9 @@ export const createDirectoryMutationAtom = atom((get) => {
|
|||||||
).then(([_, result]) => result)
|
).then(([_, result]) => result)
|
||||||
},
|
},
|
||||||
onSuccess: (_data, { parentId }, _context, { client }) => {
|
onSuccess: (_data, { parentId }, _context, { client }) => {
|
||||||
if (account) {
|
if (drive) {
|
||||||
client.invalidateQueries({
|
client.invalidateQueries({
|
||||||
queryKey: directoryContentQueryKey(account.id, parentId),
|
queryKey: directoryContentQueryKey(drive.id, parentId),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -209,9 +209,9 @@ export const moveDirectoryItemsMutationAtom = atom((get) =>
|
|||||||
targetDirectory: DirectoryInfo | string
|
targetDirectory: DirectoryInfo | string
|
||||||
items: DirectoryItem[]
|
items: DirectoryItem[]
|
||||||
}) => {
|
}) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
throw new Error("Account not found")
|
throw new Error("Drive not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
const dirId =
|
const dirId =
|
||||||
@@ -221,7 +221,7 @@ export const moveDirectoryItemsMutationAtom = atom((get) =>
|
|||||||
|
|
||||||
const [, result] = await fetchApi(
|
const [, result] = await fetchApi(
|
||||||
"POST",
|
"POST",
|
||||||
`/accounts/${account.id}/directories/${dirId}/content`,
|
`/drives/${drive.id}/directories/${dirId}/content`,
|
||||||
{
|
{
|
||||||
body: JSON.stringify({
|
body: JSON.stringify({
|
||||||
items: items.map((item) => item.id),
|
items: items.map((item) => item.id),
|
||||||
@@ -232,8 +232,8 @@ export const moveDirectoryItemsMutationAtom = atom((get) =>
|
|||||||
return result
|
return result
|
||||||
},
|
},
|
||||||
onSuccess: (_data, { targetDirectory, items }, _result, { client }) => {
|
onSuccess: (_data, { targetDirectory, items }, _result, { client }) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) return
|
if (!drive) return
|
||||||
|
|
||||||
const dirId =
|
const dirId =
|
||||||
typeof targetDirectory === "string"
|
typeof targetDirectory === "string"
|
||||||
@@ -241,13 +241,13 @@ export const moveDirectoryItemsMutationAtom = atom((get) =>
|
|||||||
: targetDirectory.id
|
: targetDirectory.id
|
||||||
// Invalidate using base key (without params) to invalidate all queries for these directories
|
// Invalidate using base key (without params) to invalidate all queries for these directories
|
||||||
client.invalidateQueries({
|
client.invalidateQueries({
|
||||||
queryKey: directoryContentQueryKey(account.id, dirId),
|
queryKey: directoryContentQueryKey(drive.id, dirId),
|
||||||
})
|
})
|
||||||
for (const item of items) {
|
for (const item of items) {
|
||||||
if (item.parentId) {
|
if (item.parentId) {
|
||||||
client.invalidateQueries({
|
client.invalidateQueries({
|
||||||
queryKey: directoryContentQueryKey(
|
queryKey: directoryContentQueryKey(
|
||||||
account.id,
|
drive.id,
|
||||||
item.parentId,
|
item.parentId,
|
||||||
),
|
),
|
||||||
})
|
})
|
||||||
@@ -260,9 +260,9 @@ export const moveDirectoryItemsMutationAtom = atom((get) =>
|
|||||||
export const moveToTrashMutationAtom = atom((get) =>
|
export const moveToTrashMutationAtom = atom((get) =>
|
||||||
mutationOptions({
|
mutationOptions({
|
||||||
mutationFn: async (items: DirectoryItem[]) => {
|
mutationFn: async (items: DirectoryItem[]) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
throw new Error("Account not found")
|
throw new Error("Drive not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
const fileIds: string[] = []
|
const fileIds: string[] = []
|
||||||
@@ -285,7 +285,7 @@ export const moveToTrashMutationAtom = atom((get) =>
|
|||||||
fileDeleteParams.set("trash", "true")
|
fileDeleteParams.set("trash", "true")
|
||||||
deleteFilesPromise = fetchApi(
|
deleteFilesPromise = fetchApi(
|
||||||
"DELETE",
|
"DELETE",
|
||||||
`/accounts/${account.id}/files?${fileDeleteParams.toString()}`,
|
`/drives/${drive.id}/files?${fileDeleteParams.toString()}`,
|
||||||
{
|
{
|
||||||
returns: FileInfo.array(),
|
returns: FileInfo.array(),
|
||||||
},
|
},
|
||||||
@@ -301,7 +301,7 @@ export const moveToTrashMutationAtom = atom((get) =>
|
|||||||
directoryDeleteParams.set("trash", "true")
|
directoryDeleteParams.set("trash", "true")
|
||||||
deleteDirectoriesPromise = fetchApi(
|
deleteDirectoriesPromise = fetchApi(
|
||||||
"DELETE",
|
"DELETE",
|
||||||
`/accounts/${account.id}/directories?${directoryDeleteParams.toString()}`,
|
`/drives/${drive.id}/directories?${directoryDeleteParams.toString()}`,
|
||||||
{
|
{
|
||||||
returns: DirectoryInfo.array(),
|
returns: DirectoryInfo.array(),
|
||||||
},
|
},
|
||||||
@@ -318,14 +318,14 @@ export const moveToTrashMutationAtom = atom((get) =>
|
|||||||
return [...deletedFiles, ...deletedDirectories]
|
return [...deletedFiles, ...deletedDirectories]
|
||||||
},
|
},
|
||||||
onSuccess: (_data, items, _result, { client }) => {
|
onSuccess: (_data, items, _result, { client }) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (account) {
|
if (drive) {
|
||||||
// Invalidate using base key (without params) to invalidate all queries for these directories
|
// Invalidate using base key (without params) to invalidate all queries for these directories
|
||||||
for (const item of items) {
|
for (const item of items) {
|
||||||
if (item.parentId) {
|
if (item.parentId) {
|
||||||
client.invalidateQueries({
|
client.invalidateQueries({
|
||||||
queryKey: directoryContentQueryKey(
|
queryKey: directoryContentQueryKey(
|
||||||
account.id,
|
drive.id,
|
||||||
item.parentId,
|
item.parentId,
|
||||||
),
|
),
|
||||||
})
|
})
|
||||||
@@ -339,14 +339,14 @@ export const moveToTrashMutationAtom = atom((get) =>
|
|||||||
export const renameFileMutationAtom = atom((get) =>
|
export const renameFileMutationAtom = atom((get) =>
|
||||||
mutationOptions({
|
mutationOptions({
|
||||||
mutationFn: async (file: FileInfo) => {
|
mutationFn: async (file: FileInfo) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
throw new Error("Account not found")
|
throw new Error("Drive not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
const [, result] = await fetchApi(
|
const [, result] = await fetchApi(
|
||||||
"PATCH",
|
"PATCH",
|
||||||
`/accounts/${account.id}/files/${file.id}`,
|
`/drives/${drive.id}/files/${file.id}`,
|
||||||
{
|
{
|
||||||
body: JSON.stringify({ name: file.name }),
|
body: JSON.stringify({ name: file.name }),
|
||||||
returns: FileInfo,
|
returns: FileInfo,
|
||||||
@@ -361,14 +361,14 @@ export const renameFileMutationAtom = atom((get) =>
|
|||||||
export const renameDirectoryMutationAtom = atom((get) =>
|
export const renameDirectoryMutationAtom = atom((get) =>
|
||||||
mutationOptions({
|
mutationOptions({
|
||||||
mutationFn: async (directory: DirectoryInfo) => {
|
mutationFn: async (directory: DirectoryInfo) => {
|
||||||
const account = get(currentAccountAtom)
|
const drive = get(currentDriveAtom)
|
||||||
if (!account) {
|
if (!drive) {
|
||||||
throw new Error("Account not found")
|
throw new Error("Drive not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
const [, result] = await fetchApi(
|
const [, result] = await fetchApi(
|
||||||
"PATCH",
|
"PATCH",
|
||||||
`/accounts/${account.id}/directories/${directory.id}`,
|
`/drives/${drive.id}/directories/${directory.id}`,
|
||||||
{
|
{
|
||||||
body: JSON.stringify({ name: directory.name }),
|
body: JSON.stringify({ name: directory.name }),
|
||||||
returns: DirectoryInfo,
|
returns: DirectoryInfo,
|
||||||
|
|||||||
@@ -7,17 +7,17 @@
|
|||||||
|
|
||||||
# Routing + auth conventions
|
# Routing + auth conventions
|
||||||
|
|
||||||
- Account-scoped resources live under `/accounts/:accountID`; always apply auth + account middleware.
|
- Drive-scoped resources live under `/drives/:driveID`; always apply auth + drive middleware.
|
||||||
- Auth middleware must be the source of truth for the current user (via `reqctx`).
|
- Auth middleware must be the source of truth for the current user (via `reqctx`).
|
||||||
- Support both bearer-token and cookie flows; pick one per client surface.
|
- Support both bearer-token and cookie flows; pick one per client surface.
|
||||||
- Use transactions for multi-step writes or cross-table changes.
|
- Use transactions for multi-step writes or cross-table changes.
|
||||||
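
As a concrete illustration of the drive-scoped convention above, here is a minimal routing sketch using only the standard library's `net/http` mux (Go 1.22+ path patterns). The handler and middleware names (`requireAuth`, `requireDrive`) are placeholders for this sketch, not the backend's actual symbols.

```go
// Sketch: how drive-scoped routes could be mounted behind auth + drive
// middleware. Names are hypothetical, not the repository's actual API.
package routing

import "net/http"

// requireAuth stands in for the real auth middleware that resolves the
// current user (bearer token or cookie) and stores it in the request context.
func requireAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ... authenticate, reply 401 on failure ...
		next.ServeHTTP(w, r)
	})
}

// requireDrive stands in for middleware that reads the {driveID} path
// segment, checks the caller's access to that drive, and stashes it in context.
func requireDrive(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.PathValue("driveID") == "" {
			http.Error(w, "missing drive id", http.StatusBadRequest)
			return
		}
		// ... load drive, verify membership/role ...
		next.ServeHTTP(w, r)
	})
}

// Routes mounts every drive-scoped resource under /drives/{driveID}.
func Routes() http.Handler {
	mux := http.NewServeMux()
	mux.Handle("GET /drives/{driveID}/directories/{directoryID}",
		requireAuth(requireDrive(http.HandlerFunc(getDirectory))))
	mux.Handle("POST /drives/{driveID}/uploads",
		requireAuth(requireDrive(http.HandlerFunc(createUpload))))
	return mux
}

func getDirectory(w http.ResponseWriter, r *http.Request) { /* ... */ }
func createUpload(w http.ResponseWriter, r *http.Request) { /* ... */ }
```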
|
|
||||||
# Data model relationships (high level)
|
# Data model relationships (high level)
|
||||||
|
|
||||||
- Users own accounts.
|
- Users have accounts (principals) within organizations.
|
||||||
- Accounts own VFS nodes (files + directories).
|
- Drives own VFS nodes (files + directories).
|
||||||
- Auth grants own refresh tokens.
|
- Auth grants own refresh tokens.
|
||||||
- Node share tokens exist for future sharing flows.
|
- Node shares grant scoped access into drives.
|
||||||
|
|
||||||
# Virtual filesystem + storage
|
# Virtual filesystem + storage
|
||||||
|
|
||||||
|
|||||||
21
dev/docs/integration-tests.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Integration tests (Go + testcontainers)
|
||||||
|
|
||||||
|
## Requirements
|
||||||
|
|
||||||
|
- A working Docker daemon reachable from the devcontainer.
|
||||||
|
- Recommended: `.devcontainer/devcontainer.json` includes `docker-outside-of-docker`, which uses the host/Codespaces Docker.
|
||||||
|
- Alternative: switch to `docker-in-docker` if you don’t have host Docker access.
|
||||||
|
|
||||||
|
## Run
|
||||||
|
|
||||||
|
From `apps/backend`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go test -tags=integration ./internal/drexa -run TestRegistrationFlow -count=1
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- The first run pulls `postgres:16-alpine` (needs network access).
|
||||||
|
- Tests are tagged `integration`, so a plain `go test ./...` never runs them by accident.
|
||||||
|
- If Docker isn’t available, the test skips (rather than failing).
|
||||||
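
For orientation, an integration test following these notes might be shaped like the sketch below. It uses the generic `testcontainers-go` API against `postgres:16-alpine`; the package name, DSN wiring, and the exact skip helper are assumptions to verify against the pinned testcontainers version, not code copied from the suite.

```go
//go:build integration

package drexa_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

// TestRegistrationFlow sketch: boots a throwaway Postgres container,
// derives a DSN, and would then exercise the registration endpoints.
func TestRegistrationFlow(t *testing.T) {
	ctx := context.Background()

	// Skip instead of failing when no Docker daemon is reachable.
	testcontainers.SkipIfProviderIsNotHealthy(t)

	pg, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:        "postgres:16-alpine",
			ExposedPorts: []string{"5432/tcp"},
			Env: map[string]string{
				"POSTGRES_USER":     "drexa",
				"POSTGRES_PASSWORD": "drexa",
				"POSTGRES_DB":       "drexa_test",
			},
			WaitingFor: wait.ForListeningPort("5432/tcp"),
		},
		Started: true,
	})
	if err != nil {
		t.Fatalf("start postgres: %v", err)
	}
	t.Cleanup(func() { _ = pg.Terminate(ctx) })

	host, err := pg.Host(ctx)
	if err != nil {
		t.Fatalf("container host: %v", err)
	}
	port, err := pg.MappedPort(ctx, "5432")
	if err != nil {
		t.Fatalf("mapped port: %v", err)
	}
	dsn := fmt.Sprintf("postgres://drexa:drexa@%s:%s/drexa_test?sslmode=disable", host, port.Port())

	// ... run migrations against dsn, start the API, hit the registration flow ...
	_ = dsn
}
```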
106
dev/docs/models.md
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# Data models (users, orgs, accounts, drives)
|
||||||
|
|
||||||
|
This doc describes the current model for identity, org membership, and storage tenancy.
|
||||||
|
|
||||||
|
## Core ideas
|
||||||
|
|
||||||
|
- `Account` is a **principal** (a user’s identity within an org), not a storage tenant.
|
||||||
|
- `Drive` is the **storage tenant** (the partition key for VFS + blobs).
|
||||||
|
- VFS authorization separates:
|
||||||
|
- **tenant**: which drive is being accessed, and
|
||||||
|
- **actor**: who is performing the action.
|
||||||
|
|
||||||
|
## Entities
|
||||||
|
|
||||||
|
### User
|
||||||
|
|
||||||
|
Authenticated human identity.
|
||||||
|
|
||||||
|
### Organization
|
||||||
|
|
||||||
|
Top-level container for org-owned resources:
|
||||||
|
|
||||||
|
- billing, policies, API keys, integrations, audit log, etc. (all keyed by `org_id`)
|
||||||
|
|
||||||
|
### Account (principal)
|
||||||
|
|
||||||
|
Represents a user’s identity within an org (membership).
|
||||||
|
|
||||||
|
Fields:
|
||||||
|
|
||||||
|
- `org_id`, `user_id`
|
||||||
|
- `role` (org-level role), `status` (invited/active/removed/etc)
|
||||||
|
- timestamps
|
||||||
|
|
||||||
|
Invariant:
|
||||||
|
|
||||||
|
- `UNIQUE(org_id, user_id)` (one account per user per org)
|
||||||
|
|
||||||
|
### Drive (storage tenant)
|
||||||
|
|
||||||
|
Storage container that owns the filesystem.
|
||||||
|
|
||||||
|
Fields:
|
||||||
|
|
||||||
|
- `org_id`
|
||||||
|
- optional `owner_account_id` (for “personal drive inside this org”)
|
||||||
|
- quota/usage metadata (quotas are per-drive)
|
||||||
|
- timestamps
|
||||||
|
|
||||||
|
Invariants:
|
||||||
|
|
||||||
|
- Personal drive: `UNIQUE(org_id, owner_account_id)` (one personal drive per account per org).
|
||||||
|
- Shared/org drive: `owner_account_id IS NULL`.
|
||||||
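
A rough Go rendering of the two entities (field names and types are assumptions for illustration; the uniqueness rules themselves belong in migrations, not in code):

```go
// Sketch of the principal/tenant split; not the repository's real models.
package models

import "time"

// Account is a principal: one row per (org, user) membership.
// Schema invariant: UNIQUE(org_id, user_id).
type Account struct {
	ID        string
	OrgID     string
	UserID    string
	Role      string // org-level role
	Status    string // invited / active / removed / ...
	CreatedAt time.Time
	UpdatedAt time.Time
}

// Drive is a storage tenant: the partition key for VFS nodes and blobs.
// Schema invariants:
//   - personal drive: UNIQUE(org_id, owner_account_id)
//   - shared/org drive: owner_account_id IS NULL
type Drive struct {
	ID             string
	OrgID          string
	OwnerAccountID *string // nil for shared/org drives
	QuotaBytes     int64   // per-drive quota metadata (assumed shape)
	UsedBytes      int64
	CreatedAt      time.Time
	UpdatedAt      time.Time
}
```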
|
|
||||||
|
## Relationships
|
||||||
|
|
||||||
|
```
|
||||||
|
users 1 ── * accounts * ── 1 organizations
|
||||||
|
organizations 1 ── * drives 1 ── * vfs_nodes
|
||||||
|
```
|
||||||
|
|
||||||
|
Interpretation:
|
||||||
|
|
||||||
|
- A user can be in many orgs (via many accounts).
|
||||||
|
- An org can have many members (many accounts).
|
||||||
|
- Storage is owned by drives (org-scoped), not by accounts.
|
||||||
|
|
||||||
|
## VFS tenancy + scope
|
||||||
|
|
||||||
|
`Scope` should carry the VFS tenant key and actor identity separately:
|
||||||
|
|
||||||
|
- `Scope.DriveID` = the storage tenant partition key (used to constrain all VFS reads/writes).
|
||||||
|
- `Scope.RootNodeID` + `Scope.AllowedNodes` = the accessible boundary within that drive.
|
||||||
|
- `Scope.ActorKind` + `Scope.ActorID` = who performs the action (authenticated account vs share link).
|
||||||
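
A plausible Go shape for that separation (a sketch only; the real `Scope` type may differ):

```go
// Sketch of a Scope that keeps tenant and actor concerns separate.
package vfs

// ActorKind distinguishes who is performing the action.
type ActorKind string

const (
	ActorAccount ActorKind = "account" // authenticated principal
	ActorShare   ActorKind = "share"   // share-link consumer
)

// Scope is carried through every VFS read/write.
type Scope struct {
	// Tenant: which drive is being accessed; constrains all VFS queries.
	DriveID string

	// Boundary: the accessible subtree/nodes within that drive.
	RootNodeID   string
	AllowedNodes []string // empty meaning "everything under RootNodeID" is an assumption

	// Actor: who performs the action.
	ActorKind ActorKind
	ActorID   string // account ID or share ID, depending on ActorKind
}
```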
|
|
||||||
|
## Sharing model
|
||||||
|
|
||||||
|
Separate “where the shared content lives” from “who created/manages the share record”.
|
||||||
|
|
||||||
|
Shape:
|
||||||
|
|
||||||
|
```
|
||||||
|
node_shares.drive_id = storage tenant containing the shared nodes
|
||||||
|
node_shares.created_by_account_id = principal that created the share
|
||||||
|
|
||||||
|
share_permissions.account_id = principal allowed (NULL = public)
|
||||||
|
```
|
||||||
|
|
||||||
|
Consumption semantics:
|
||||||
|
|
||||||
|
- `Scope.DriveID` comes from `node_shares.drive_id` (tenant).
|
||||||
|
- `Scope.Actor…` comes from either the share link (public) or the consuming principal (authenticated consumption).
|
||||||
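
Continuing the `Scope` sketch above, the consumption rules could be captured by two small constructors (hypothetical names, illustrative only):

```go
package vfs

// NodeShare mirrors the node_shares shape above (illustrative subset).
type NodeShare struct {
	ID                 string
	DriveID            string // storage tenant containing the shared nodes
	RootNodeID         string
	CreatedByAccountID string
}

// ScopeFromShare builds a Scope for public consumption:
// the tenant comes from the share, the actor is the share link itself.
func ScopeFromShare(s NodeShare) Scope {
	return Scope{
		DriveID:    s.DriveID,
		RootNodeID: s.RootNodeID,
		ActorKind:  ActorShare,
		ActorID:    s.ID,
	}
}

// ScopeFromShareForAccount builds a Scope for authenticated consumption:
// same tenant, but the actor is the consuming principal.
func ScopeFromShareForAccount(s NodeShare, accountID string) Scope {
	return Scope{
		DriveID:    s.DriveID,
		RootNodeID: s.RootNodeID,
		ActorKind:  ActorAccount,
		ActorID:    accountID,
	}
}
```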
|
|
||||||
|
## Drive patterns
|
||||||
|
|
||||||
|
Two common patterns can coexist:
|
||||||
|
|
||||||
|
- **Personal drives**: one private drive per account per org (`owner_account_id` set). This matches “each user gets their own drive in every org they join”.
|
||||||
|
- **Org-owned/shared drives**: `owner_account_id = NULL`, access controlled via org role/policies (and optionally per-drive ACLs).
|
||||||
|
|
||||||
|
“Personal orgs” are just orgs with a single member account and a single personal drive.
|
||||||
|
|
||||||
|
## Authorization checks (high level)
|
||||||
|
|
||||||
|
- Org-owned resources: user can access org `X` iff they have an active `accounts` row `(org_id=X, user_id=U)`.
|
||||||
|
- Drive access: user can access drive `D` iff they’re a member of `D.org_id` and their role/policies allow the requested operation.
|
||||||
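
Expressed as code, those two checks could look like this sketch (the store interface and role logic are hypothetical; the real backend presumably queries the `accounts`/`drives` tables directly):

```go
package authz

import "context"

// MembershipStore is a hypothetical lookup interface standing in for the
// real queries against the accounts/drives tables.
type MembershipStore interface {
	// ActiveAccount returns the active account row for (orgID, userID), if any.
	ActiveAccount(ctx context.Context, orgID, userID string) (role string, ok bool, err error)
	// DriveOrg returns the org that owns the drive.
	DriveOrg(ctx context.Context, driveID string) (orgID string, err error)
}

// CanAccessOrg: user U can access org X iff an active accounts row
// (org_id=X, user_id=U) exists.
func CanAccessOrg(ctx context.Context, s MembershipStore, orgID, userID string) (bool, error) {
	_, ok, err := s.ActiveAccount(ctx, orgID, userID)
	return ok, err
}

// CanAccessDrive: user U can access drive D iff they are a member of
// D.org_id and their role permits the requested operation.
func CanAccessDrive(ctx context.Context, s MembershipStore, driveID, userID, operation string) (bool, error) {
	orgID, err := s.DriveOrg(ctx, driveID)
	if err != nil {
		return false, err
	}
	role, ok, err := s.ActiveAccount(ctx, orgID, userID)
	if err != nil || !ok {
		return false, err
	}
	return roleAllows(role, operation), nil
}

// roleAllows is a stand-in for the org-level role/policy evaluation.
func roleAllows(role, operation string) bool {
	switch role {
	case "owner", "admin":
		return true
	default:
		return operation == "read"
	}
}
```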