diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 285b897..87730fb 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -35,6 +35,6 @@ // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. "remoteUser": "node", "features": { - "golang": "1.19" + "golang": "1.21" } } diff --git a/.dockerignore b/.dockerignore index 720e7a0..804ab22 100644 --- a/.dockerignore +++ b/.dockerignore @@ -22,3 +22,4 @@ **/secrets.dev.yaml **/values.dev.yaml README.md +!Dockerfile.rootless diff --git a/.github/workflows/partial-backend.yaml b/.github/workflows/partial-backend.yaml index 42c308f..fe4dac2 100644 --- a/.github/workflows/partial-backend.yaml +++ b/.github/workflows/partial-backend.yaml @@ -7,12 +7,12 @@ jobs: Go: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: "1.21" - name: Install Task uses: arduino/setup-task@v1 @@ -20,7 +20,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version version: latest diff --git a/.github/workflows/partial-frontend.yaml b/.github/workflows/partial-frontend.yaml index 9f85179..f849406 100644 --- a/.github/workflows/partial-frontend.yaml +++ b/.github/workflows/partial-frontend.yaml @@ -9,11 +9,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: pnpm/action-setup@v2.2.4 + - uses: pnpm/action-setup@v3.0.0 with: version: 6.0.2 @@ -34,7 +34,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -44,15 
+44,15 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: "1.21" - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 with: node-version: 18 - - uses: pnpm/action-setup@v2.2.4 + - uses: pnpm/action-setup@v3.0.0 with: version: 6.0.2 diff --git a/.github/workflows/partial-publish.yaml b/.github/workflows/partial-publish.yaml index 39f57e9..542171d 100644 --- a/.github/workflows/partial-publish.yaml +++ b/.github/workflows/partial-publish.yaml @@ -20,22 +20,22 @@ jobs: name: "Publish Homebox" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: - go-version: 1.19 + go-version: "1.20" - name: Set up QEMU id: qemu - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 with: image: tonistiigi/binfmt:latest platforms: all - name: install buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 with: install: true @@ -44,7 +44,7 @@ jobs: env: CR_PAT: ${{ secrets.GH_TOKEN }} - - name: build nightly the image + - name: build nightly image if: ${{ inputs.release == false }} run: | docker build --push --no-cache \ @@ -53,6 +53,16 @@ jobs: --build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ --platform=linux/amd64,linux/arm64,linux/arm/v7 . + - name: build nightly-rootless image + if: ${{ inputs.release == false }} + run: | + docker build --push --no-cache \ + --tag=ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \ + --build-arg=COMMIT=$(git rev-parse HEAD) \ + --build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ + --file Dockerfile.rootless \ + --platform=linux/amd64,linux/arm64,linux/arm/v7 . 
+ - name: build release tagged the image if: ${{ inputs.release == true }} run: | @@ -64,3 +74,16 @@ jobs: --build-arg COMMIT=$(git rev-parse HEAD) \ --build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ --platform linux/amd64,linux/arm64,linux/arm/v7 . + + - name: build release tagged the rootless image + if: ${{ inputs.release == true }} + run: | + docker build --push --no-cache \ + --tag ghcr.io/hay-kot/homebox:nightly-rootless \ + --tag ghcr.io/hay-kot/homebox:latest-rootless \ + --tag ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \ + --build-arg VERSION=${{ inputs.tag }} \ + --build-arg COMMIT=$(git rev-parse HEAD) \ + --build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ + --platform linux/amd64,linux/arm64,linux/arm/v7 \ + --file Dockerfile.rootless . diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index 666e5a7..e91e8ec 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -1,73 +1,29 @@ -name: Build Nightly +name: Publish Dockers on: push: branches: - main - release: - types: - - published env: FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} jobs: - backend-tests: - name: "Backend Server Tests" - uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main - - frontend-tests: - name: "Frontend and End-to-End Tests" - uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main - deploy: name: "Deploy Nightly to Fly.io" runs-on: ubuntu-latest - needs: - - backend-tests - - frontend-tests steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: superfly/flyctl-actions/setup-flyctl@master - run: flyctl deploy --remote-only publish-nightly: name: "Publish Nightly" if: github.event_name != 'release' - needs: - - backend-tests - - frontend-tests uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main with: tag: nightly secrets: GH_TOKEN: ${{ secrets.CR_PAT }} - publish-tag: - name: "Publish Tag" - if: github.event_name == 'release' - needs: - - 
backend-tests - - frontend-tests - uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main - with: - release: true - tag: ${{ github.event.release.tag_name }} - secrets: - GH_TOKEN: ${{ secrets.CR_PAT }} - deploy-docs: - name: Deploy docs - needs: - - publish-tag - runs-on: ubuntu-latest - steps: - - name: Checkout main - uses: actions/checkout@v3 - - - name: Deploy docs - uses: mhausenblas/mkdocs-deploy-gh-pages@master - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - CONFIG_FILE: docs/mkdocs.yml - EXTRA_PACKAGES: build-base diff --git a/.github/workflows/pull-requests.yaml b/.github/workflows/pull-requests.yaml index 2debdbd..f39539b 100644 --- a/.github/workflows/pull-requests.yaml +++ b/.github/workflows/pull-requests.yaml @@ -12,4 +12,4 @@ jobs: frontend-tests: name: "Frontend and End-to-End Tests" - uses: ./.github/workflows/partial-frontend.yaml + uses: ./.github/workflows/partial-frontend.yaml \ No newline at end of file diff --git a/.github/workflows/tag.yaml b/.github/workflows/tag.yaml new file mode 100644 index 0000000..8ac7c54 --- /dev/null +++ b/.github/workflows/tag.yaml @@ -0,0 +1,77 @@ +name: Publish Release + +on: + push: + tags: + - v* + +env: + FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }} + +jobs: + backend-tests: + name: "Backend Server Tests" + uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main + + frontend-tests: + name: "Frontend and End-to-End Tests" + uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main + + goreleaser: + name: goreleaser + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v5 + + - uses: pnpm/action-setup@v2 + with: + version: 7.30.1 + + - name: Build Frontend and Copy to Backend + working-directory: frontend + run: | + pnpm install --shamefully-hoist + pnpm run build + cp -r ./.output/public ../backend/app/api/static/ + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v5 
+ with: + workdir: "backend" + distribution: goreleaser + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + publish-tag: + name: "Publish Tag" + uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main + with: + release: true + tag: ${{ github.ref_name }} + secrets: + GH_TOKEN: ${{ secrets.CR_PAT }} + + deploy-docs: + name: Deploy docs + needs: + - publish-tag + - goreleaser + runs-on: ubuntu-latest + steps: + - name: Checkout main + uses: actions/checkout@v4 + + - name: Deploy docs + uses: mhausenblas/mkdocs-deploy-gh-pages@master + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CONFIG_FILE: docs/mkdocs.yml + EXTRA_PACKAGES: build-base \ No newline at end of file diff --git a/.gitignore b/.gitignore index 05e4ebd..d247138 100644 --- a/.gitignore +++ b/.gitignore @@ -48,4 +48,10 @@ dist .pnpm-store backend/app/api/app -backend/app/api/__debug_bin \ No newline at end of file +backend/app/api/__debug_bin +dist/ + +# Nuxt Publish Dir +backend/app/api/static/public/* +!backend/app/api/static/public/.gitkeep +backend/api \ No newline at end of file diff --git a/.scaffold/model/scaffold.yaml b/.scaffold/model/scaffold.yaml new file mode 100644 index 0000000..028d2fa --- /dev/null +++ b/.scaffold/model/scaffold.yaml @@ -0,0 +1,33 @@ +--- +# yaml-language-server: $schema=https://hay-kot.github.io/scaffold/schema.json +messages: + pre: | + # Ent Model Generation + + With Boilerplate! + post: | + Complete! + +questions: + - name: "model" + prompt: + message: "What is the name of the model? (PascalCase)" + required: true + + - name: "by_group" + prompt: + confirm: "Include a Group Edge? 
(group_id -> id)" + required: true + +rewrites: + - from: 'templates/model.go' + to: 'backend/internal/data/ent/schema/{{ lower .Scaffold.model }}.go' + +inject: + - name: "Insert Groups Edge" + path: 'backend/internal/data/ent/schema/group.go' + at: // $scaffold_edge + template: | + {{- if .Scaffold.by_group -}} + owned("{{ lower .Scaffold.model }}s", {{ .Scaffold.model }}.Type), + {{- end -}} diff --git a/.scaffold/model/templates/model.go b/.scaffold/model/templates/model.go new file mode 100644 index 0000000..b73ac16 --- /dev/null +++ b/.scaffold/model/templates/model.go @@ -0,0 +1,40 @@ +package schema + +import ( + "entgo.io/ent" + + "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins" +) + +type {{ .Scaffold.model }} struct { + ent.Schema +} + +func ({{ .Scaffold.model }}) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + {{- if .Scaffold.by_group }} + GroupMixin{ref: "{{ snakecase .Scaffold.model }}s"}, + {{- end }} + } +} + +// Fields of the {{ .Scaffold.model }}. +func ({{ .Scaffold.model }}) Fields() []ent.Field { + return []ent.Field{ + // field.String("name"). + } +} + +// Edges of the {{ .Scaffold.model }}. +func ({{ .Scaffold.model }}) Edges() []ent.Edge { + return []ent.Edge{ + // edge.From("group", Group.Type). 
+ } +} + +func ({{ .Scaffold.model }}) Indexes() []ent.Index { + return []ent.Index{ + // index.Fields("token"), + } +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 2383c21..09c7a0e 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -16,7 +16,7 @@ "editor.formatOnSave": false, "editor.defaultFormatter": "dbaeumer.vscode-eslint", "editor.codeActionsOnSave": { - "source.fixAll.eslint": true + "source.fixAll.eslint": "explicit" }, "[typescript]": { "editor.defaultFormatter": "dbaeumer.vscode-eslint" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 90095ac..52b2a0f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,16 +1,16 @@ # Contributing -## We Develop with Github +## We Develop with GitHub -We use github to host code, to track issues and feature requests, as well as accept pull requests. +We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. ## Branch Flow -We use the `main` branch as the development branch. All PRs should be made to the `main` branch from a feature branch. To create a pull request you can use the following steps: +We use the `main` branch as the development branch. All PRs should be made to the `main` branch from a feature branch. To create a pull request, you can use the following steps: 1. Fork the repository and create a new branch from `main`. 2. If you've added code that should be tested, add tests. -3. If you've changed API's, update the documentation. +3. If you've changed APIs, update the documentation. 4. Ensure that the test suite and linters pass 5. Issue your pull request @@ -18,7 +18,7 @@ We use the `main` branch as the development branch. All PRs should be made to th ### Prerequisites -There is a devcontainer available for this project. If you are using VSCode, you can use the devcontainer to get started. 
If you are not using VSCode, you can need to ensure that you have the following tools installed: +There is a devcontainer available for this project. If you are using VSCode, you can use the devcontainer to get started. If you are not using VSCode, you need to ensure that you have the following tools installed: - [Go 1.19+](https://golang.org/doc/install) - [Swaggo](https://github.com/swaggo/swag) @@ -31,21 +31,27 @@ If you're using `taskfile` you can run `task --list-all` for a list of all comma ### Setup -If you're using the taskfile you can use the `task setup` command to run the required setup commands. Otherwise you can review the commands required in the `Taskfile.yml` file. +If you're using the taskfile, you can use the `task setup` command to run the required setup commands. Otherwise, you can review the commands required in the `Taskfile.yml` file. -Note that when installing dependencies with pnpm you must use the `--shamefully-hoist` flag. If you don't use this flag you will get an error when running the the frontend server. +Note that when installing dependencies with pnpm you must use the `--shamefully-hoist` flag. If you don't use this flag, you will get an error when running the frontend server. ### API Development Notes start command `task go:run` 1. API Server does not auto reload. You'll need to restart the server after making changes. -2. Unit tests should be written in Go, however end-to-end or user story tests should be written in TypeScript using the client library in the frontend directory. +2. Unit tests should be written in Go, however, end-to-end or user story tests should be written in TypeScript using the client library in the frontend directory. ### Frontend Development Notes start command `task: ui:dev` 1. The frontend is a Vue 3 app with Nuxt.js that uses Tailwind and DaisyUI for styling. -2. We're using Vitest for our automated testing. you can run these with `task ui:watch`. -3. 
Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass. \ No newline at end of file +2. We're using Vitest for our automated testing. You can run these with `task ui:watch`. +3. Tests require the API server to be running, and in some cases the first run will fail due to a race condition. If this happens, just run the tests again and they should pass. + +## Publishing Release + +Create a new tag in GitHub with the version number vX.X.X. This will trigger a new release to be created. + +Test -> Goreleaser -> Publish Release -> Trigger Docker Builds -> Deploy Docs + Fly.io Demo \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index ad837d5..11d5c74 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,50 +1,48 @@ - -# Build Nuxt -FROM node:17-alpine as frontend-builder -WORKDIR /app -RUN npm install -g pnpm -COPY frontend/package.json frontend/pnpm-lock.yaml ./ -RUN pnpm install --frozen-lockfile --shamefully-hoist -COPY frontend . -RUN pnpm build - -# Build API -FROM golang:alpine AS builder -ARG BUILD_TIME -ARG COMMIT -ARG VERSION -RUN apk update && \ - apk upgrade && \ - apk add --update git build-base gcc g++ - -WORKDIR /go/src/app -COPY ./backend . -RUN go get -d -v ./... 
-RUN rm -rf ./app/api/public -COPY --from=frontend-builder /app/.output/public ./app/api/static/public -RUN CGO_ENABLED=1 GOOS=linux go build \ - -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \ - -o /go/bin/api \ - -v ./app/api/*.go - -# Production Stage -FROM alpine:latest - -ENV HBOX_MODE=production -ENV HBOX_STORAGE_DATA=/data/ -ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1 - -RUN apk --no-cache add ca-certificates -RUN mkdir /app -COPY --from=builder /go/bin/api /app - -RUN chmod +x /app/api - -LABEL Name=homebox Version=0.0.1 -LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox" -EXPOSE 7745 -WORKDIR /app -VOLUME [ "/data" ] - -ENTRYPOINT [ "/app/api" ] -CMD [ "/data/config.yml" ] + +# Build Nuxt +FROM r.batts.cloud/nodejs:18 as frontend-builder +WORKDIR /app +RUN npm install -g pnpm@latest-9 +COPY frontend/package.json frontend/pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile --shamefully-hoist +COPY frontend . +RUN pnpm build + +# Build API +FROM r.batts.cloud/golang:1.24 AS builder +ARG BUILD_TIME +ARG COMMIT +ARG VERSION +RUN apt update && \ + apt install -y git build-essential gcc g++ + +WORKDIR /go/src/app +COPY ./backend . +RUN go get -d -v ./... 
+RUN rm -rf ./app/api/public +COPY --from=frontend-builder /app/.output/public ./app/api/static/public +RUN CGO_ENABLED=0 GOOS=linux go build \ + -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \ + -o /go/bin/api \ + -v ./app/api/*.go + +# Production Stage +FROM r.batts.cloud/debian:trixie + +ENV HBOX_MODE=production +ENV HBOX_STORAGE_DATA=/data/ +ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_pragma=busy_timeout=2000&_pragma=journal_mode=WAL&_fk=1 + +RUN mkdir /app +COPY --from=builder /go/bin/api /app + +RUN chmod +x /app/api + +LABEL Name=homebox Version=0.0.1 +LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox" +EXPOSE 7745 +WORKDIR /app +VOLUME [ "/data" ] + +ENTRYPOINT [ "/app/api" ] +CMD [ "/data/config.yml" ] diff --git a/Dockerfile.rootless b/Dockerfile.rootless new file mode 100644 index 0000000..e1c98aa --- /dev/null +++ b/Dockerfile.rootless @@ -0,0 +1,53 @@ + +# Build Nuxt +FROM node:17-alpine as frontend-builder +WORKDIR /app +RUN npm install -g pnpm +COPY frontend/package.json frontend/pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile --shamefully-hoist +COPY frontend . +RUN pnpm build + +# Build API +FROM golang:alpine AS builder +ARG BUILD_TIME +ARG COMMIT +ARG VERSION +RUN apk update && \ + apk upgrade && \ + apk add --update git build-base gcc g++ + +WORKDIR /go/src/app +COPY ./backend . +RUN go get -d -v ./... 
+RUN rm -rf ./app/api/public +COPY --from=frontend-builder /app/.output/public ./app/api/static/public +RUN CGO_ENABLED=0 GOOS=linux go build \ + -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \ + -o /go/bin/api \ + -v ./app/api/*.go && \ + chmod +x /go/bin/api && \ + # create a directory so that we can copy it in the next stage + mkdir /data + +# Production Stage +FROM gcr.io/distroless/static + +ENV HBOX_MODE=production +ENV HBOX_STORAGE_DATA=/data/ +ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1 + +# Copy the binary and the (empty) /data dir and +# change the ownership to the low-privileged user +COPY --from=builder --chown=nonroot /go/bin/api /app +COPY --from=builder --chown=nonroot /data /data + +LABEL Name=homebox Version=0.0.1 +LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox" +EXPOSE 7745 +VOLUME [ "/data" ] + +# Drop root and run as low-privileged user +USER nonroot +ENTRYPOINT [ "/app" ] +CMD [ "/data/config.yml" ] diff --git a/README.md b/README.md index 691504f..148322b 100644 --- a/README.md +++ b/README.md @@ -16,12 +16,28 @@ [Configuration & Docker Compose](https://hay-kot.github.io/homebox/quick-start) ```bash -docker run --name=homebox \ - --restart=always \ - --publish=3100:7745 \ - ghcr.io/hay-kot/homebox:latest +# If using the rootless image, ensure data +# folder has correct permissions +mkdir -p /path/to/data/folder +chown 65532:65532 -R /path/to/data/folder +docker run -d \ + --name homebox \ + --restart unless-stopped \ + --publish 3100:7745 \ + --env TZ=Europe/Bucharest \ + --volume /path/to/data/folder/:/data \ + ghcr.io/hay-kot/homebox:latest +# ghcr.io/hay-kot/homebox:latest-rootless ``` + +## Contributing + +Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**. + +If you are not a coder, you can still contribute financially. 
Financial contributions help me prioritize working on this project over others and helps me know that there is a real demand for project development. + +Buy Me A Coffee ## Credits - Logo by [@lakotelman](https://github.com/lakotelman) diff --git a/Taskfile.yml b/Taskfile.yml index e619395..4d9c1aa 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -1,7 +1,8 @@ version: "3" env: - HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_fk=1 + HBOX_LOG_LEVEL: debug + HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_pragma=busy_timeout=1000&_pragma=journal_mode=WAL&_fk=1 HBOX_OPTIONS_ALLOW_REGISTRATION: true UNSAFE_DISABLE_PASSWORD_PROJECTION: "yes_i_am_sure" tasks: @@ -12,61 +13,77 @@ tasks: - cd backend && go mod tidy - cd frontend && pnpm install --shamefully-hoist - generate: - desc: | - Generates collateral files from the backend project - including swagger docs and typescripts type for the frontend - deps: - - db:generate + swag: + desc: Generate swagger docs + dir: backend/app/api/static/ + vars: + API: "../" + INTERNAL: "../../../internal" + PKGS: "../../../pkgs" + cmds: + - swag fmt --dir={{ .API }} + - swag init --dir={{ .API }},{{ .INTERNAL }}/core/services,{{ .INTERNAL }}/data/repo --parseDependency + sources: + - "./backend/app/api/**/*" + - "./backend/internal/data/**" + - "./backend/internal/core/services/**/*" + - "./backend/app/tools/typegen/main.go" + + typescript-types: + desc: Generates typescript types from swagger definition cmds: - - cd backend/app/api/static && swag fmt --dir=../ - - cd backend/app/api/static && swag init --dir=../,../../../internal,../../../pkgs - | npx swagger-typescript-api \ --no-client \ --modular \ --path ./backend/app/api/static/docs/swagger.json \ --output ./frontend/lib/api/types - - go run ./scripts/process-types/*.go ./frontend/lib/api/types/data-contracts.ts + - go run ./backend/app/tools/typegen/main.go ./frontend/lib/api/types/data-contracts.ts sources: - - "./backend/app/api/**/*" - - "./backend/internal/data/**" - - 
"./backend/internal/services/**/*" - - "./scripts/process-types.py" - generates: - - "./frontend/lib/api/types/data-contracts.ts" - - "./backend/internal/data/ent/schema" - - "./backend/app/api/static/docs/swagger.json" - - "./backend/app/api/static/docs/swagger.yaml" + - ./backend/app/tools/typegen/main.go + - ./backend/app/api/static/docs/swagger.json + + generate: + deps: + - db:generate + cmds: + - task: swag + - task: typescript-types + - cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json go:run: desc: Starts the backend api server (depends on generate task) + dir: backend deps: - generate cmds: - - cd backend && go run ./app/api/ {{ .CLI_ARGS }} + - go run ./app/api/ {{ .CLI_ARGS }} silent: false go:test: desc: Runs all go tests using gotestsum - supports passing gotestsum args + dir: backend cmds: - - cd backend && gotestsum {{ .CLI_ARGS }} ./... + - gotestsum {{ .CLI_ARGS }} ./... go:coverage: desc: Runs all go tests with -race flag and generates a coverage report + dir: backend cmds: - - cd backend && go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover + - go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover silent: true go:tidy: desc: Runs go mod tidy on the backend + dir: backend cmds: - - cd backend && go mod tidy + - go mod tidy go:lint: desc: Runs golangci-lint + dir: backend cmds: - - cd backend && golangci-lint run ./... + - golangci-lint run ./... go:all: desc: Runs all go test and lint related tasks @@ -77,19 +94,18 @@ tasks: go:build: desc: Builds the backend binary + dir: backend cmds: - - cd backend && go build -o ../build/backend ./app/api + - go build -o ../build/backend ./app/api db:generate: desc: Run Entgo.io Code Generation + dir: backend/internal/ cmds: - | - cd backend/internal/ && go generate ./... \ - --template=./data/ent/schema/templates/has_id.tmpl + go generate ./... 
sources: - "./backend/internal/data/ent/schema/**/*" - generates: - - "./backend/internal/ent/" db:migration: desc: Runs the database diff engine to generate a SQL migration files @@ -100,23 +116,27 @@ tasks: ui:watch: desc: Starts the vitest test runner in watch mode + dir: frontend cmds: - - cd frontend && pnpm run test:watch + - pnpm run test:watch ui:dev: desc: Run frontend development server + dir: frontend cmds: - - cd frontend && pnpm dev + - pnpm dev ui:fix: desc: Runs prettier and eslint on the frontend + dir: frontend cmds: - - cd frontend && pnpm run lint:fix + - pnpm run lint:fix ui:check: desc: Runs type checking + dir: frontend cmds: - - cd frontend && pnpm run typecheck + - pnpm run typecheck test:ci: desc: Runs end-to-end test on a live server (only for use in CI) @@ -134,4 +154,4 @@ tasks: - task: go:all - task: ui:check - task: ui:fix - - task: test:ci \ No newline at end of file + - task: test:ci diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..cde0123 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1,2 @@ + +dist/ diff --git a/backend/.golangci.yml b/backend/.golangci.yml new file mode 100644 index 0000000..8f63110 --- /dev/null +++ b/backend/.golangci.yml @@ -0,0 +1,74 @@ +run: + timeout: 10m + skip-dirs: + - internal/data/ent.* +linters-settings: + goconst: + min-len: 5 + min-occurrences: 5 + exhaustive: + default-signifies-exhaustive: true + revive: + ignore-generated-header: false + severity: warning + confidence: 3 + depguard: + rules: + main: + deny: + - pkg: io/util + desc: | + Deprecated: As of Go 1.16, the same functionality is now provided by + package io or package os, and those implementations should be + preferred in new code. See the specific function documentation for + details. 
+ gocritic: + enabled-checks: + - ruleguard + testifylint: + enable-all: true + tagalign: + order: + - json + - schema + - yaml + - yml + - toml + - validate +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - depguard + - dogsled + - errcheck + - errorlint + - exhaustive + - exportloopref + - gochecknoinits + - goconst + - gocritic + - gocyclo + - gofmt + - goprintffuncname + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - revive + - staticcheck + - stylecheck + - tagalign + - testifylint + - typecheck + - typecheck + - unconvert + - unused + - whitespace + - zerologlint + - sqlclosecheck +issues: + exclude-use-default: false + fix: true diff --git a/backend/.goreleaser.yaml b/backend/.goreleaser.yaml new file mode 100644 index 0000000..37752ec --- /dev/null +++ b/backend/.goreleaser.yaml @@ -0,0 +1,54 @@ +# This is an example .goreleaser.yml file with some sensible defaults. +# Make sure to check the documentation at https://goreleaser.com +before: + hooks: + # you may remove this if you don't need go generate + - go generate ./... +builds: + - main: ./app/api + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - "386" + - arm + - arm64 + ignore: + - goos: windows + goarch: arm + - goos: windows + goarch: "386" + +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of uname. + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + # use zip for windows archives + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + +# The lines beneath this are called `modelines`. 
See `:help modeline` +# Feel free to remove those if you don't want/use them. +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json +# vim: set ts=2 sw=2 tw=0 fo=cnqoj diff --git a/backend/app/api/app.go b/backend/app/api/app.go index 854c4e5..5d285d3 100644 --- a/backend/app/api/app.go +++ b/backend/app/api/app.go @@ -1,23 +1,21 @@ package main import ( - "time" - "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/config" "github.com/hay-kot/homebox/backend/pkgs/mailer" - "github.com/hay-kot/homebox/backend/pkgs/server" ) type app struct { conf *config.Config mailer mailer.Mailer db *ent.Client - server *server.Server repos *repo.AllRepos services *services.AllServices + bus *eventbus.EventBus } func new(conf *config.Config) *app { @@ -35,10 +33,3 @@ func new(conf *config.Config) *app { return s } - -func (a *app) startBgTask(t time.Duration, fn func()) { - for { - a.server.Background(fn) - time.Sleep(t) - } -} diff --git a/backend/app/api/bgrunner.go b/backend/app/api/bgrunner.go new file mode 100644 index 0000000..ce4b7cc --- /dev/null +++ b/backend/app/api/bgrunner.go @@ -0,0 +1,37 @@ +package main + +import ( + "context" + "time" +) + +type BackgroundTask struct { + name string + Interval time.Duration + Fn func(context.Context) +} + +func (tsk *BackgroundTask) Name() string { + return tsk.name +} + +func NewTask(name string, interval time.Duration, fn func(context.Context)) *BackgroundTask { + return &BackgroundTask{ + Interval: interval, + Fn: fn, + } +} + +func (tsk *BackgroundTask) Start(ctx context.Context) error { + timer := time.NewTimer(tsk.Interval) + + for { + select { + case <-ctx.Done(): + return nil + case <-timer.C: + timer.Reset(tsk.Interval) + tsk.Fn(ctx) + } + } +} diff --git 
a/backend/app/api/demo.go b/backend/app/api/demo.go index 538655d..183e0e0 100644 --- a/backend/app/api/demo.go +++ b/backend/app/api/demo.go @@ -2,57 +2,61 @@ package main import ( "context" - "encoding/csv" "strings" + "time" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/rs/zerolog/log" ) func (a *app) SetupDemo() { - csvText := `Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Model Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes + csvText := `HB.import_ref,HB.location,HB.labels,HB.quantity,HB.name,HB.description,HB.insured,HB.serial_number,HB.model_number,HB.manufacturer,HB.notes,HB.purchase_from,HB.purchase_price,HB.purchase_time,HB.lifetime_warranty,HB.warranty_expires,HB.warranty_details,HB.sold_to,HB.sold_price,HB.sold_time,HB.sold_notes ,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,"Zooz 700 Series Z-Wave Universal Relay ZEN17 for Awnings, Garage Doors, Sprinklers, and More | 2 NO-C-NC Relays (20A, 10A) | Signal Repeater | Hub Required (Compatible with SmartThings and Hubitat)",,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,,,,,, ,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,"Zooz Z-Wave Plus S2 Motion Sensor ZSE18 with Magnetic Mount, Works with Vera and SmartThings",,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,,,,,, ,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,"Zooz Z-Wave Plus Power Switch ZEN15 for 110V AC Units, Sump Pumps, Humidifiers, and More",,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,,,,,, ,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,"Ecolink Z-Wave PIR Motion Detector Pet Immune, White (PIRZWAVE2.5-ECO)",,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,,,,,, ,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,"Yale Security YRD226-ZW2-619 YRD226ZW2619 Touchscreen Deadbolt, Satin 
Nickel",,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,,,,,, -,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,"UltraPro Z-Wave Smart Rocker Light Dimmer with QuickFit and SimpleWire, 3-Way Ready, Compatible with Alexa, Google Assistant, ZWave Hub Required, Repeater/Range Extender, White Paddle Only, 39351",,,‎39351,Honeywell,,Amazon,65.98,09/30/0202,,,,,,, +,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,"UltraPro Z-Wave Smart Rocker Light Dimmer with QuickFit and SimpleWire, 3-Way Ready, Compatible with Alexa, Google Assistant, ZWave Hub Required, Repeater/Range Extender, White Paddle Only, 39351",,,39351,Honeywell,,Amazon,65.98,09/30/0202,,,,,,, ` - var ( - registration = services.UserRegistration{ - Email: "demo@example.com", - Name: "Demo", - Password: "demo", - } - ) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + registration := services.UserRegistration{ + Email: "demo@example.com", + Name: "Demo", + Password: "demo", + } // First check if we've already setup a demo user and skip if so - _, err := a.services.User.Login(context.Background(), registration.Email, registration.Password) + log.Debug().Msg("Checking if demo user already exists") + _, err := a.services.User.Login(ctx, registration.Email, registration.Password, false) if err == nil { + log.Info().Msg("Demo user already exists, skipping setup") return } - _, err = a.services.User.RegisterUser(context.Background(), registration) + log.Debug().Msg("Demo user does not exist, setting up demo") + _, err = a.services.User.RegisterUser(ctx, registration) if err != nil { log.Err(err).Msg("Failed to register demo user") log.Fatal().Msg("Failed to setup demo") } - token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password) - self, _ := a.services.User.GetSelf(context.Background(), token.Raw) - - // Read CSV Text - reader := csv.NewReader(strings.NewReader(csvText)) - reader.Comma = ',' - - 
records, err := reader.ReadAll() + token, err := a.services.User.Login(ctx, registration.Email, registration.Password, false) if err != nil { - log.Err(err).Msg("Failed to read CSV") + log.Err(err).Msg("Failed to login demo user") log.Fatal().Msg("Failed to setup demo") + return + } + self, err := a.services.User.GetSelf(ctx, token.Raw) + if err != nil { + log.Err(err).Msg("Failed to get self") + log.Fatal().Msg("Failed to setup demo") + return } - _, err = a.services.Items.CsvImport(context.Background(), self.GroupID, records) + _, err = a.services.Items.CsvImport(ctx, self.GroupID, strings.NewReader(csvText)) if err != nil { log.Err(err).Msg("Failed to import CSV") log.Fatal().Msg("Failed to setup demo") diff --git a/backend/app/api/handlers/debughandlers/debug.go b/backend/app/api/handlers/debughandlers/debug.go index ffba624..5f66fed 100644 --- a/backend/app/api/handlers/debughandlers/debug.go +++ b/backend/app/api/handlers/debughandlers/debug.go @@ -1,3 +1,4 @@ +// Package debughandlers provides handlers for debugging. package debughandlers import ( diff --git a/backend/app/api/handlers/v1/controller.go b/backend/app/api/handlers/v1/controller.go index f5790c8..eb60212 100644 --- a/backend/app/api/handlers/v1/controller.go +++ b/backend/app/api/handlers/v1/controller.go @@ -1,13 +1,38 @@ +// Package v1 provides the API handlers for version 1 of the API. 
package v1 import ( + "encoding/json" "net/http" + "time" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" + "github.com/rs/zerolog/log" + + "github.com/olahol/melody" ) +type Results[T any] struct { + Items []T `json:"items"` +} + +func WrapResults[T any](items []T) Results[T] { + return Results[T]{Items: items} +} + +type Wrapped struct { + Item interface{} `json:"item"` +} + +func Wrap(v any) Wrapped { + return Wrapped{Item: v} +} + func WithMaxUploadSize(maxUploadSize int64) func(*V1Controller) { return func(ctrl *V1Controller) { ctrl.maxUploadSize = maxUploadSize @@ -26,12 +51,20 @@ func WithRegistration(allowRegistration bool) func(*V1Controller) { } } +func WithSecureCookies(secure bool) func(*V1Controller) { + return func(ctrl *V1Controller) { + ctrl.cookieSecure = secure + } +} + type V1Controller struct { + cookieSecure bool repo *repo.AllRepos svc *services.AllServices maxUploadSize int64 isDemo bool allowRegistration bool + bus *eventbus.EventBus } type ( @@ -43,7 +76,7 @@ type ( BuildTime string `json:"buildTime"` } - ApiSummary struct { + APISummary struct { Healthy bool `json:"health"` Versions []string `json:"versions"` Title string `json:"title"` @@ -54,17 +87,18 @@ type ( } ) -func BaseUrlFunc(prefix string) func(s string) string { +func BaseURLFunc(prefix string) func(s string) string { return func(s string) string { return prefix + "/v1" + s } } -func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ...func(*V1Controller)) *V1Controller { +func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, bus *eventbus.EventBus, options ...func(*V1Controller)) *V1Controller { ctrl := &V1Controller{ repo: repos, svc: svc, 
allowRegistration: true, + bus: bus, } for _, opt := range options { @@ -75,20 +109,105 @@ func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options .. } // HandleBase godoc -// @Summary Retrieves the basic information about the API -// @Tags Base -// @Produce json -// @Success 200 {object} ApiSummary -// @Router /v1/status [GET] -func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) server.HandlerFunc { +// +// @Summary Application Info +// @Tags Base +// @Produce json +// @Success 200 {object} APISummary +// @Router /v1/status [GET] +func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { - return server.Respond(w, http.StatusOK, ApiSummary{ + return server.JSON(w, http.StatusOK, APISummary{ Healthy: ready(), - Title: "Go API Template", - Message: "Welcome to the Go API Template Application!", + Title: "Homebox", + Message: "Track, Manage, and Organize your Things", Build: build, Demo: ctrl.isDemo, AllowRegistration: ctrl.allowRegistration, }) } } + +// HandleCurrency godoc +// +// @Summary Currency +// @Tags Base +// @Produce json +// @Success 200 {object} currencies.Currency +// @Router /v1/currency [GET] +func (ctrl *V1Controller) HandleCurrency() errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + // Set Cache for 10 Minutes + w.Header().Set("Cache-Control", "max-age=600") + + return server.JSON(w, http.StatusOK, ctrl.svc.Currencies.Slice()) + } +} + +func (ctrl *V1Controller) HandleCacheWS() errchain.HandlerFunc { + type eventMsg struct { + Event string `json:"event"` + } + + m := melody.New() + + m.HandleConnect(func(s *melody.Session) { + auth := services.NewContext(s.Request.Context()) + s.Set("gid", auth.GID) + }) + + factory := func(e string) func(data any) { + return func(data any) { + eventData, ok := data.(eventbus.GroupMutationEvent) + if !ok { + log.Log().Msgf("invalid event data: %v", 
data) + return + } + + msg := &eventMsg{Event: e} + + jsonBytes, err := json.Marshal(msg) + if err != nil { + log.Log().Msgf("error marshaling event data %v: %v", data, err) + return + } + + _ = m.BroadcastFilter(jsonBytes, func(s *melody.Session) bool { + groupIDStr, ok := s.Get("gid") + if !ok { + return false + } + + GID := groupIDStr.(uuid.UUID) + return GID == eventData.GID + }) + } + } + + ctrl.bus.Subscribe(eventbus.EventLabelMutation, factory("label.mutation")) + ctrl.bus.Subscribe(eventbus.EventLocationMutation, factory("location.mutation")) + ctrl.bus.Subscribe(eventbus.EventItemMutation, factory("item.mutation")) + + // Persistent asynchronous ticker that keeps all websocket connections alive with periodic pings. + go func() { + const interval = 10 * time.Second + + ping := time.NewTicker(interval) + defer ping.Stop() + + for range ping.C { + msg := &eventMsg{Event: "ping"} + + pingBytes, err := json.Marshal(msg) + if err != nil { + log.Log().Msgf("error marshaling ping: %v", err) + } else { + _ = m.Broadcast(pingBytes) + } + } + }() + + return func(w http.ResponseWriter, r *http.Request) error { + return m.HandleRequest(w, r) + } +} diff --git a/backend/app/api/handlers/v1/partials.go b/backend/app/api/handlers/v1/partials.go index 763805f..5c81ad5 100644 --- a/backend/app/api/handlers/v1/partials.go +++ b/backend/app/api/handlers/v1/partials.go @@ -21,7 +21,7 @@ func (ctrl *V1Controller) routeID(r *http.Request) (uuid.UUID, error) { func (ctrl *V1Controller) routeUUID(r *http.Request, key string) (uuid.UUID, error) { ID, err := uuid.Parse(chi.URLParam(r, key)) if err != nil { - return uuid.Nil, validate.NewInvalidRouteKeyError(key) + return uuid.Nil, validate.NewRouteKeyError(key) } return ID, nil } diff --git a/backend/app/api/handlers/v1/v1_ctrl_actions.go b/backend/app/api/handlers/v1/v1_ctrl_actions.go index ea490c0..75f39a5 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_actions.go +++ b/backend/app/api/handlers/v1/v1_ctrl_actions.go @@ -1,11
+1,14 @@ package v1 import ( + "context" "net/http" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) @@ -13,44 +16,68 @@ type ActionAmountResult struct { Completed int `json:"completed"` } -// HandleGroupInvitationsCreate godoc -// @Summary Ensures all items in the database have an asset id -// @Tags Group -// @Produce json -// @Success 200 {object} ActionAmountResult -// @Router /v1/actions/ensure-asset-ids [Post] -// @Security Bearer -func (ctrl *V1Controller) HandleEnsureAssetID() server.HandlerFunc { +func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { ctx := services.NewContext(r.Context()) - totalCompleted, err := ctrl.svc.Items.EnsureAssetID(ctx, ctx.GID) + totalCompleted, err := fn(ctx, ctx.GID) if err != nil { - log.Err(err).Msg("failed to ensure asset id") + log.Err(err).Str("action_ref", ref).Msg("failed to run action") return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted}) + return server.JSON(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted}) } } +// HandleEnsureAssetID godoc +// +// @Summary Ensure Asset IDs +// @Description Ensures all items in the database have an asset ID +// @Tags Actions +// @Produce json +// @Success 200 {object} ActionAmountResult +// @Router /v1/actions/ensure-asset-ids [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleEnsureAssetID() errchain.HandlerFunc { + return actionHandlerFactory("ensure asset IDs", ctrl.svc.Items.EnsureAssetID) +} + +// HandleEnsureImportRefs godoc +// +// @Summary Ensures Import Refs +// @Description 
Ensures all items in the database have an import ref +// @Tags Actions +// @Produce json +// @Success 200 {object} ActionAmountResult +// @Router /v1/actions/ensure-import-refs [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleEnsureImportRefs() errchain.HandlerFunc { + return actionHandlerFactory("ensure import refs", ctrl.svc.Items.EnsureImportRef) +} + // HandleItemDateZeroOut godoc -// @Summary Resets all item date fields to the beginning of the day -// @Tags Group -// @Produce json -// @Success 200 {object} ActionAmountResult -// @Router /v1/actions/zero-item-time-fields [Post] -// @Security Bearer -func (ctrl *V1Controller) HandleItemDateZeroOut() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - totalCompleted, err := ctrl.repo.Items.ZeroOutTimeFields(ctx, ctx.GID) - if err != nil { - log.Err(err).Msg("failed to ensure asset id") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted}) - } +// +// @Summary Zero Out Time Fields +// @Description Resets all item date fields to the beginning of the day +// @Tags Actions +// @Produce json +// @Success 200 {object} ActionAmountResult +// @Router /v1/actions/zero-item-time-fields [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleItemDateZeroOut() errchain.HandlerFunc { + return actionHandlerFactory("zero out date time", ctrl.repo.Items.ZeroOutTimeFields) +} + +// HandleSetPrimaryPhotos godoc +// +// @Summary Set Primary Photos +// @Description Sets the first photo of each item as the primary photo +// @Tags Actions +// @Produce json +// @Success 200 {object} ActionAmountResult +// @Router /v1/actions/set-primary-photos [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleSetPrimaryPhotos() errchain.HandlerFunc { + return actionHandlerFactory("set primary photos", ctrl.repo.Items.SetPrimaryPhotos) } diff
--git a/backend/app/api/handlers/v1/v1_ctrl_assets.go b/backend/app/api/handlers/v1/v1_ctrl_assets.go index f91a009..91e9a3c 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_assets.go +++ b/backend/app/api/handlers/v1/v1_ctrl_assets.go @@ -9,26 +9,28 @@ import ( "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) -// HandleItemGet godocs -// @Summary Gets an item by Asset ID -// @Tags Assets -// @Produce json -// @Param id path string true "Asset ID" -// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{} -// @Router /v1/assets/{id} [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc { +// HandleAssetGet godocs +// +// @Summary Get Item by Asset ID +// @Tags Items +// @Produce json +// @Param id path string true "Asset ID" +// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{} +// @Router /v1/assets/{id} [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleAssetGet() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { ctx := services.NewContext(r.Context()) - assetIdParam := chi.URLParam(r, "id") - assetIdParam = strings.ReplaceAll(assetIdParam, "-", "") // Remove dashes + assetIDParam := chi.URLParam(r, "id") + assetIDParam = strings.ReplaceAll(assetIDParam, "-", "") // Remove dashes // Convert the asset ID to an int64 - assetId, err := strconv.ParseInt(assetIdParam, 10, 64) + assetID, err := strconv.ParseInt(assetIDParam, 10, 64) if err != nil { return err } @@ -37,7 +39,7 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc { if pageParam != "" { page, err = strconv.ParseInt(pageParam, 10, 64) if err != nil { - return server.Respond(w, http.StatusBadRequest, "Invalid page 
number") + return server.JSON(w, http.StatusBadRequest, "Invalid page number") } } @@ -46,15 +48,15 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc { if pageSizeParam != "" { pageSize, err = strconv.ParseInt(pageSizeParam, 10, 64) if err != nil { - return server.Respond(w, http.StatusBadRequest, "Invalid page size") + return server.JSON(w, http.StatusBadRequest, "Invalid page size") } } - items, err := ctrl.repo.Items.QueryByAssetID(r.Context(), ctx.GID, repo.AssetID(assetId), int(page), int(pageSize)) + items, err := ctrl.repo.Items.QueryByAssetID(r.Context(), ctx.GID, repo.AssetID(assetID), int(page), int(pageSize)) if err != nil { log.Err(err).Msg("failed to get item") return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, items) + return server.JSON(w, http.StatusOK, items) } } diff --git a/backend/app/api/handlers/v1/v1_ctrl_auth.go b/backend/app/api/handlers/v1/v1_ctrl_auth.go index a3dbc58..47b69fd 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_auth.go +++ b/backend/app/api/handlers/v1/v1_ctrl_auth.go @@ -3,15 +3,23 @@ package v1 import ( "errors" "net/http" + "strconv" "strings" "time" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) +const ( + cookieNameToken = "hb.auth.token" + cookieNameRemember = "hb.auth.remember" + cookieNameSession = "hb.auth.session" +) + type ( TokenResponse struct { Token string `json:"token"` @@ -20,62 +28,100 @@ type ( } LoginForm struct { - Username string `json:"username"` - Password string `json:"password"` + Username string `json:"username"` + Password string `json:"password"` + StayLoggedIn bool `json:"stayLoggedIn"` } ) +type CookieContents struct { + Token string + ExpiresAt time.Time + Remember bool +} + +func 
GetCookies(r *http.Request) (*CookieContents, error) { + cookie, err := r.Cookie(cookieNameToken) + if err != nil { + return nil, errors.New("authorization cookie is required") + } + + rememberCookie, err := r.Cookie(cookieNameRemember) + if err != nil { + return nil, errors.New("remember cookie is required") + } + + return &CookieContents{ + Token: cookie.Value, + ExpiresAt: cookie.Expires, + Remember: rememberCookie.Value == "true", + }, nil +} + +// AuthProvider is an interface that can be implemented by any authentication provider +// to extend authentication methods for the API. +type AuthProvider interface { + // Name returns the name of the authentication provider. This should be a unique name + // that is URL friendly. + // + // Example: "local", "ldap" + Name() string + // Authenticate is called when a user attempts to login to the API. The implementation + // should return an error if the user cannot be authenticated. If an error is returned + // the API controller will return a vague error message to the user. + // + // Authenticate should do the following: + // + // 1. Ensure that the user exists within the database (either create, or get) + // 2. On successful authentication, they must set the user cookies.
+ Authenticate(w http.ResponseWriter, r *http.Request) (services.UserAuthTokenDetail, error) +} + // HandleAuthLogin godoc -// @Summary User Login -// @Tags Authentication -// @Accept x-www-form-urlencoded -// @Accept application/json -// @Param username formData string false "string" example(admin@admin.com) -// @Param password formData string false "string" example(admin) -// @Produce json -// @Success 200 {object} TokenResponse -// @Router /v1/users/login [POST] -func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc { +// +// @Summary User Login +// @Tags Authentication +// @Accept x-www-form-urlencoded +// @Accept application/json +// @Param username formData string false "string" example(admin@admin.com) +// @Param password formData string false "string" example(admin) +// @Param payload body LoginForm true "Login Data" +// @Param provider query string false "auth provider" +// @Produce json +// @Success 200 {object} TokenResponse +// @Router /v1/users/login [POST] +func (ctrl *V1Controller) HandleAuthLogin(ps ...AuthProvider) errchain.HandlerFunc { + if len(ps) == 0 { + panic("no auth providers provided") + } + + providers := make(map[string]AuthProvider) + for _, p := range ps { + log.Info().Str("name", p.Name()).Msg("registering auth provider") + providers[p.Name()] = p + } + return func(w http.ResponseWriter, r *http.Request) error { - loginForm := &LoginForm{} - - switch r.Header.Get("Content-Type") { - case server.ContentFormUrlEncoded: - err := r.ParseForm() - if err != nil { - return server.Respond(w, http.StatusBadRequest, server.Wrap(err)) - } - - loginForm.Username = r.PostFormValue("username") - loginForm.Password = r.PostFormValue("password") - case server.ContentJSON: - err := server.Decode(r, loginForm) - if err != nil { - log.Err(err).Msg("failed to decode login form") - } - default: - return server.Respond(w, http.StatusBadRequest, errors.New("invalid content type")) + // Extract provider query + provider := 
r.URL.Query().Get("provider") + if provider == "" { + provider = "local" } - if loginForm.Username == "" || loginForm.Password == "" { - return validate.NewFieldErrors( - validate.FieldError{ - Field: "username", - Error: "username or password is empty", - }, - validate.FieldError{ - Field: "password", - Error: "username or password is empty", - }, - ) + // Get the provider + p, ok := providers[provider] + if !ok { + return validate.NewRequestError(errors.New("invalid auth provider"), http.StatusBadRequest) } - newToken, err := ctrl.svc.User.Login(r.Context(), strings.ToLower(loginForm.Username), loginForm.Password) + newToken, err := p.Authenticate(w, r) if err != nil { - return validate.NewRequestError(errors.New("authentication failed"), http.StatusInternalServerError) + log.Err(err).Msg("failed to authenticate") + return server.JSON(w, http.StatusInternalServerError, err.Error()) } - return server.Respond(w, http.StatusOK, TokenResponse{ + ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, true) + return server.JSON(w, http.StatusOK, TokenResponse{ Token: "Bearer " + newToken.Raw, ExpiresAt: newToken.ExpiresAt, AttachmentToken: newToken.AttachmentToken, @@ -84,12 +130,13 @@ func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc { } // HandleAuthLogout godoc -// @Summary User Logout -// @Tags Authentication -// @Success 204 -// @Router /v1/users/logout [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc { +// +// @Summary User Logout +// @Tags Authentication +// @Success 204 +// @Router /v1/users/logout [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleAuthLogout() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { token := services.UseTokenCtx(r.Context()) if token == "" { @@ -101,19 +148,21 @@ func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return 
server.Respond(w, http.StatusNoContent, nil) + ctrl.unsetCookies(w, noPort(r.Host)) + return server.JSON(w, http.StatusNoContent, nil) } } -// HandleAuthLogout godoc -// @Summary User Token Refresh -// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token. -// @Description This does not validate that the user still exists within the database. -// @Tags Authentication -// @Success 200 -// @Router /v1/users/refresh [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc { +// HandleAuthRefresh godoc +// +// @Summary User Token Refresh +// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token. +// @Description This does not validate that the user still exists within the database. +// @Tags Authentication +// @Success 200 +// @Router /v1/users/refresh [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleAuthRefresh() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { requestToken := services.UseTokenCtx(r.Context()) if requestToken == "" { @@ -125,6 +174,78 @@ func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc { return validate.NewUnauthorizedError() } - return server.Respond(w, http.StatusOK, newToken) + ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, false) + return server.JSON(w, http.StatusOK, newToken) } } + +func noPort(host string) string { + return strings.Split(host, ":")[0] +} + +func (ctrl *V1Controller) setCookies(w http.ResponseWriter, domain, token string, expires time.Time, remember bool) { + http.SetCookie(w, &http.Cookie{ + Name: cookieNameRemember, + Value: strconv.FormatBool(remember), + Expires: expires, + Domain: domain, + Secure: ctrl.cookieSecure, + HttpOnly: true, + Path: "/", + }) + + // Set HTTP only cookie + http.SetCookie(w, &http.Cookie{ + Name: cookieNameToken, + Value: token, + Expires: expires, + Domain: domain, + Secure: 
ctrl.cookieSecure, + HttpOnly: true, + Path: "/", + }) + + // Set Fake Session cookie + http.SetCookie(w, &http.Cookie{ + Name: cookieNameSession, + Value: "true", + Expires: expires, + Domain: domain, + Secure: ctrl.cookieSecure, + HttpOnly: false, + Path: "/", + }) +} + +func (ctrl *V1Controller) unsetCookies(w http.ResponseWriter, domain string) { + http.SetCookie(w, &http.Cookie{ + Name: cookieNameToken, + Value: "", + Expires: time.Unix(0, 0), + Domain: domain, + Secure: ctrl.cookieSecure, + HttpOnly: true, + Path: "/", + }) + + http.SetCookie(w, &http.Cookie{ + Name: cookieNameRemember, + Value: "false", + Expires: time.Unix(0, 0), + Domain: domain, + Secure: ctrl.cookieSecure, + HttpOnly: true, + Path: "/", + }) + + // Set Fake Session cookie + http.SetCookie(w, &http.Cookie{ + Name: cookieNameSession, + Value: "false", + Expires: time.Unix(0, 0), + Domain: domain, + Secure: ctrl.cookieSecure, + HttpOnly: false, + Path: "/", + }) +} diff --git a/backend/app/api/handlers/v1/v1_ctrl_group.go b/backend/app/api/handlers/v1/v1_ctrl_group.go index a3e8992..69bc024 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_group.go +++ b/backend/app/api/handlers/v1/v1_ctrl_group.go @@ -7,13 +7,13 @@ import ( "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" - "github.com/rs/zerolog/log" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" ) type ( GroupInvitationCreate struct { - Uses int `json:"uses"` + Uses int `json:"uses" validate:"required,min=1,max=100"` ExpiresAt time.Time `json:"expiresAt"` } @@ -25,93 +25,73 @@ type ( ) // HandleGroupGet godoc -// @Summary Get the current user's group -// @Tags Group -// @Produce json -// @Success 200 {object} repo.Group -// @Router /v1/groups [Get] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupGet() 
server.HandlerFunc { - return ctrl.handleGroupGeneral() +// +// @Summary Get Group +// @Tags Group +// @Produce json +// @Success 200 {object} repo.Group +// @Router /v1/groups [Get] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupGet() errchain.HandlerFunc { + fn := func(r *http.Request) (repo.Group, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Groups.GroupByID(auth, auth.GID) + } + + return adapters.Command(fn, http.StatusOK) } // HandleGroupUpdate godoc -// @Summary Updates some fields of the current users group -// @Tags Group -// @Produce json -// @Param payload body repo.GroupUpdate true "User Data" -// @Success 200 {object} repo.Group -// @Router /v1/groups [Put] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupUpdate() server.HandlerFunc { - return ctrl.handleGroupGeneral() -} +// +// @Summary Update Group +// @Tags Group +// @Produce json +// @Param payload body repo.GroupUpdate true "User Data" +// @Success 200 {object} repo.Group +// @Router /v1/groups [Put] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupUpdate() errchain.HandlerFunc { + fn := func(r *http.Request, body repo.GroupUpdate) (repo.Group, error) { + auth := services.NewContext(r.Context()) -func (ctrl *V1Controller) handleGroupGeneral() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - switch r.Method { - case http.MethodGet: - group, err := ctrl.repo.Groups.GroupByID(ctx, ctx.GID) - if err != nil { - log.Err(err).Msg("failed to get group") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, group) - - case http.MethodPut: - data := repo.GroupUpdate{} - if err := server.Decode(r, &data); err != nil { - return validate.NewRequestError(err, http.StatusBadRequest) - } - - group, err := ctrl.svc.Group.UpdateGroup(ctx, data) - if err != nil { - log.Err(err).Msg("failed to update group") - return 
validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, group) + ok := ctrl.svc.Currencies.IsSupported(body.Currency) + if !ok { + return repo.Group{}, validate.NewFieldErrors( + validate.NewFieldError("currency", "currency '"+body.Currency+"' is not supported"), + ) } - return nil + return ctrl.svc.Group.UpdateGroup(auth, body) } + + return adapters.Action(fn, http.StatusOK) } // HandleGroupInvitationsCreate godoc -// @Summary Get the current user -// @Tags Group -// @Produce json -// @Param payload body GroupInvitationCreate true "User Data" -// @Success 200 {object} GroupInvitation -// @Router /v1/groups/invitations [Post] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupInvitationsCreate() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - data := GroupInvitationCreate{} - if err := server.Decode(r, &data); err != nil { - log.Err(err).Msg("failed to decode user registration data") - return validate.NewRequestError(err, http.StatusBadRequest) +// +// @Summary Create Group Invitation +// @Tags Group +// @Produce json +// @Param payload body GroupInvitationCreate true "User Data" +// @Success 200 {object} GroupInvitation +// @Router /v1/groups/invitations [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupInvitationsCreate() errchain.HandlerFunc { + fn := func(r *http.Request, body GroupInvitationCreate) (GroupInvitation, error) { + if body.ExpiresAt.IsZero() { + body.ExpiresAt = time.Now().Add(time.Hour * 24) } - if data.ExpiresAt.IsZero() { - data.ExpiresAt = time.Now().Add(time.Hour * 24) - } + auth := services.NewContext(r.Context()) - ctx := services.NewContext(r.Context()) + token, err := ctrl.svc.Group.NewInvitation(auth, body.Uses, body.ExpiresAt) - token, err := ctrl.svc.Group.NewInvitation(ctx, data.Uses, data.ExpiresAt) - if err != nil { - log.Err(err).Msg("failed to create new token") - return validate.NewRequestError(err, 
http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusCreated, GroupInvitation{ + return GroupInvitation{ Token: token, - ExpiresAt: data.ExpiresAt, - Uses: data.Uses, - }) + ExpiresAt: body.ExpiresAt, + Uses: body.Uses, + }, err } + + return adapters.Action(fn, http.StatusCreated) } diff --git a/backend/app/api/handlers/v1/v1_ctrl_items.go b/backend/app/api/handlers/v1/v1_ctrl_items.go index b82d2f3..6a25663 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_items.go +++ b/backend/app/api/handlers/v1/v1_ctrl_items.go @@ -2,30 +2,36 @@ package v1 import ( "database/sql" + "encoding/csv" "errors" "net/http" "strings" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) // HandleItemsGetAll godoc -// @Summary Get All Items -// @Tags Items -// @Produce json -// @Param q query string false "search string" -// @Param page query int false "page number" -// @Param pageSize query int false "items per page" -// @Param labels query []string false "label Ids" collectionFormat(multi) -// @Param locations query []string false "location Ids" collectionFormat(multi) -// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{} -// @Router /v1/items [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc { +// +// @Summary Query All Items +// @Tags Items +// @Produce json +// @Param q query string false "search string" +// @Param page query int false "page number" +// @Param pageSize query int false "items per page" +// @Param labels query []string false "label Ids" collectionFormat(multi) +// @Param locations query []string false "location Ids" 
collectionFormat(multi) +// @Param parentIds query []string false "parent Ids" collectionFormat(multi) +// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{} +// @Router /v1/items [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleItemsGetAll() errchain.HandlerFunc { extractQuery := func(r *http.Request) repo.ItemQuery { params := r.URL.Query() @@ -51,8 +57,10 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc { Search: params.Get("q"), LocationIDs: queryUUIDList(params, "locations"), LabelIDs: queryUUIDList(params, "labels"), + ParentItemIDs: queryUUIDList(params, "parentIds"), IncludeArchived: queryBool(params.Get("includeArchived")), Fields: filterFieldItems(params["fields"]), + OrderBy: params.Get("orderBy"), } if strings.HasPrefix(v.Search, "#") { @@ -74,174 +82,211 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc { items, err := ctrl.repo.Items.QueryByGroup(ctx, ctx.GID, extractQuery(r)) if err != nil { if errors.Is(err, sql.ErrNoRows) { - return server.Respond(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{ + return server.JSON(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{ Items: []repo.ItemSummary{}, }) } log.Err(err).Msg("failed to get items") return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, items) + return server.JSON(w, http.StatusOK, items) } } +// HandleItemFullPath godoc +// +// @Summary Get the full path of an item +// @Tags Items +// @Produce json +// @Param id path string true "Item ID" +// @Success 200 {object} []repo.ItemPath +// @Router /v1/items/{id}/path [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleItemFullPath() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) ([]repo.ItemPath, error) { + auth := services.NewContext(r.Context()) + item, err := ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID) + if err != nil { + return nil, err + } + + paths, err := 
ctrl.repo.Locations.PathForLoc(auth, auth.GID, item.Location.ID) + if err != nil { + return nil, err + } + + if item.Parent != nil { + paths = append(paths, repo.ItemPath{ + Type: repo.ItemTypeItem, + ID: item.Parent.ID, + Name: item.Parent.Name, + }) + } + + paths = append(paths, repo.ItemPath{ + Type: repo.ItemTypeItem, + ID: item.ID, + Name: item.Name, + }) + + return paths, nil + } + + return adapters.CommandID("id", fn, http.StatusOK) +} + // HandleItemsCreate godoc -// @Summary Create a new item -// @Tags Items -// @Produce json -// @Param payload body repo.ItemCreate true "Item Data" -// @Success 200 {object} repo.ItemSummary -// @Router /v1/items [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleItemsCreate() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - createData := repo.ItemCreate{} - if err := server.Decode(r, &createData); err != nil { - log.Err(err).Msg("failed to decode request body") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - ctx := services.NewContext(r.Context()) - item, err := ctrl.svc.Items.Create(ctx, createData) - if err != nil { - log.Err(err).Msg("failed to create item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusCreated, item) +// +// @Summary Create Item +// @Tags Items +// @Produce json +// @Param payload body repo.ItemCreate true "Item Data" +// @Success 201 {object} repo.ItemSummary +// @Router /v1/items [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleItemsCreate() errchain.HandlerFunc { + fn := func(r *http.Request, body repo.ItemCreate) (repo.ItemOut, error) { + return ctrl.svc.Items.Create(services.NewContext(r.Context()), body) } + + return adapters.Action(fn, http.StatusCreated) } // HandleItemGet godocs -// @Summary Gets a item and fields -// @Tags Items -// @Produce json -// @Param id path string true "Item ID" -// @Success 200 {object} repo.ItemOut -// @Router 
/v1/items/{id} [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleItemGet() server.HandlerFunc { - return ctrl.handleItemsGeneral() +// +// @Summary Get Item +// @Tags Items +// @Produce json +// @Param id path string true "Item ID" +// @Success 200 {object} repo.ItemOut +// @Router /v1/items/{id} [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleItemGet() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (repo.ItemOut, error) { + auth := services.NewContext(r.Context()) + + return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID) + } + + return adapters.CommandID("id", fn, http.StatusOK) } // HandleItemDelete godocs -// @Summary deletes a item -// @Tags Items -// @Produce json -// @Param id path string true "Item ID" -// @Success 204 -// @Router /v1/items/{id} [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleItemDelete() server.HandlerFunc { - return ctrl.handleItemsGeneral() +// +// @Summary Delete Item +// @Tags Items +// @Produce json +// @Param id path string true "Item ID" +// @Success 204 +// @Router /v1/items/{id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleItemDelete() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (any, error) { + auth := services.NewContext(r.Context()) + err := ctrl.repo.Items.DeleteByGroup(auth, auth.GID, ID) + return nil, err + } + + return adapters.CommandID("id", fn, http.StatusNoContent) } // HandleItemUpdate godocs -// @Summary updates a item -// @Tags Items -// @Produce json -// @Param id path string true "Item ID" -// @Param payload body repo.ItemUpdate true "Item Data" -// @Success 200 {object} repo.ItemOut -// @Router /v1/items/{id} [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleItemUpdate() server.HandlerFunc { - return ctrl.handleItemsGeneral() +// +// @Summary Update Item +// @Tags Items +// @Produce json +// @Param id path string true "Item ID" +// @Param payload body repo.ItemUpdate true "Item Data" +// @Success 200 
{object} repo.ItemOut +// @Router /v1/items/{id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleItemUpdate() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, body repo.ItemUpdate) (repo.ItemOut, error) { + auth := services.NewContext(r.Context()) + + body.ID = ID + return ctrl.repo.Items.UpdateByGroup(auth, auth.GID, body) + } + + return adapters.ActionID("id", fn, http.StatusOK) } -func (ctrl *V1Controller) handleItemsGeneral() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - ID, err := ctrl.routeID(r) +// HandleItemPatch godocs +// +// @Summary Update Item +// @Tags Items +// @Produce json +// @Param id path string true "Item ID" +// @Param payload body repo.ItemPatch true "Item Data" +// @Success 200 {object} repo.ItemOut +// @Router /v1/items/{id} [Patch] +// @Security Bearer +func (ctrl *V1Controller) HandleItemPatch() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, body repo.ItemPatch) (repo.ItemOut, error) { + auth := services.NewContext(r.Context()) + + body.ID = ID + err := ctrl.repo.Items.Patch(auth, auth.GID, ID, body) if err != nil { - return err + return repo.ItemOut{}, err } - switch r.Method { - case http.MethodGet: - items, err := ctrl.repo.Items.GetOneByGroup(r.Context(), ctx.GID, ID) - if err != nil { - log.Err(err).Msg("failed to get item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, items) - case http.MethodDelete: - err = ctrl.repo.Items.DeleteByGroup(r.Context(), ctx.GID, ID) - if err != nil { - log.Err(err).Msg("failed to delete item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusNoContent, nil) - case http.MethodPut: - body := repo.ItemUpdate{} - if err := server.Decode(r, &body); err != nil { - log.Err(err).Msg("failed to decode request body") - return 
validate.NewRequestError(err, http.StatusInternalServerError) - } - body.ID = ID - result, err := ctrl.repo.Items.UpdateByGroup(r.Context(), ctx.GID, body) - if err != nil { - log.Err(err).Msg("failed to update item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, result) - } - - return nil + return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID) } + + return adapters.ActionID("id", fn, http.StatusOK) } // HandleGetAllCustomFieldNames godocs -// @Summary imports items into the database -// @Tags Items -// @Produce json -// @Success 200 -// @Router /v1/items/fields [GET] -// @Success 200 {object} []string -// @Security Bearer -func (ctrl *V1Controller) HandleGetAllCustomFieldNames() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - v, err := ctrl.repo.Items.GetAllCustomFieldNames(r.Context(), ctx.GID) - if err != nil { - return err - } - - return server.Respond(w, http.StatusOK, v) +// +// @Summary Get All Custom Field Names +// @Tags Items +// @Produce json +// @Success 200 +// @Router /v1/items/fields [GET] +// @Success 200 {object} []string +// @Security Bearer +func (ctrl *V1Controller) HandleGetAllCustomFieldNames() errchain.HandlerFunc { + fn := func(r *http.Request) ([]string, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Items.GetAllCustomFieldNames(auth, auth.GID) } + + return adapters.Command(fn, http.StatusOK) } // HandleGetAllCustomFieldValues godocs -// @Summary imports items into the database -// @Tags Items -// @Produce json -// @Success 200 -// @Router /v1/items/fields/values [GET] -// @Success 200 {object} []string -// @Security Bearer -func (ctrl *V1Controller) HandleGetAllCustomFieldValues() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - v, err := 
ctrl.repo.Items.GetAllCustomFieldValues(r.Context(), ctx.GID, r.URL.Query().Get("field")) - if err != nil { - return err - } - - return server.Respond(w, http.StatusOK, v) +// +// @Summary Get All Custom Field Values +// @Tags Items +// @Produce json +// @Success 200 +// @Router /v1/items/fields/values [GET] +// @Success 200 {object} []string +// @Security Bearer +func (ctrl *V1Controller) HandleGetAllCustomFieldValues() errchain.HandlerFunc { + type query struct { + Field string `schema:"field" validate:"required"` } + + fn := func(r *http.Request, q query) ([]string, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Items.GetAllCustomFieldValues(auth, auth.GID, q.Field) + } + + return adapters.Query(fn, http.StatusOK) } // HandleItemsImport godocs -// @Summary imports items into the database -// @Tags Items -// @Produce json -// @Success 204 -// @Param csv formData file true "Image to upload" -// @Router /v1/items/import [Post] -// @Security Bearer -func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc { +// +// @Summary Import Items +// @Tags Items +// @Produce json +// @Success 204 +// @Param csv formData file true "Image to upload" +// @Router /v1/items/import [Post] +// @Security Bearer +func (ctrl *V1Controller) HandleItemsImport() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { err := r.ParseMultipartForm(ctrl.maxUploadSize << 20) if err != nil { @@ -255,20 +300,40 @@ func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - data, err := services.ReadCsv(file) - if err != nil { - log.Err(err).Msg("failed to read csv") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - user := services.UseUserCtx(r.Context()) - _, err = ctrl.svc.Items.CsvImport(r.Context(), user.GroupID, data) + _, err = ctrl.svc.Items.CsvImport(r.Context(), user.GroupID, file) if err != nil { log.Err(err).Msg("failed 
to import items") return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusNoContent, nil) + return server.JSON(w, http.StatusNoContent, nil) + } +} + +// HandleItemsExport godocs +// +// @Summary Export Items +// @Tags Items +// @Success 200 {string} string "text/csv" +// @Router /v1/items/export [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleItemsExport() errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + ctx := services.NewContext(r.Context()) + + csvData, err := ctrl.svc.Items.ExportTSV(r.Context(), ctx.GID) + if err != nil { + log.Err(err).Msg("failed to export items") + return validate.NewRequestError(err, http.StatusInternalServerError) + } + + w.Header().Set("Content-Type", "text/tsv") + w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv") + + writer := csv.NewWriter(w) + writer.Comma = '\t' + return writer.WriteAll(csvData) } } diff --git a/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go b/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go index 00545a4..ae2782a 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go +++ b/backend/app/api/handlers/v1/v1_ctrl_items_attachments.go @@ -3,12 +3,15 @@ package v1 import ( "errors" "net/http" + "path/filepath" + "strings" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) @@ -18,25 +21,25 @@ type ( } ) -// HandleItemsImport godocs -// @Summary imports items into the database -// @Tags Items Attachments -// @Produce json -// @Param id path string true "Item ID" -// @Param file formData file true "File attachment" -// 
@Param type formData string true "Type of file" -// @Param name formData string true "name of the file including extension" -// @Success 200 {object} repo.ItemOut -// @Failure 422 {object} server.ErrorResponse -// @Router /v1/items/{id}/attachments [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc { +// HandleItemAttachmentCreate godocs +// +// @Summary Create Item Attachment +// @Tags Items Attachments +// @Produce json +// @Param id path string true "Item ID" +// @Param file formData file true "File attachment" +// @Param type formData string true "Type of file" +// @Param name formData string true "name of the file including extension" +// @Success 200 {object} repo.ItemOut +// @Failure 422 {object} validate.ErrorResponse +// @Router /v1/items/{id}/attachments [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleItemAttachmentCreate() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { err := r.ParseMultipartForm(ctrl.maxUploadSize << 20) if err != nil { log.Err(err).Msg("failed to parse multipart form") return validate.NewRequestError(errors.New("failed to parse multipart form"), http.StatusBadRequest) - } errs := validate.NewFieldErrors() @@ -60,12 +63,20 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc { } if !errs.Nil() { - return server.Respond(w, http.StatusUnprocessableEntity, errs) + return server.JSON(w, http.StatusUnprocessableEntity, errs) } attachmentType := r.FormValue("type") if attachmentType == "" { - attachmentType = attachment.TypeAttachment.String() + // Attempt to auto-detect the type of the file + ext := filepath.Ext(attachmentName) + + switch strings.ToLower(ext) { + case ".jpg", ".jpeg", ".png", ".webp", ".gif", ".bmp", ".tiff": + attachmentType = attachment.TypePhoto.String() + default: + attachmentType = attachment.TypeAttachment.String() + } } id, err := ctrl.routeID(r) @@ -87,45 +98,48 @@ func (ctrl *V1Controller) 
HandleItemAttachmentCreate() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusCreated, item) + return server.JSON(w, http.StatusCreated, item) } } // HandleItemAttachmentGet godocs -// @Summary retrieves an attachment for an item -// @Tags Items Attachments -// @Produce application/octet-stream -// @Param id path string true "Item ID" -// @Param attachment_id path string true "Attachment ID" -// @Success 200 {object} ItemAttachmentToken -// @Router /v1/items/{id}/attachments/{attachment_id} [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleItemAttachmentGet() server.HandlerFunc { +// +// @Summary Get Item Attachment +// @Tags Items Attachments +// @Produce application/octet-stream +// @Param id path string true "Item ID" +// @Param attachment_id path string true "Attachment ID" +// @Success 200 {object} ItemAttachmentToken +// @Router /v1/items/{id}/attachments/{attachment_id} [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleItemAttachmentGet() errchain.HandlerFunc { return ctrl.handleItemAttachmentsHandler } // HandleItemAttachmentDelete godocs -// @Summary retrieves an attachment for an item -// @Tags Items Attachments -// @Param id path string true "Item ID" -// @Param attachment_id path string true "Attachment ID" -// @Success 204 -// @Router /v1/items/{id}/attachments/{attachment_id} [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleItemAttachmentDelete() server.HandlerFunc { +// +// @Summary Delete Item Attachment +// @Tags Items Attachments +// @Param id path string true "Item ID" +// @Param attachment_id path string true "Attachment ID" +// @Success 204 +// @Router /v1/items/{id}/attachments/{attachment_id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleItemAttachmentDelete() errchain.HandlerFunc { return ctrl.handleItemAttachmentsHandler } // HandleItemAttachmentUpdate godocs -// @Summary retrieves an attachment for an item -// 
@Tags Items Attachments -// @Param id path string true "Item ID" -// @Param attachment_id path string true "Attachment ID" -// @Param payload body repo.ItemAttachmentUpdate true "Attachment Update" -// @Success 200 {object} repo.ItemOut -// @Router /v1/items/{id}/attachments/{attachment_id} [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleItemAttachmentUpdate() server.HandlerFunc { +// +// @Summary Update Item Attachment +// @Tags Items Attachments +// @Param id path string true "Item ID" +// @Param attachment_id path string true "Attachment ID" +// @Param payload body repo.ItemAttachmentUpdate true "Attachment Update" +// @Success 200 {object} repo.ItemOut +// @Router /v1/items/{id}/attachments/{attachment_id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleItemAttachmentUpdate() errchain.HandlerFunc { return ctrl.handleItemAttachmentsHandler } @@ -160,7 +174,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusNoContent, nil) + return server.JSON(w, http.StatusNoContent, nil) // Update Attachment Handler case http.MethodPut: @@ -178,7 +192,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, val) + return server.JSON(w, http.StatusOK, val) } return nil diff --git a/backend/app/api/handlers/v1/v1_ctrl_labels.go b/backend/app/api/handlers/v1/v1_ctrl_labels.go index 2551b46..dae23db 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_labels.go +++ b/backend/app/api/handlers/v1/v1_ctrl_labels.go @@ -3,141 +3,100 @@ package v1 import ( "net/http" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" - "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/repo" - 
"github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" - "github.com/rs/zerolog/log" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" ) // HandleLabelsGetAll godoc -// @Summary Get All Labels -// @Tags Labels -// @Produce json -// @Success 200 {object} server.Results{items=[]repo.LabelOut} -// @Router /v1/labels [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleLabelsGetAll() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - user := services.UseUserCtx(r.Context()) - labels, err := ctrl.repo.Labels.GetAll(r.Context(), user.GroupID) - if err != nil { - log.Err(err).Msg("error getting labels") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, server.Results{Items: labels}) +// +// @Summary Get All Labels +// @Tags Labels +// @Produce json +// @Success 200 {object} []repo.LabelOut +// @Router /v1/labels [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleLabelsGetAll() errchain.HandlerFunc { + fn := func(r *http.Request) ([]repo.LabelSummary, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Labels.GetAll(auth, auth.GID) } + + return adapters.Command(fn, http.StatusOK) } // HandleLabelsCreate godoc -// @Summary Create a new label -// @Tags Labels -// @Produce json -// @Param payload body repo.LabelCreate true "Label Data" -// @Success 200 {object} repo.LabelSummary -// @Router /v1/labels [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleLabelsCreate() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - createData := repo.LabelCreate{} - if err := server.Decode(r, &createData); err != nil { - log.Err(err).Msg("error decoding label create data") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - user := services.UseUserCtx(r.Context()) - label, err := 
ctrl.repo.Labels.Create(r.Context(), user.GroupID, createData) - if err != nil { - log.Err(err).Msg("error creating label") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusCreated, label) +// +// @Summary Create Label +// @Tags Labels +// @Produce json +// @Param payload body repo.LabelCreate true "Label Data" +// @Success 200 {object} repo.LabelSummary +// @Router /v1/labels [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleLabelsCreate() errchain.HandlerFunc { + fn := func(r *http.Request, data repo.LabelCreate) (repo.LabelOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Labels.Create(auth, auth.GID, data) } + + return adapters.Action(fn, http.StatusCreated) } // HandleLabelDelete godocs -// @Summary deletes a label -// @Tags Labels -// @Produce json -// @Param id path string true "Label ID" -// @Success 204 -// @Router /v1/labels/{id} [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleLabelDelete() server.HandlerFunc { - return ctrl.handleLabelsGeneral() +// +// @Summary Delete Label +// @Tags Labels +// @Produce json +// @Param id path string true "Label ID" +// @Success 204 +// @Router /v1/labels/{id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleLabelDelete() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (any, error) { + auth := services.NewContext(r.Context()) + err := ctrl.repo.Labels.DeleteByGroup(auth, auth.GID, ID) + return nil, err + } + + return adapters.CommandID("id", fn, http.StatusNoContent) } // HandleLabelGet godocs -// @Summary Gets a label and fields -// @Tags Labels -// @Produce json -// @Param id path string true "Label ID" -// @Success 200 {object} repo.LabelOut -// @Router /v1/labels/{id} [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleLabelGet() server.HandlerFunc { - return ctrl.handleLabelsGeneral() +// +// @Summary Get Label +// @Tags Labels +// @Produce json +// @Param id 
path string true "Label ID" +// @Success 200 {object} repo.LabelOut +// @Router /v1/labels/{id} [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleLabelGet() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (repo.LabelOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Labels.GetOneByGroup(auth, auth.GID, ID) + } + + return adapters.CommandID("id", fn, http.StatusOK) } // HandleLabelUpdate godocs -// @Summary updates a label -// @Tags Labels -// @Produce json -// @Param id path string true "Label ID" -// @Success 200 {object} repo.LabelOut -// @Router /v1/labels/{id} [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleLabelUpdate() server.HandlerFunc { - return ctrl.handleLabelsGeneral() -} - -func (ctrl *V1Controller) handleLabelsGeneral() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - ID, err := ctrl.routeID(r) - if err != nil { - return err - } - - switch r.Method { - case http.MethodGet: - labels, err := ctrl.repo.Labels.GetOneByGroup(r.Context(), ctx.GID, ID) - if err != nil { - if ent.IsNotFound(err) { - log.Err(err). - Str("id", ID.String()). 
- Msg("label not found") - return validate.NewRequestError(err, http.StatusNotFound) - } - log.Err(err).Msg("error getting label") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, labels) - - case http.MethodDelete: - err = ctrl.repo.Labels.DeleteByGroup(ctx, ctx.GID, ID) - if err != nil { - log.Err(err).Msg("error deleting label") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusNoContent, nil) - - case http.MethodPut: - body := repo.LabelUpdate{} - if err := server.Decode(r, &body); err != nil { - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - body.ID = ID - result, err := ctrl.repo.Labels.UpdateByGroup(ctx, ctx.GID, body) - if err != nil { - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, result) - } - - return nil +// +// @Summary Update Label +// @Tags Labels +// @Produce json +// @Param id path string true "Label ID" +// @Success 200 {object} repo.LabelOut +// @Router /v1/labels/{id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleLabelUpdate() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, data repo.LabelUpdate) (repo.LabelOut, error) { + auth := services.NewContext(r.Context()) + data.ID = ID + return ctrl.repo.Labels.UpdateByGroup(auth, auth.GID, data) } + + return adapters.ActionID("id", fn, http.StatusOK) } diff --git a/backend/app/api/handlers/v1/v1_ctrl_locations.go b/backend/app/api/handlers/v1/v1_ctrl_locations.go index 8f71ab9..d84ce31 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_locations.go +++ b/backend/app/api/handlers/v1/v1_ctrl_locations.go @@ -3,186 +3,120 @@ package v1 import ( "net/http" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" - "github.com/hay-kot/homebox/backend/internal/data/ent" 
"github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" - "github.com/rs/zerolog/log" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" ) // HandleLocationTreeQuery godoc -// @Summary Get All Locations -// @Tags Locations -// @Produce json -// @Param withItems query bool false "include items in response tree" -// @Success 200 {object} server.Results{items=[]repo.TreeItem} -// @Router /v1/locations/tree [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationTreeQuery() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - user := services.UseUserCtx(r.Context()) - - q := r.URL.Query() - - withItems := queryBool(q.Get("withItems")) - - locTree, err := ctrl.repo.Locations.Tree( - r.Context(), - user.GroupID, - repo.TreeQuery{ - WithItems: withItems, - }, - ) - if err != nil { - log.Err(err).Msg("failed to get locations tree") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, server.Results{Items: locTree}) +// +// @Summary Get Locations Tree +// @Tags Locations +// @Produce json +// @Param withItems query bool false "include items in response tree" +// @Success 200 {object} []repo.TreeItem +// @Router /v1/locations/tree [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationTreeQuery() errchain.HandlerFunc { + fn := func(r *http.Request, query repo.TreeQuery) ([]repo.TreeItem, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Locations.Tree(auth, auth.GID, query) } + + return adapters.Query(fn, http.StatusOK) } // HandleLocationGetAll godoc -// @Summary Get All Locations -// @Tags Locations -// @Produce json -// @Param filterChildren query bool false "Filter locations with parents" -// @Success 200 {object} server.Results{items=[]repo.LocationOutCount} -// @Router 
/v1/locations [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationGetAll() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - user := services.UseUserCtx(r.Context()) - - q := r.URL.Query() - - filter := repo.LocationQuery{ - FilterChildren: queryBool(q.Get("filterChildren")), - } - - locations, err := ctrl.repo.Locations.GetAll(r.Context(), user.GroupID, filter) - if err != nil { - log.Err(err).Msg("failed to get locations") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, server.Results{Items: locations}) +// +// @Summary Get All Locations +// @Tags Locations +// @Produce json +// @Param filterChildren query bool false "Filter locations with parents" +// @Success 200 {object} []repo.LocationOutCount +// @Router /v1/locations [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationGetAll() errchain.HandlerFunc { + fn := func(r *http.Request, q repo.LocationQuery) ([]repo.LocationOutCount, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Locations.GetAll(auth, auth.GID, q) } + + return adapters.Query(fn, http.StatusOK) } // HandleLocationCreate godoc -// @Summary Create a new location -// @Tags Locations -// @Produce json -// @Param payload body repo.LocationCreate true "Location Data" -// @Success 200 {object} repo.LocationSummary -// @Router /v1/locations [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationCreate() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - createData := repo.LocationCreate{} - if err := server.Decode(r, &createData); err != nil { - log.Err(err).Msg("failed to decode location create data") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - user := services.UseUserCtx(r.Context()) - location, err := ctrl.repo.Locations.Create(r.Context(), user.GroupID, createData) - if err != nil { - log.Err(err).Msg("failed to 
create location") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusCreated, location) +// +// @Summary Create Location +// @Tags Locations +// @Produce json +// @Param payload body repo.LocationCreate true "Location Data" +// @Success 200 {object} repo.LocationSummary +// @Router /v1/locations [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationCreate() errchain.HandlerFunc { + fn := func(r *http.Request, createData repo.LocationCreate) (repo.LocationOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Locations.Create(auth, auth.GID, createData) } + + return adapters.Action(fn, http.StatusCreated) } -// HandleLocationDelete godocs -// @Summary deletes a location -// @Tags Locations -// @Produce json -// @Param id path string true "Location ID" -// @Success 204 -// @Router /v1/locations/{id} [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationDelete() server.HandlerFunc { - return ctrl.handleLocationGeneral() -} - -// HandleLocationGet godocs -// @Summary Gets a location and fields -// @Tags Locations -// @Produce json -// @Param id path string true "Location ID" -// @Success 200 {object} repo.LocationOut -// @Router /v1/locations/{id} [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationGet() server.HandlerFunc { - return ctrl.handleLocationGeneral() -} - -// HandleLocationUpdate godocs -// @Summary updates a location -// @Tags Locations -// @Produce json -// @Param id path string true "Location ID" -// @Param payload body repo.LocationUpdate true "Location Data" -// @Success 200 {object} repo.LocationOut -// @Router /v1/locations/{id} [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleLocationUpdate() server.HandlerFunc { - return ctrl.handleLocationGeneral() -} - -func (ctrl *V1Controller) handleLocationGeneral() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := 
services.NewContext(r.Context()) - ID, err := ctrl.routeID(r) - if err != nil { - return err - } - - switch r.Method { - case http.MethodGet: - location, err := ctrl.repo.Locations.GetOneByGroup(r.Context(), ctx.GID, ID) - if err != nil { - l := log.Err(err). - Str("ID", ID.String()). - Str("GID", ctx.GID.String()) - - if ent.IsNotFound(err) { - l.Msg("location not found") - return validate.NewRequestError(err, http.StatusNotFound) - } - - l.Msg("failed to get location") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, location) - case http.MethodPut: - body := repo.LocationUpdate{} - if err := server.Decode(r, &body); err != nil { - log.Err(err).Msg("failed to decode location update data") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - body.ID = ID - - result, err := ctrl.repo.Locations.UpdateOneByGroup(r.Context(), ctx.GID, ID, body) - if err != nil { - log.Err(err).Msg("failed to update location") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, result) - case http.MethodDelete: - err = ctrl.repo.Locations.DeleteByGroup(r.Context(), ctx.GID, ID) - if err != nil { - log.Err(err).Msg("failed to delete location") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusNoContent, nil) - } - return nil +// HandleLocationDelete godoc +// +// @Summary Delete Location +// @Tags Locations +// @Produce json +// @Param id path string true "Location ID" +// @Success 204 +// @Router /v1/locations/{id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationDelete() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (any, error) { + auth := services.NewContext(r.Context()) + err := ctrl.repo.Locations.DeleteByGroup(auth, auth.GID, ID) + return nil, err } + + return adapters.CommandID("id", fn, http.StatusNoContent) +} + +// 
HandleLocationGet godoc +// +// @Summary Get Location +// @Tags Locations +// @Produce json +// @Param id path string true "Location ID" +// @Success 200 {object} repo.LocationOut +// @Router /v1/locations/{id} [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationGet() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (repo.LocationOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Locations.GetOneByGroup(auth, auth.GID, ID) + } + + return adapters.CommandID("id", fn, http.StatusOK) +} + +// HandleLocationUpdate godoc +// +// @Summary Update Location +// @Tags Locations +// @Produce json +// @Param id path string true "Location ID" +// @Param payload body repo.LocationUpdate true "Location Data" +// @Success 200 {object} repo.LocationOut +// @Router /v1/locations/{id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleLocationUpdate() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, body repo.LocationUpdate) (repo.LocationOut, error) { + auth := services.NewContext(r.Context()) + body.ID = ID + return ctrl.repo.Locations.UpdateByGroup(auth, auth.GID, ID, body) + } + + return adapters.ActionID("id", fn, http.StatusOK) } diff --git a/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go b/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go index 3f3f1a1..e94c12a 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go +++ b/backend/app/api/handlers/v1/v1_ctrl_maint_entry.go @@ -3,123 +3,80 @@ package v1 import ( "net/http" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" - "github.com/rs/zerolog/log" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" ) -// HandleMaintenanceGetLog godoc -// @Summary Get Maintenance Log -// @Tags 
Maintenance -// @Produce json -// @Success 200 {object} repo.MaintenanceLog -// @Router /v1/items/{id}/maintenance [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleMaintenanceLogGet() server.HandlerFunc { - return ctrl.handleMaintenanceLog() +// HandleMaintenanceLogGet godoc +// +// @Summary Get Maintenance Log +// @Tags Maintenance +// @Produce json +// @Success 200 {object} repo.MaintenanceLog +// @Router /v1/items/{id}/maintenance [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleMaintenanceLogGet() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, q repo.MaintenanceLogQuery) (repo.MaintenanceLog, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.MaintEntry.GetLog(auth, auth.GID, ID, q) + } + + return adapters.QueryID("id", fn, http.StatusOK) } // HandleMaintenanceEntryCreate godoc -// @Summary Create Maintenance Entry -// @Tags Maintenance -// @Produce json -// @Param payload body repo.MaintenanceEntryCreate true "Entry Data" -// @Success 200 {object} repo.MaintenanceEntry -// @Router /v1/items/{id}/maintenance [POST] -// @Security Bearer -func (ctrl *V1Controller) HandleMaintenanceEntryCreate() server.HandlerFunc { - return ctrl.handleMaintenanceLog() +// +// @Summary Create Maintenance Entry +// @Tags Maintenance +// @Produce json +// @Param payload body repo.MaintenanceEntryCreate true "Entry Data" +// @Success 201 {object} repo.MaintenanceEntry +// @Router /v1/items/{id}/maintenance [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleMaintenanceEntryCreate() errchain.HandlerFunc { + fn := func(r *http.Request, itemID uuid.UUID, body repo.MaintenanceEntryCreate) (repo.MaintenanceEntry, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.MaintEntry.Create(auth, itemID, body) + } + + return adapters.ActionID("id", fn, http.StatusCreated) } // HandleMaintenanceEntryDelete godoc -// @Summary Delete Maintenance Entry -// @Tags Maintenance -// @Produce json -// @Success 204 -// 
@Router /v1/items/{id}/maintenance/{entry_id} [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleMaintenanceEntryDelete() server.HandlerFunc { - return ctrl.handleMaintenanceLog() +// +// @Summary Delete Maintenance Entry +// @Tags Maintenance +// @Produce json +// @Success 204 +// @Router /v1/items/{id}/maintenance/{entry_id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleMaintenanceEntryDelete() errchain.HandlerFunc { + fn := func(r *http.Request, entryID uuid.UUID) (any, error) { + auth := services.NewContext(r.Context()) + err := ctrl.repo.MaintEntry.Delete(auth, entryID) + return nil, err + } + + return adapters.CommandID("entry_id", fn, http.StatusNoContent) } // HandleMaintenanceEntryUpdate godoc -// @Summary Update Maintenance Entry -// @Tags Maintenance -// @Produce json -// @Param payload body repo.MaintenanceEntryUpdate true "Entry Data" -// @Success 200 {object} repo.MaintenanceEntry -// @Router /v1/items/{id}/maintenance/{entry_id} [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() server.HandlerFunc { - return ctrl.handleMaintenanceLog() -} - -func (ctrl *V1Controller) handleMaintenanceLog() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - itemID, err := ctrl.routeID(r) - if err != nil { - return err - } - - switch r.Method { - case http.MethodGet: - mlog, err := ctrl.repo.MaintEntry.GetLog(ctx, itemID) - if err != nil { - log.Err(err).Msg("failed to get items") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - return server.Respond(w, http.StatusOK, mlog) - case http.MethodPost: - var create repo.MaintenanceEntryCreate - err := server.Decode(r, &create) - if err != nil { - return validate.NewRequestError(err, http.StatusBadRequest) - } - - entry, err := ctrl.repo.MaintEntry.Create(ctx, itemID, create) - if err != nil { - log.Err(err).Msg("failed to create item") - return 
validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusCreated, entry) - case http.MethodPut: - entryID, err := ctrl.routeUUID(r, "entry_id") - if err != nil { - return err - } - - var update repo.MaintenanceEntryUpdate - err = server.Decode(r, &update) - if err != nil { - return validate.NewRequestError(err, http.StatusBadRequest) - } - - entry, err := ctrl.repo.MaintEntry.Update(ctx, entryID, update) - if err != nil { - log.Err(err).Msg("failed to update item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, entry) - case http.MethodDelete: - entryID, err := ctrl.routeUUID(r, "entry_id") - if err != nil { - return err - } - - err = ctrl.repo.MaintEntry.Delete(ctx, entryID) - if err != nil { - log.Err(err).Msg("failed to delete item") - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusNoContent, nil) - } - - return nil +// +// @Summary Update Maintenance Entry +// @Tags Maintenance +// @Produce json +// @Param payload body repo.MaintenanceEntryUpdate true "Entry Data" +// @Success 200 {object} repo.MaintenanceEntry +// @Router /v1/items/{id}/maintenance/{entry_id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() errchain.HandlerFunc { + fn := func(r *http.Request, entryID uuid.UUID, body repo.MaintenanceEntryUpdate) (repo.MaintenanceEntry, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.MaintEntry.Update(auth, entryID, body) } + + return adapters.ActionID("entry_id", fn, http.StatusOK) } diff --git a/backend/app/api/handlers/v1/v1_ctrl_notifiers.go b/backend/app/api/handlers/v1/v1_ctrl_notifiers.go new file mode 100644 index 0000000..3c64dc7 --- /dev/null +++ b/backend/app/api/handlers/v1/v1_ctrl_notifiers.go @@ -0,0 +1,105 @@ +package v1 + +import ( + "net/http" + + "github.com/containrrr/shoutrrr" + 
 "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" +) + +// HandleGetUserNotifiers godoc +// +// @Summary Get Notifiers +// @Tags Notifiers +// @Produce json +// @Success 200 {object} []repo.NotifierOut +// @Router /v1/notifiers [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleGetUserNotifiers() errchain.HandlerFunc { + fn := func(r *http.Request, _ struct{}) ([]repo.NotifierOut, error) { + user := services.UseUserCtx(r.Context()) + return ctrl.repo.Notifiers.GetByUser(r.Context(), user.ID) + } + + return adapters.Query(fn, http.StatusOK) +} + +// HandleCreateNotifier godoc +// +// @Summary Create Notifier +// @Tags Notifiers +// @Produce json +// @Param payload body repo.NotifierCreate true "Notifier Data" +// @Success 200 {object} repo.NotifierOut +// @Router /v1/notifiers [POST] +// @Security Bearer +func (ctrl *V1Controller) HandleCreateNotifier() errchain.HandlerFunc { + fn := func(r *http.Request, in repo.NotifierCreate) (repo.NotifierOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Notifiers.Create(auth, auth.GID, auth.UID, in) + } + + return adapters.Action(fn, http.StatusCreated) +} + +// HandleDeleteNotifier godoc +// +// @Summary Delete a Notifier +// @Tags Notifiers +// @Param id path string true "Notifier ID" +// @Success 204 +// @Router /v1/notifiers/{id} [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleDeleteNotifier() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID) (any, error) { + auth := services.NewContext(r.Context()) + return nil, ctrl.repo.Notifiers.Delete(auth, auth.UID, ID) + } + + return adapters.CommandID("id", fn, http.StatusNoContent) +} + +// HandleUpdateNotifier godoc +// +// @Summary Update Notifier +// @Tags Notifiers +// @Param id path string true "Notifier ID" +//
@Param payload body repo.NotifierUpdate true "Notifier Data" +// @Success 200 {object} repo.NotifierOut +// @Router /v1/notifiers/{id} [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleUpdateNotifier() errchain.HandlerFunc { + fn := func(r *http.Request, ID uuid.UUID, in repo.NotifierUpdate) (repo.NotifierOut, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Notifiers.Update(auth, auth.UID, ID, in) + } + + return adapters.ActionID("id", fn, http.StatusOK) +} + +// HandlerNotifierTest godoc +// +// @Summary Test Notifier +// @Tags Notifiers +// @Produce json +// @Param id path string true "Notifier ID" +// @Param url query string true "URL" +// @Success 204 +// @Router /v1/notifiers/test [POST] +// @Security Bearer +func (ctrl *V1Controller) HandlerNotifierTest() errchain.HandlerFunc { + type body struct { + URL string `json:"url" validate:"required"` + } + + fn := func(r *http.Request, q body) (any, error) { + err := shoutrrr.Send(q.URL, "Test message from Homebox") + return nil, err + } + + return adapters.Action(fn, http.StatusOK) +} diff --git a/backend/app/api/handlers/v1/v1_ctrl_qrcode.go b/backend/app/api/handlers/v1/v1_ctrl_qrcode.go index 5055274..25f7c75 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_qrcode.go +++ b/backend/app/api/handlers/v1/v1_ctrl_qrcode.go @@ -5,9 +5,10 @@ import ( "image/png" "io" "net/http" + "net/url" - "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" "github.com/yeqown/go-qrcode/v2" "github.com/yeqown/go-qrcode/writer/standard" @@ -19,32 +20,36 @@ var qrcodeLogo []byte // HandleGenerateQRCode godoc // -// @Summary Encode data into QRCode +// @Summary Create QR Code // @Tags Items // @Produce json // @Param data query string false "data to be encoded into qrcode" // @Success 200 {string} string "image/jpeg" // @Router /v1/qrcode [GET] // 
@Security Bearer -func (ctrl *V1Controller) HandleGenerateQRCode() server.HandlerFunc { - const MaxLength = 4_296 // assume alphanumeric characters only +func (ctrl *V1Controller) HandleGenerateQRCode() errchain.HandlerFunc { + type query struct { + // 4,296 characters is the maximum length of a QR code + Data string `schema:"data" validate:"required,max=4296"` + } return func(w http.ResponseWriter, r *http.Request) error { - data := r.URL.Query().Get("data") + q, err := adapters.DecodeQuery[query](r) + if err != nil { + return err + } image, err := png.Decode(bytes.NewReader(qrcodeLogo)) if err != nil { panic(err) } - if len(data) > MaxLength { - return validate.NewFieldErrors(validate.FieldError{ - Field: "data", - Error: "max length is 4,296 characters exceeded", - }) + decodedStr, err := url.QueryUnescape(q.Data) + if err != nil { + return err } - qrc, err := qrcode.New(data) + qrc, err := qrcode.New(decodedStr) if err != nil { return err } diff --git a/backend/app/api/handlers/v1/v1_ctrl_reporting.go b/backend/app/api/handlers/v1/v1_ctrl_reporting.go index 09f2ae6..40f0d22 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_reporting.go +++ b/backend/app/api/handlers/v1/v1_ctrl_reporting.go @@ -4,28 +4,28 @@ import ( "net/http" "github.com/hay-kot/homebox/backend/internal/core/services" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" ) // HandleBillOfMaterialsExport godoc // -// @Summary Generates a Bill of Materials CSV +// @Summary Export Bill of Materials // @Tags Reporting // @Produce json // @Success 200 {string} string "text/csv" // @Router /v1/reporting/bill-of-materials [GET] // @Security Bearer -func (ctrl *V1Controller) HandleBillOfMaterialsExport() server.HandlerFunc { +func (ctrl *V1Controller) HandleBillOfMaterialsExport() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { actor := services.UseUserCtx(r.Context()) - csv, err := 
ctrl.svc.Reporting.BillOfMaterialsTSV(r.Context(), actor.GroupID) + csv, err := ctrl.svc.Items.ExportBillOfMaterialsTSV(r.Context(), actor.GroupID) if err != nil { return err } - w.Header().Set("Content-Type", "text/csv") - w.Header().Set("Content-Disposition", "attachment; filename=bom.csv") + w.Header().Set("Content-Type", "text/tsv") + w.Header().Set("Content-Disposition", "attachment; filename=bill-of-materials.tsv") _, err = w.Write(csv) return err } diff --git a/backend/app/api/handlers/v1/v1_ctrl_statistics.go b/backend/app/api/handlers/v1/v1_ctrl_statistics.go index 6c09bc6..0a5a319 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_statistics.go +++ b/backend/app/api/handlers/v1/v1_ctrl_statistics.go @@ -5,80 +5,75 @@ import ( "time" "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/homebox/backend/internal/web/adapters" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" ) -// HandleGroupGet godoc -// @Summary Get the current user's group statistics -// @Tags Statistics -// @Produce json -// @Success 200 {object} []repo.TotalsByOrganizer -// @Router /v1/groups/statistics/locations [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupStatisticsLocations() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - stats, err := ctrl.repo.Groups.StatsLocationsByPurchasePrice(ctx, ctx.GID) - if err != nil { - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, stats) +// HandleGroupStatisticsLocations godoc +// +// @Summary Get Location Statistics +// @Tags Statistics +// @Produce json +// @Success 200 {object} []repo.TotalsByOrganizer +// @Router /v1/groups/statistics/locations [GET] +// 
@Security Bearer +func (ctrl *V1Controller) HandleGroupStatisticsLocations() errchain.HandlerFunc { + fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Groups.StatsLocationsByPurchasePrice(auth, auth.GID) } + + return adapters.Command(fn, http.StatusOK) } -// HandleGroupGet godoc -// @Summary Get the current user's group statistics -// @Tags Statistics -// @Produce json -// @Success 200 {object} []repo.TotalsByOrganizer -// @Router /v1/groups/statistics/labels [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupStatisticsLabels() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - stats, err := ctrl.repo.Groups.StatsLabelsByPurchasePrice(ctx, ctx.GID) - if err != nil { - return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, stats) +// HandleGroupStatisticsLabels godoc +// +// @Summary Get Label Statistics +// @Tags Statistics +// @Produce json +// @Success 200 {object} []repo.TotalsByOrganizer +// @Router /v1/groups/statistics/labels [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupStatisticsLabels() errchain.HandlerFunc { + fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Groups.StatsLabelsByPurchasePrice(auth, auth.GID) } + + return adapters.Command(fn, http.StatusOK) } -// HandleGroupGet godoc -// @Summary Get the current user's group statistics -// @Tags Statistics -// @Produce json -// @Success 200 {object} repo.GroupStatistics -// @Router /v1/groups/statistics [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupStatistics() server.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) error { - ctx := services.NewContext(r.Context()) - - stats, err := ctrl.repo.Groups.StatsGroup(ctx, ctx.GID) - if err != nil { - 
return validate.NewRequestError(err, http.StatusInternalServerError) - } - - return server.Respond(w, http.StatusOK, stats) +// HandleGroupStatistics godoc +// +// @Summary Get Group Statistics +// @Tags Statistics +// @Produce json +// @Success 200 {object} repo.GroupStatistics +// @Router /v1/groups/statistics [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupStatistics() errchain.HandlerFunc { + fn := func(r *http.Request) (repo.GroupStatistics, error) { + auth := services.NewContext(r.Context()) + return ctrl.repo.Groups.StatsGroup(auth, auth.GID) } + + return adapters.Command(fn, http.StatusOK) } -// HandleGroupGet godoc -// @Summary Queries the changes overtime of the purchase price over time -// @Tags Statistics -// @Produce json -// @Success 200 {object} repo.ValueOverTime -// @Param start query string false "start date" -// @Param end query string false "end date" -// @Router /v1/groups/statistics/purchase-price [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFunc { +// HandleGroupStatisticsPriceOverTime godoc +// +// @Summary Get Purchase Price Statistics +// @Tags Statistics +// @Produce json +// @Success 200 {object} repo.ValueOverTime +// @Param start query string false "start date" +// @Param end query string false "end date" +// @Router /v1/groups/statistics/purchase-price [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() errchain.HandlerFunc { parseDate := func(datestr string, defaultDate time.Time) (time.Time, error) { if datestr == "" { return defaultDate, nil @@ -104,6 +99,6 @@ func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFun return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, stats) + return server.JSON(w, http.StatusOK, stats) } } diff --git a/backend/app/api/handlers/v1/v1_ctrl_user.go b/backend/app/api/handlers/v1/v1_ctrl_user.go index 
0d034c2..8708d24 100644 --- a/backend/app/api/handlers/v1/v1_ctrl_user.go +++ b/backend/app/api/handlers/v1/v1_ctrl_user.go @@ -8,18 +8,20 @@ import ( "github.com/hay-kot/homebox/backend/internal/core/services" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog/log" ) -// HandleUserSelf godoc -// @Summary Get the current user -// @Tags User -// @Produce json -// @Param payload body services.UserRegistration true "User Data" -// @Success 204 -// @Router /v1/users/register [Post] -func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc { +// HandleUserRegistration godoc +// +// @Summary Register New User +// @Tags User +// @Produce json +// @Param payload body services.UserRegistration true "User Data" +// @Success 204 +// @Router /v1/users/register [Post] +func (ctrl *V1Controller) HandleUserRegistration() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { regData := services.UserRegistration{} @@ -38,18 +40,19 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusNoContent, nil) + return server.JSON(w, http.StatusNoContent, nil) } } // HandleUserSelf godoc -// @Summary Get the current user -// @Tags User -// @Produce json -// @Success 200 {object} server.Result{item=repo.UserOut} -// @Router /v1/users/self [GET] -// @Security Bearer -func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc { +// +// @Summary Get User Self +// @Tags User +// @Produce json +// @Success 200 {object} Wrapped{item=repo.UserOut} +// @Router /v1/users/self [GET] +// @Security Bearer +func (ctrl *V1Controller) HandleUserSelf() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) 
error { token := services.UseTokenCtx(r.Context()) usr, err := ctrl.svc.User.GetSelf(r.Context(), token) @@ -58,19 +61,20 @@ func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, server.Wrap(usr)) + return server.JSON(w, http.StatusOK, Wrap(usr)) } } // HandleUserSelfUpdate godoc -// @Summary Update the current user -// @Tags User -// @Produce json -// @Param payload body repo.UserUpdate true "User Data" -// @Success 200 {object} server.Result{item=repo.UserUpdate} -// @Router /v1/users/self [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc { +// +// @Summary Update Account +// @Tags User +// @Produce json +// @Param payload body repo.UserUpdate true "User Data" +// @Success 200 {object} Wrapped{item=repo.UserUpdate} +// @Router /v1/users/self [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleUserSelfUpdate() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { updateData := repo.UserUpdate{} if err := server.Decode(r, &updateData); err != nil { @@ -84,18 +88,19 @@ func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusOK, server.Wrap(newData)) + return server.JSON(w, http.StatusOK, Wrap(newData)) } } // HandleUserSelfDelete godoc -// @Summary Deletes the user account -// @Tags User -// @Produce json -// @Success 204 -// @Router /v1/users/self [DELETE] -// @Security Bearer -func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc { +// +// @Summary Delete Account +// @Tags User +// @Produce json +// @Success 204 +// @Router /v1/users/self [DELETE] +// @Security Bearer +func (ctrl *V1Controller) HandleUserSelfDelete() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { if ctrl.isDemo { return 
validate.NewRequestError(nil, http.StatusForbidden) @@ -106,7 +111,7 @@ func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusNoContent, nil) + return server.JSON(w, http.StatusNoContent, nil) } } @@ -118,13 +123,14 @@ type ( ) // HandleUserSelfChangePassword godoc -// @Summary Updates the users password -// @Tags User -// @Success 204 -// @Param payload body ChangePassword true "Password Payload" -// @Router /v1/users/change-password [PUT] -// @Security Bearer -func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc { +// +// @Summary Change Password +// @Tags User +// @Success 204 +// @Param payload body ChangePassword true "Password Payload" +// @Router /v1/users/change-password [PUT] +// @Security Bearer +func (ctrl *V1Controller) HandleUserSelfChangePassword() errchain.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) error { if ctrl.isDemo { return validate.NewRequestError(nil, http.StatusForbidden) @@ -143,6 +149,6 @@ func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc { return validate.NewRequestError(err, http.StatusInternalServerError) } - return server.Respond(w, http.StatusNoContent, nil) + return server.JSON(w, http.StatusNoContent, nil) } } diff --git a/backend/app/api/logger.go b/backend/app/api/logger.go index ddc574f..34659c6 100644 --- a/backend/app/api/logger.go +++ b/backend/app/api/logger.go @@ -2,7 +2,6 @@ package main import ( "os" - "strings" "github.com/hay-kot/homebox/backend/internal/sys/config" "github.com/rs/zerolog" @@ -18,24 +17,8 @@ func (a *app) setupLogger() { log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).With().Caller().Logger() } - log.Level(getLevel(a.conf.Log.Level)) -} - -func getLevel(l string) zerolog.Level { - switch strings.ToLower(l) { - case "debug": - return zerolog.DebugLevel - case "info": - return zerolog.InfoLevel - 
case "warn": - return zerolog.WarnLevel - case "error": - return zerolog.ErrorLevel - case "fatal": - return zerolog.FatalLevel - case "panic": - return zerolog.PanicLevel - default: - return zerolog.InfoLevel + level, err := zerolog.ParseLevel(a.conf.Log.Level) + if err == nil { + zerolog.SetGlobalLevel(level) } } diff --git a/backend/app/api/main.go b/backend/app/api/main.go index b17b0fa..4811bfa 100644 --- a/backend/app/api/main.go +++ b/backend/app/api/main.go @@ -1,7 +1,9 @@ package main import ( + "bytes" "context" + "fmt" "net/http" "os" "path/filepath" @@ -9,16 +11,24 @@ import ( atlas "ariga.io/atlas/sql/migrate" "entgo.io/ent/dialect/sql/schema" - "github.com/hay-kot/homebox/backend/app/api/static/docs" + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" + + "github.com/hay-kot/homebox/backend/internal/core/currencies" "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/migrations" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/internal/sys/config" "github.com/hay-kot/homebox/backend/internal/web/mid" - "github.com/hay-kot/homebox/backend/pkgs/server" - _ "github.com/mattn/go-sqlite3" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/graceful" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/rs/zerolog/pkgerrors" + + _ "github.com/hay-kot/homebox/backend/pkgs/cgofreesqlite" ) var ( @@ -27,24 +37,32 @@ var ( buildTime = "now" ) -// @title Go API Templates +func build() string { + short := commit + if len(short) > 7 { + short = short[:7] + } + + return fmt.Sprintf("%s, commit %s, built at %s", version, short, buildTime) +} + +// @title Homebox API // @version 1.0 -// @description This is a simple Rest API Server Template that implements some basic User and 
Authentication patterns to help you get started and bootstrap your next project!. +// @description Track, Manage, and Organize your Things. // @contact.name Don't -// @license.name MIT // @BasePath /api // @securityDefinitions.apikey Bearer // @in header // @name Authorization // @description "Type 'Bearer TOKEN' to correctly set the API Key" func main() { - cfg, err := config.New() + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + + cfg, err := config.New(build(), "Homebox inventory management system") if err != nil { panic(err) } - docs.SwaggerInfo.Host = cfg.Swagger.Host - if err := run(cfg); err != nil { panic(err) } @@ -62,12 +80,12 @@ func run(cfg *config.Config) error { log.Fatal().Err(err).Msg("failed to create data directory") } - c, err := ent.Open("sqlite3", cfg.Storage.SqliteUrl) + c, err := ent.Open("sqlite3", cfg.Storage.SqliteURL) if err != nil { log.Fatal(). Err(err). Str("driver", "sqlite"). - Str("url", cfg.Storage.SqliteUrl). + Str("url", cfg.Storage.SqliteURL). Msg("failed opening connection to sqlite") } defer func(c *ent.Client) { @@ -100,7 +118,7 @@ func run(cfg *config.Config) error { log.Fatal(). Err(err). Str("driver", "sqlite"). - Str("url", cfg.Storage.SqliteUrl). + Str("url", cfg.Storage.SqliteURL). Msg("failed creating schema resources") } @@ -110,70 +128,145 @@ func run(cfg *config.Config) error { return err } + collectFuncs := []currencies.CollectorFunc{ + currencies.CollectDefaults(), + } + + if cfg.Options.CurrencyConfig != "" { + log.Info(). + Str("path", cfg.Options.CurrencyConfig). + Msg("loading currency config file") + + content, err := os.ReadFile(cfg.Options.CurrencyConfig) + if err != nil { + log.Fatal(). + Err(err). + Str("path", cfg.Options.CurrencyConfig). + Msg("failed to read currency config file") + } + + collectFuncs = append(collectFuncs, currencies.CollectJSON(bytes.NewReader(content))) + } + + currencies, err := currencies.CollectionCurrencies(collectFuncs...) + if err != nil { + log.Fatal(). + Err(err). 
+ Msg("failed to collect currencies") + } + + app.bus = eventbus.New() app.db = c - app.repos = repo.New(c, cfg.Storage.Data) + app.repos = repo.New(c, app.bus, cfg.Storage.Data) app.services = services.New( app.repos, services.WithAutoIncrementAssetID(cfg.Options.AutoIncrementAssetID), + services.WithCurrencies(currencies), ) // ========================================================================= - // Start Server\ + // Start Server + logger := log.With().Caller().Logger() - mwLogger := mid.Logger(logger) - if app.conf.Mode == config.ModeDevelopment { - mwLogger = mid.SugarLogger(logger) - } - - app.server = server.NewServer( - server.WithHost(app.conf.Web.Host), - server.WithPort(app.conf.Web.Port), - server.WithMiddleware( - mwLogger, - mid.Errors(logger), - mid.Panic(app.conf.Mode == config.ModeDevelopment), - ), + router := chi.NewMux() + router.Use( + middleware.RequestID, + middleware.RealIP, + mid.Logger(logger), + middleware.Recoverer, + middleware.StripSlashes, ) - app.mountRoutes(app.repos) + chain := errchain.New(mid.Errors(logger)) - log.Info().Msgf("Starting HTTP Server on %s:%s", app.server.Host, app.server.Port) + app.mountRoutes(router, chain, app.repos) + + runner := graceful.NewRunner() + + runner.AddFunc("server", func(ctx context.Context) error { + httpserver := http.Server{ + Addr: fmt.Sprintf("%s:%s", cfg.Web.Host, cfg.Web.Port), + Handler: router, + ReadTimeout: cfg.Web.ReadTimeout, + WriteTimeout: cfg.Web.WriteTimeout, + IdleTimeout: cfg.Web.IdleTimeout, + } + + go func() { + <-ctx.Done() + _ = httpserver.Shutdown(context.Background()) + }() + + log.Info().Msgf("Server is running on %s:%s", cfg.Web.Host, cfg.Web.Port) + return httpserver.ListenAndServe() + }) // ========================================================================= // Start Reoccurring Tasks - go app.startBgTask(time.Duration(24)*time.Hour, func() { - _, err := app.repos.AuthTokens.PurgeExpiredTokens(context.Background()) + runner.AddFunc("eventbus", app.bus.Run) + 
+ runner.AddFunc("seed_database", func(ctx context.Context) error { + // TODO: Remove through external API that does setup + if cfg.Demo { + log.Info().Msg("Running in demo mode, creating demo data") + app.SetupDemo() + } + return nil + }) + + runner.AddPlugin(NewTask("purge-tokens", time.Duration(24)*time.Hour, func(ctx context.Context) { + _, err := app.repos.AuthTokens.PurgeExpiredTokens(ctx) if err != nil { log.Error(). Err(err). Msg("failed to purge expired tokens") } - }) - go app.startBgTask(time.Duration(24)*time.Hour, func() { - _, err := app.repos.Groups.InvitationPurge(context.Background()) + })) + + runner.AddPlugin(NewTask("purge-invitations", time.Duration(24)*time.Hour, func(ctx context.Context) { + _, err := app.repos.Groups.InvitationPurge(ctx) if err != nil { log.Error(). Err(err). Msg("failed to purge expired invitations") } - }) + })) - // TODO: Remove through external API that does setup - if cfg.Demo { - log.Info().Msg("Running in demo mode, creating demo data") - app.SetupDemo() - } + runner.AddPlugin(NewTask("send-notifications", time.Duration(1)*time.Hour, func(ctx context.Context) { + now := time.Now() + + if now.Hour() == 8 { + fmt.Println("run notifiers") + err := app.services.BackgroundService.SendNotifiersToday(context.Background()) + if err != nil { + log.Error(). + Err(err). 
+ Msg("failed to send notifiers") + } + } + })) if cfg.Debug.Enabled { - debugrouter := app.debugRouter() - go func() { - if err := http.ListenAndServe(":"+cfg.Debug.Port, debugrouter); err != nil { - log.Fatal().Err(err).Msg("failed to start debug server") + runner.AddFunc("debug", func(ctx context.Context) error { + debugserver := http.Server{ + Addr: fmt.Sprintf("%s:%s", cfg.Web.Host, cfg.Debug.Port), + Handler: app.debugRouter(), + ReadTimeout: cfg.Web.ReadTimeout, + WriteTimeout: cfg.Web.WriteTimeout, + IdleTimeout: cfg.Web.IdleTimeout, } - }() + + go func() { + <-ctx.Done() + _ = debugserver.Shutdown(context.Background()) + }() + + log.Info().Msgf("Debug server is running on %s:%s", cfg.Web.Host, cfg.Debug.Port) + return debugserver.ListenAndServe() + }) } - return app.server.Start() + return runner.Start(context.Background()) } diff --git a/backend/app/api/middleware.go b/backend/app/api/middleware.go index c694618..02b3a6c 100644 --- a/backend/app/api/middleware.go +++ b/backend/app/api/middleware.go @@ -7,9 +7,11 @@ import ( "net/url" "strings" + v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1" "github.com/hay-kot/homebox/backend/internal/core/services" + "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" ) type tokenHasKey struct { @@ -30,9 +32,9 @@ const ( // the required roles, a 403 Forbidden will be returned. 
// // WARNING: This middleware _MUST_ be called after mwAuthToken or else it will panic -func (a *app) mwRoles(rm RoleMode, required ...string) server.Middleware { - return func(next server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { +func (a *app) mwRoles(rm RoleMode, required ...string) errchain.Middleware { + return func(next errchain.Handler) errchain.Handler { + return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { ctx := r.Context() maybeToken := ctx.Value(hashedToken) @@ -94,20 +96,6 @@ func getQuery(r *http.Request) (string, error) { return token, nil } -func getCookie(r *http.Request) (string, error) { - cookie, err := r.Cookie("hb.auth.token") - if err != nil { - return "", errors.New("access_token cookie is required") - } - - token, err := url.QueryUnescape(cookie.Value) - if err != nil { - return "", errors.New("access_token cookie is required") - } - - return token, nil -} - // mwAuthToken is a middleware that will check the database for a stateful token // and attach it's user to the request context, or return an appropriate error. 
// Authorization support is by token via Headers or Query Parameter @@ -115,26 +103,35 @@ func getCookie(r *http.Request) (string, error) { // Example: // - header = "Bearer 1234567890" // - query = "?access_token=1234567890" -// - cookie = hb.auth.token = 1234567890 -func (a *app) mwAuthToken(next server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - keyFuncs := [...]KeyFunc{ - getBearer, - getCookie, - getQuery, - } - +func (a *app) mwAuthToken(next errchain.Handler) errchain.Handler { + return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { var requestToken string - for _, keyFunc := range keyFuncs { - token, err := keyFunc(r) - if err == nil { - requestToken = token - break + + // We ignore the error to allow the next strategy to be attempted + { + cookies, _ := v1.GetCookies(r) + if cookies != nil { + requestToken = cookies.Token } } if requestToken == "" { - return validate.NewRequestError(errors.New("Authorization header or query is required"), http.StatusUnauthorized) + keyFuncs := [...]KeyFunc{ + getBearer, + getQuery, + } + + for _, keyFunc := range keyFuncs { + token, err := keyFunc(r) + if err == nil { + requestToken = token + break + } + } + } + + if requestToken == "" { + return validate.NewRequestError(errors.New("authorization header or query is required"), http.StatusUnauthorized) } requestToken = strings.TrimPrefix(requestToken, "Bearer ") @@ -144,7 +141,11 @@ func (a *app) mwAuthToken(next server.Handler) server.Handler { usr, err := a.services.User.GetSelf(r.Context(), requestToken) // Check the database for the token if err != nil { - return validate.NewRequestError(errors.New("valid authorization header is required"), http.StatusUnauthorized) + if ent.IsNotFound(err) { + return validate.NewRequestError(errors.New("valid authorization token is required"), http.StatusUnauthorized) + } + + return err } r = r.WithContext(services.SetUserCtx(r.Context(), &usr, 
requestToken)) diff --git a/backend/app/api/providers/doc.go b/backend/app/api/providers/doc.go new file mode 100644 index 0000000..f58615d --- /dev/null +++ b/backend/app/api/providers/doc.go @@ -0,0 +1,2 @@ +// Package providers provides a authentication abstraction for the backend. +package providers diff --git a/backend/app/api/providers/extractors.go b/backend/app/api/providers/extractors.go new file mode 100644 index 0000000..bc042a4 --- /dev/null +++ b/backend/app/api/providers/extractors.go @@ -0,0 +1,55 @@ +package providers + +import ( + "errors" + "net/http" + + "github.com/hay-kot/homebox/backend/internal/sys/validate" + "github.com/hay-kot/httpkit/server" + "github.com/rs/zerolog/log" +) + +type LoginForm struct { + Username string `json:"username"` + Password string `json:"password"` + StayLoggedIn bool `json:"stayLoggedIn"` +} + +func getLoginForm(r *http.Request) (LoginForm, error) { + loginForm := LoginForm{} + + switch r.Header.Get("Content-Type") { + case "application/x-www-form-urlencoded": + err := r.ParseForm() + if err != nil { + return loginForm, errors.New("failed to parse form") + } + + loginForm.Username = r.PostFormValue("username") + loginForm.Password = r.PostFormValue("password") + loginForm.StayLoggedIn = r.PostFormValue("stayLoggedIn") == "true" + case "application/json": + err := server.Decode(r, &loginForm) + if err != nil { + log.Err(err).Msg("failed to decode login form") + return loginForm, errors.New("failed to decode login form") + } + default: + return loginForm, errors.New("invalid content type") + } + + if loginForm.Username == "" || loginForm.Password == "" { + return loginForm, validate.NewFieldErrors( + validate.FieldError{ + Field: "username", + Error: "username or password is empty", + }, + validate.FieldError{ + Field: "password", + Error: "username or password is empty", + }, + ) + } + + return loginForm, nil +} diff --git a/backend/app/api/providers/local.go b/backend/app/api/providers/local.go new file mode 100644 
index 0000000..991f51a --- /dev/null +++ b/backend/app/api/providers/local.go @@ -0,0 +1,30 @@ +package providers + +import ( + "net/http" + + "github.com/hay-kot/homebox/backend/internal/core/services" +) + +type LocalProvider struct { + service *services.UserService +} + +func NewLocalProvider(service *services.UserService) *LocalProvider { + return &LocalProvider{ + service: service, + } +} + +func (p *LocalProvider) Name() string { + return "local" +} + +func (p *LocalProvider) Authenticate(w http.ResponseWriter, r *http.Request) (services.UserAuthTokenDetail, error) { + loginForm, err := getLoginForm(r) + if err != nil { + return services.UserAuthTokenDetail{}, err + } + + return p.service.Login(r.Context(), loginForm.Username, loginForm.Password, loginForm.StayLoggedIn) +} diff --git a/backend/app/api/routes.go b/backend/app/api/routes.go index e995fa4..de10942 100644 --- a/backend/app/api/routes.go +++ b/backend/app/api/routes.go @@ -3,20 +3,21 @@ package main import ( "embed" "errors" - "fmt" "io" "mime" "net/http" "path" "path/filepath" + "github.com/go-chi/chi/v5" "github.com/hay-kot/homebox/backend/app/api/handlers/debughandlers" v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1" + "github.com/hay-kot/homebox/backend/app/api/providers" _ "github.com/hay-kot/homebox/backend/app/api/static/docs" "github.com/hay-kot/homebox/backend/internal/data/ent/authroles" "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/hay-kot/homebox/backend/pkgs/server" - httpSwagger "github.com/swaggo/http-swagger" // http-swagger middleware + "github.com/hay-kot/httpkit/errchain" + httpSwagger "github.com/swaggo/http-swagger/v2" // http-swagger middleware ) const prefix = "/api" @@ -36,110 +37,133 @@ func (a *app) debugRouter() *http.ServeMux { } // registerRoutes registers all the routes for the API -func (a *app) mountRoutes(repos *repo.AllRepos) { +func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllRepos) { 
registerMimes() - a.server.Get("/swagger/*", server.ToHandler(httpSwagger.Handler( - httpSwagger.URL(fmt.Sprintf("%s://%s/swagger/doc.json", a.conf.Swagger.Scheme, a.conf.Swagger.Host)), - ))) + r.Get("/swagger/*", httpSwagger.Handler( + httpSwagger.URL("/swagger/doc.json"), + )) // ========================================================================= // API Version 1 - v1Base := v1.BaseUrlFunc(prefix) + v1Base := v1.BaseURLFunc(prefix) v1Ctrl := v1.NewControllerV1( a.services, a.repos, + a.bus, v1.WithMaxUploadSize(a.conf.Web.MaxUploadSize), v1.WithRegistration(a.conf.Options.AllowRegistration), v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode ) - a.server.Get(v1Base("/status"), v1Ctrl.HandleBase(func() bool { return true }, v1.Build{ + r.Get(v1Base("/status"), chain.ToHandlerFunc(v1Ctrl.HandleBase(func() bool { return true }, v1.Build{ Version: version, Commit: commit, BuildTime: buildTime, - })) + }))) - a.server.Post(v1Base("/users/register"), v1Ctrl.HandleUserRegistration()) - a.server.Post(v1Base("/users/login"), v1Ctrl.HandleAuthLogin()) + r.Get(v1Base("/currencies"), chain.ToHandlerFunc(v1Ctrl.HandleCurrency())) - userMW := []server.Middleware{ + providers := []v1.AuthProvider{ + providers.NewLocalProvider(a.services.User), + } + + r.Post(v1Base("/users/register"), chain.ToHandlerFunc(v1Ctrl.HandleUserRegistration())) + r.Post(v1Base("/users/login"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogin(providers...))) + + userMW := []errchain.Middleware{ a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String()), } - a.server.Get(v1Base("/users/self"), v1Ctrl.HandleUserSelf(), userMW...) - a.server.Put(v1Base("/users/self"), v1Ctrl.HandleUserSelfUpdate(), userMW...) - a.server.Delete(v1Base("/users/self"), v1Ctrl.HandleUserSelfDelete(), userMW...) - a.server.Post(v1Base("/users/logout"), v1Ctrl.HandleAuthLogout(), userMW...) - a.server.Get(v1Base("/users/refresh"), v1Ctrl.HandleAuthRefresh(), userMW...) 
- a.server.Put(v1Base("/users/self/change-password"), v1Ctrl.HandleUserSelfChangePassword(), userMW...) + r.Get(v1Base("/ws/events"), chain.ToHandlerFunc(v1Ctrl.HandleCacheWS(), userMW...)) + r.Get(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelf(), userMW...)) + r.Put(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfUpdate(), userMW...)) + r.Delete(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfDelete(), userMW...)) + r.Post(v1Base("/users/logout"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogout(), userMW...)) + r.Get(v1Base("/users/refresh"), chain.ToHandlerFunc(v1Ctrl.HandleAuthRefresh(), userMW...)) + r.Put(v1Base("/users/self/change-password"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfChangePassword(), userMW...)) - a.server.Post(v1Base("/groups/invitations"), v1Ctrl.HandleGroupInvitationsCreate(), userMW...) - a.server.Get(v1Base("/groups/statistics"), v1Ctrl.HandleGroupStatistics(), userMW...) - a.server.Get(v1Base("/groups/statistics/purchase-price"), v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...) - a.server.Get(v1Base("/groups/statistics/locations"), v1Ctrl.HandleGroupStatisticsLocations(), userMW...) - a.server.Get(v1Base("/groups/statistics/labels"), v1Ctrl.HandleGroupStatisticsLabels(), userMW...) + r.Post(v1Base("/groups/invitations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsCreate(), userMW...)) + r.Get(v1Base("/groups/statistics"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatistics(), userMW...)) + r.Get(v1Base("/groups/statistics/purchase-price"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...)) + r.Get(v1Base("/groups/statistics/locations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLocations(), userMW...)) + r.Get(v1Base("/groups/statistics/labels"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLabels(), userMW...)) // TODO: I don't like /groups being the URL for users - a.server.Get(v1Base("/groups"), v1Ctrl.HandleGroupGet(), userMW...) 
- a.server.Put(v1Base("/groups"), v1Ctrl.HandleGroupUpdate(), userMW...) + r.Get(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupGet(), userMW...)) + r.Put(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupUpdate(), userMW...)) - a.server.Post(v1Base("/actions/ensure-asset-ids"), v1Ctrl.HandleEnsureAssetID(), userMW...) - a.server.Post(v1Base("/actions/zero-item-time-fields"), v1Ctrl.HandleItemDateZeroOut(), userMW...) + r.Post(v1Base("/actions/ensure-asset-ids"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureAssetID(), userMW...)) + r.Post(v1Base("/actions/zero-item-time-fields"), chain.ToHandlerFunc(v1Ctrl.HandleItemDateZeroOut(), userMW...)) + r.Post(v1Base("/actions/ensure-import-refs"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureImportRefs(), userMW...)) + r.Post(v1Base("/actions/set-primary-photos"), chain.ToHandlerFunc(v1Ctrl.HandleSetPrimaryPhotos(), userMW...)) - a.server.Get(v1Base("/locations"), v1Ctrl.HandleLocationGetAll(), userMW...) - a.server.Post(v1Base("/locations"), v1Ctrl.HandleLocationCreate(), userMW...) - a.server.Get(v1Base("/locations/tree"), v1Ctrl.HandleLocationTreeQuery(), userMW...) - a.server.Get(v1Base("/locations/{id}"), v1Ctrl.HandleLocationGet(), userMW...) - a.server.Put(v1Base("/locations/{id}"), v1Ctrl.HandleLocationUpdate(), userMW...) - a.server.Delete(v1Base("/locations/{id}"), v1Ctrl.HandleLocationDelete(), userMW...) 
+ r.Get(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGetAll(), userMW...)) + r.Post(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationCreate(), userMW...)) + r.Get(v1Base("/locations/tree"), chain.ToHandlerFunc(v1Ctrl.HandleLocationTreeQuery(), userMW...)) + r.Get(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGet(), userMW...)) + r.Put(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationUpdate(), userMW...)) + r.Delete(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationDelete(), userMW...)) - a.server.Get(v1Base("/labels"), v1Ctrl.HandleLabelsGetAll(), userMW...) - a.server.Post(v1Base("/labels"), v1Ctrl.HandleLabelsCreate(), userMW...) - a.server.Get(v1Base("/labels/{id}"), v1Ctrl.HandleLabelGet(), userMW...) - a.server.Put(v1Base("/labels/{id}"), v1Ctrl.HandleLabelUpdate(), userMW...) - a.server.Delete(v1Base("/labels/{id}"), v1Ctrl.HandleLabelDelete(), userMW...) + r.Get(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsGetAll(), userMW...)) + r.Post(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsCreate(), userMW...)) + r.Get(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelGet(), userMW...)) + r.Put(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelUpdate(), userMW...)) + r.Delete(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelDelete(), userMW...)) - a.server.Get(v1Base("/items"), v1Ctrl.HandleItemsGetAll(), userMW...) - a.server.Post(v1Base("/items"), v1Ctrl.HandleItemsCreate(), userMW...) - a.server.Post(v1Base("/items/import"), v1Ctrl.HandleItemsImport(), userMW...) - a.server.Get(v1Base("/items/fields"), v1Ctrl.HandleGetAllCustomFieldNames(), userMW...) - a.server.Get(v1Base("/items/fields/values"), v1Ctrl.HandleGetAllCustomFieldValues(), userMW...) 
+ r.Get(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsGetAll(), userMW...)) + r.Post(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsCreate(), userMW...)) + r.Post(v1Base("/items/import"), chain.ToHandlerFunc(v1Ctrl.HandleItemsImport(), userMW...)) + r.Get(v1Base("/items/export"), chain.ToHandlerFunc(v1Ctrl.HandleItemsExport(), userMW...)) + r.Get(v1Base("/items/fields"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldNames(), userMW...)) + r.Get(v1Base("/items/fields/values"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldValues(), userMW...)) - a.server.Get(v1Base("/items/{id}"), v1Ctrl.HandleItemGet(), userMW...) - a.server.Put(v1Base("/items/{id}"), v1Ctrl.HandleItemUpdate(), userMW...) - a.server.Delete(v1Base("/items/{id}"), v1Ctrl.HandleItemDelete(), userMW...) + r.Get(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemGet(), userMW...)) + r.Get(v1Base("/items/{id}/path"), chain.ToHandlerFunc(v1Ctrl.HandleItemFullPath(), userMW...)) + r.Put(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemUpdate(), userMW...)) + r.Patch(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemPatch(), userMW...)) + r.Delete(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemDelete(), userMW...)) - a.server.Post(v1Base("/items/{id}/attachments"), v1Ctrl.HandleItemAttachmentCreate(), userMW...) - a.server.Put(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentUpdate(), userMW...) - a.server.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentDelete(), userMW...) 
+ r.Post(v1Base("/items/{id}/attachments"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentCreate(), userMW...)) + r.Put(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentUpdate(), userMW...)) + r.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentDelete(), userMW...)) - a.server.Get(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...) - a.server.Post(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...) - a.server.Put(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...) - a.server.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryDelete(), userMW...) + r.Get(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceLogGet(), userMW...)) + r.Post(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryCreate(), userMW...)) + r.Put(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...)) + r.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryDelete(), userMW...)) - a.server.Get(v1Base("/asset/{id}"), v1Ctrl.HandleAssetGet(), userMW...) 
+ r.Get(v1Base("/assets/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...)) + + // Notifiers + r.Get(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleGetUserNotifiers(), userMW...)) + r.Post(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleCreateNotifier(), userMW...)) + r.Put(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleUpdateNotifier(), userMW...)) + r.Delete(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleDeleteNotifier(), userMW...)) + r.Post(v1Base("/notifiers/test"), chain.ToHandlerFunc(v1Ctrl.HandlerNotifierTest(), userMW...)) // Asset-Like endpoints - a.server.Get( + assetMW := []errchain.Middleware{ + a.mwAuthToken, + a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()), + } + + r.Get( v1Base("/qrcode"), - v1Ctrl.HandleGenerateQRCode(), - a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()), + chain.ToHandlerFunc(v1Ctrl.HandleGenerateQRCode(), assetMW...), ) - a.server.Get( + r.Get( v1Base("/items/{id}/attachments/{attachment_id}"), - v1Ctrl.HandleItemAttachmentGet(), - a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()), + chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentGet(), assetMW...), ) // Reporting Services - a.server.Get(v1Base("/reporting/bill-of-materials"), v1Ctrl.HandleBillOfMaterialsExport(), userMW...) + r.Get(v1Base("/reporting/bill-of-materials"), chain.ToHandlerFunc(v1Ctrl.HandleBillOfMaterialsExport(), userMW...)) - a.server.NotFound(notFoundHandler()) + r.NotFound(chain.ToHandlerFunc(notFoundHandler())) } func registerMimes() { @@ -156,13 +180,13 @@ func registerMimes() { // notFoundHandler perform the main logic around handling the internal SPA embed and ensuring that // the client side routing is handled correctly. 
-func notFoundHandler() server.HandlerFunc { +func notFoundHandler() errchain.HandlerFunc { tryRead := func(fs embed.FS, prefix, requestedPath string, w http.ResponseWriter) error { f, err := fs.Open(path.Join(prefix, requestedPath)) if err != nil { return err } - defer f.Close() + defer func() { _ = f.Close() }() stat, _ := f.Stat() if stat.IsDir() { diff --git a/backend/app/api/static/docs/docs.go b/backend/app/api/static/docs/docs.go index 8ceb1cf..7c9a748 100644 --- a/backend/app/api/static/docs/docs.go +++ b/backend/app/api/static/docs/docs.go @@ -1,5 +1,4 @@ -// Package docs GENERATED BY SWAG; DO NOT EDIT -// This file was generated by swaggo/swag +// Package docs Code generated by swaggo/swag. DO NOT EDIT package docs import "github.com/swaggo/swag" @@ -13,9 +12,6 @@ const docTemplate = `{ "contact": { "name": "Don't" }, - "license": { - "name": "MIT" - }, "version": "{{.Version}}" }, "host": "{{.Host}}", @@ -28,13 +24,64 @@ const docTemplate = `{ "Bearer": [] } ], + "description": "Ensures all items in the database have an asset ID", "produces": [ "application/json" ], "tags": [ - "Group" + "Actions" ], - "summary": "Ensures all items in the database have an asset id", + "summary": "Ensure Asset IDs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/ensure-import-refs": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Ensures all items in the database have an import ref", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Ensures Import Refs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/set-primary-photos": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Sets the first photo of each item as the primary photo", + "produces": [ + "application/json" + ], + "tags": [ + 
"Actions" + ], + "summary": "Set Primary Photos", "responses": { "200": { "description": "OK", @@ -52,13 +99,14 @@ const docTemplate = `{ "Bearer": [] } ], + "description": "Resets all item date fields to the beginning of the day", "produces": [ "application/json" ], "tags": [ - "Group" + "Actions" ], - "summary": "Resets all item date fields to the beginning of the day", + "summary": "Zero Out Time Fields", "responses": { "200": { "description": "OK", @@ -80,9 +128,9 @@ const docTemplate = `{ "application/json" ], "tags": [ - "Assets" + "Items" ], - "summary": "Gets an item by Asset ID", + "summary": "Get Item by Asset ID", "parameters": [ { "type": "string", @@ -102,6 +150,25 @@ const docTemplate = `{ } } }, + "/v1/currency": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Base" + ], + "summary": "Currency", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/currencies.Currency" + } + } + } + } + }, "/v1/groups": { "get": { "security": [ @@ -115,7 +182,7 @@ const docTemplate = `{ "tags": [ "Group" ], - "summary": "Get the current user's group", + "summary": "Get Group", "responses": { "200": { "description": "OK", @@ -137,7 +204,7 @@ const docTemplate = `{ "tags": [ "Group" ], - "summary": "Updates some fields of the current users group", + "summary": "Update Group", "parameters": [ { "description": "User Data", @@ -172,7 +239,7 @@ const docTemplate = `{ "tags": [ "Group" ], - "summary": "Get the current user", + "summary": "Create Group Invitation", "parameters": [ { "description": "User Data", @@ -207,7 +274,7 @@ const docTemplate = `{ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Group Statistics", "responses": { "200": { "description": "OK", @@ -231,7 +298,7 @@ const docTemplate = `{ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Label Statistics", "responses": { "200": { "description": "OK", 
@@ -258,7 +325,7 @@ const docTemplate = `{ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Location Statistics", "responses": { "200": { "description": "OK", @@ -285,7 +352,7 @@ const docTemplate = `{ "tags": [ "Statistics" ], - "summary": "Queries the changes overtime of the purchase price over time", + "summary": "Get Purchase Price Statistics", "parameters": [ { "type": "string", @@ -323,7 +390,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "Get All Items", + "summary": "Query All Items", "parameters": [ { "type": "string", @@ -362,6 +429,16 @@ const docTemplate = `{ "description": "location Ids", "name": "locations", "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "parent Ids", + "name": "parentIds", + "in": "query" } ], "responses": { @@ -385,7 +462,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "Create a new item", + "summary": "Create Item", "parameters": [ { "description": "Item Data", @@ -398,8 +475,8 @@ const docTemplate = `{ } ], "responses": { - "200": { - "description": "OK", + "201": { + "description": "Created", "schema": { "$ref": "#/definitions/repo.ItemSummary" } @@ -407,6 +484,27 @@ const docTemplate = `{ } } }, + "/v1/items/export": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Items" + ], + "summary": "Export Items", + "responses": { + "200": { + "description": "text/csv", + "schema": { + "type": "string" + } + } + } + } + }, "/v1/items/fields": { "get": { "security": [ @@ -420,7 +518,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Get All Custom Field Names", "responses": { "200": { "description": "OK", @@ -447,7 +545,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Get All Custom Field Values", "responses": { "200": { "description": 
"OK", @@ -474,7 +572,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Import Items", "parameters": [ { "type": "file", @@ -504,7 +602,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "Gets a item and fields", + "summary": "Get Item", "parameters": [ { "type": "string", @@ -535,7 +633,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "updates a item", + "summary": "Update Item", "parameters": [ { "type": "string", @@ -575,7 +673,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "deletes a item", + "summary": "Delete Item", "parameters": [ { "type": "string", @@ -590,6 +688,46 @@ const docTemplate = `{ "description": "No Content" } } + }, + "patch": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Update Item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemPatch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } } }, "/v1/items/{id}/attachments": { @@ -605,7 +743,7 @@ const docTemplate = `{ "tags": [ "Items Attachments" ], - "summary": "imports items into the database", + "summary": "Create Item Attachment", "parameters": [ { "type": "string", @@ -646,7 +784,7 @@ const docTemplate = `{ "422": { "description": "Unprocessable Entity", "schema": { - "$ref": "#/definitions/server.ErrorResponse" + "$ref": "#/definitions/validate.ErrorResponse" } } } @@ -665,7 +803,7 @@ const docTemplate = `{ "tags": [ "Items Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Get Item Attachment", "parameters": [ { "type": "string", @@ -700,7 +838,7 @@ const docTemplate = `{ "tags": [ "Items 
Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Update Item Attachment", "parameters": [ { "type": "string", @@ -744,7 +882,7 @@ const docTemplate = `{ "tags": [ "Items Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Delete Item Attachment", "parameters": [ { "type": "string", @@ -816,8 +954,8 @@ const docTemplate = `{ } ], "responses": { - "200": { - "description": "OK", + "201": { + "description": "Created", "schema": { "$ref": "#/definitions/repo.MaintenanceEntry" } @@ -879,6 +1017,42 @@ const docTemplate = `{ } } }, + "/v1/items/{id}/path": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get the full path of an item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemPath" + } + } + } + } + } + }, "/v1/labels": { "get": { "security": [ @@ -897,22 +1071,10 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.LabelOut" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.LabelOut" + } } } } @@ -929,7 +1091,7 @@ const docTemplate = `{ "tags": [ "Labels" ], - "summary": "Create a new label", + "summary": "Create Label", "parameters": [ { "description": "Label Data", @@ -964,7 +1126,7 @@ const docTemplate = `{ "tags": [ "Labels" ], - "summary": "Gets a label and fields", + "summary": "Get Label", "parameters": [ { "type": "string", @@ -995,7 +1157,7 @@ const docTemplate = `{ "tags": [ "Labels" ], - "summary": "updates a label", + "summary": "Update Label", "parameters": [ { "type": 
"string", @@ -1026,7 +1188,7 @@ const docTemplate = `{ "tags": [ "Labels" ], - "summary": "deletes a label", + "summary": "Delete Label", "parameters": [ { "type": "string", @@ -1069,22 +1231,10 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.LocationOutCount" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.LocationOutCount" + } } } } @@ -1101,7 +1251,7 @@ const docTemplate = `{ "tags": [ "Locations" ], - "summary": "Create a new location", + "summary": "Create Location", "parameters": [ { "description": "Location Data", @@ -1136,7 +1286,7 @@ const docTemplate = `{ "tags": [ "Locations" ], - "summary": "Get All Locations", + "summary": "Get Locations Tree", "parameters": [ { "type": "boolean", @@ -1149,22 +1299,10 @@ const docTemplate = `{ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.TreeItem" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.TreeItem" + } } } } @@ -1183,7 +1321,7 @@ const docTemplate = `{ "tags": [ "Locations" ], - "summary": "Gets a location and fields", + "summary": "Get Location", "parameters": [ { "type": "string", @@ -1214,7 +1352,7 @@ const docTemplate = `{ "tags": [ "Locations" ], - "summary": "updates a location", + "summary": "Update Location", "parameters": [ { "type": "string", @@ -1254,7 +1392,7 @@ const docTemplate = `{ "tags": [ "Locations" ], - "summary": "deletes a location", + "summary": "Delete Location", "parameters": [ { "type": "string", @@ -1271,6 +1409,167 @@ const docTemplate = `{ } } }, + "/v1/notifiers": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + 
"application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Get Notifiers", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Create Notifier", + "parameters": [ + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "/v1/notifiers/test": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Test Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "URL", + "name": "url", + "in": "query", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/notifiers/{id}": { + "put": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Update Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Delete a Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": 
"id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, "/v1/qrcode": { "get": { "security": [ @@ -1284,7 +1583,7 @@ const docTemplate = `{ "tags": [ "Items" ], - "summary": "Encode data into QRCode", + "summary": "Create QR Code", "parameters": [ { "type": "string", @@ -1316,7 +1615,7 @@ const docTemplate = `{ "tags": [ "Reporting" ], - "summary": "Generates a Bill of Materials CSV", + "summary": "Export Bill of Materials", "responses": { "200": { "description": "text/csv", @@ -1335,12 +1634,12 @@ const docTemplate = `{ "tags": [ "Base" ], - "summary": "Retrieves the basic information about the API", + "summary": "Application Info", "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/v1.ApiSummary" + "$ref": "#/definitions/v1.APISummary" } } } @@ -1356,7 +1655,7 @@ const docTemplate = `{ "tags": [ "User" ], - "summary": "Updates the users password", + "summary": "Change Password", "parameters": [ { "description": "Password Payload", @@ -1402,6 +1701,21 @@ const docTemplate = `{ "description": "string", "name": "password", "in": "formData" + }, + { + "description": "Login Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1.LoginForm" + } + }, + { + "type": "string", + "description": "auth provider", + "name": "provider", + "in": "query" } ], "responses": { @@ -1459,7 +1773,7 @@ const docTemplate = `{ "tags": [ "User" ], - "summary": "Get the current user", + "summary": "Register New User", "parameters": [ { "description": "User Data", @@ -1491,14 +1805,14 @@ const docTemplate = `{ "tags": [ "User" ], - "summary": "Get the current user", + "summary": "Get User Self", "responses": { "200": { "description": "OK", "schema": { "allOf": [ { - "$ref": "#/definitions/server.Result" + "$ref": "#/definitions/v1.Wrapped" }, { "type": "object", @@ -1525,7 +1839,7 @@ const docTemplate = `{ "tags": [ "User" ], - "summary": 
"Update the current user", + "summary": "Update Account", "parameters": [ { "description": "User Data", @@ -1543,7 +1857,7 @@ const docTemplate = `{ "schema": { "allOf": [ { - "$ref": "#/definitions/server.Result" + "$ref": "#/definitions/v1.Wrapped" }, { "type": "object", @@ -1570,7 +1884,7 @@ const docTemplate = `{ "tags": [ "User" ], - "summary": "Deletes the user account", + "summary": "Delete Account", "responses": { "204": { "description": "No Content" @@ -1580,6 +1894,23 @@ const docTemplate = `{ } }, "definitions": { + "currencies.Currency": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "local": { + "type": "string" + }, + "name": { + "type": "string" + }, + "symbol": { + "type": "string" + } + } + }, "repo.DocumentOut": { "type": "object", "properties": { @@ -1660,6 +1991,9 @@ const docTemplate = `{ "id": { "type": "string" }, + "primary": { + "type": "boolean" + }, "type": { "type": "string" }, @@ -1671,6 +2005,9 @@ const docTemplate = `{ "repo.ItemAttachmentUpdate": { "type": "object", "properties": { + "primary": { + "type": "boolean" + }, "title": { "type": "string" }, @@ -1681,9 +2018,13 @@ const docTemplate = `{ }, "repo.ItemCreate": { "type": "object", + "required": [ + "name" + ], "properties": { "description": { - "type": "string" + "type": "string", + "maxLength": 1000 }, "labelIds": { "type": "array", @@ -1696,7 +2037,9 @@ const docTemplate = `{ "type": "string" }, "name": { - "type": "string" + "type": "string", + "maxLength": 255, + "minLength": 1 }, "parentId": { "type": "string", @@ -1743,12 +2086,6 @@ const docTemplate = `{ "$ref": "#/definitions/repo.ItemAttachment" } }, - "children": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "createdAt": { "type": "string" }, @@ -1764,6 +2101,9 @@ const docTemplate = `{ "id": { "type": "string" }, + "imageId": { + "type": "string" + }, "insured": { "type": "boolean" }, @@ -1779,9 +2119,13 @@ const docTemplate = `{ }, "location": { 
"description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.LocationSummary" + "x-omitempty": true }, "manufacturer": { "type": "string" @@ -1797,9 +2141,13 @@ const docTemplate = `{ "type": "string" }, "parent": { + "allOf": [ + { + "$ref": "#/definitions/repo.ItemSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.ItemSummary" + "x-omitempty": true }, "purchaseFrom": { "type": "string" @@ -1843,6 +2191,33 @@ const docTemplate = `{ } } }, + "repo.ItemPatch": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "quantity": { + "type": "integer", + "x-nullable": true, + "x-omitempty": true + } + } + }, + "repo.ItemPath": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/repo.ItemType" + } + } + }, "repo.ItemSummary": { "type": "object", "properties": { @@ -1858,6 +2233,9 @@ const docTemplate = `{ "id": { "type": "string" }, + "imageId": { + "type": "string" + }, "insured": { "type": "boolean" }, @@ -1869,9 +2247,13 @@ const docTemplate = `{ }, "location": { "description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.LocationSummary" + "x-omitempty": true }, "name": { "type": "string" @@ -1888,6 +2270,17 @@ const docTemplate = `{ } } }, + "repo.ItemType": { + "type": "string", + "enum": [ + "location", + "item" + ], + "x-enum-varnames": [ + "ItemTypeLocation", + "ItemTypeItem" + ] + }, "repo.ItemUpdate": { "type": "object", "properties": { @@ -1980,22 +2373,27 @@ const docTemplate = `{ "type": "string" }, "warrantyExpires": { - "description": "Sold", "type": "string" } } }, "repo.LabelCreate": { "type": "object", + "required": [ + "name" + ], "properties": { "color": { "type": "string" }, 
"description": { - "type": "string" + "type": "string", + "maxLength": 255 }, "name": { - "type": "string" + "type": "string", + "maxLength": 255, + "minLength": 1 } } }, @@ -2011,12 +2409,6 @@ const docTemplate = `{ "id": { "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "name": { "type": "string" }, @@ -2078,12 +2470,6 @@ const docTemplate = `{ "id": { "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "name": { "type": "string" }, @@ -2159,13 +2545,13 @@ const docTemplate = `{ "repo.MaintenanceEntry": { "type": "object", "properties": { + "completedDate": { + "type": "string" + }, "cost": { "type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, @@ -2174,42 +2560,54 @@ const docTemplate = `{ }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, "repo.MaintenanceEntryCreate": { "type": "object", + "required": [ + "name" + ], "properties": { + "completedDate": { + "type": "string" + }, "cost": { "type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, "repo.MaintenanceEntryUpdate": { "type": "object", "properties": { + "completedDate": { + "type": "string" + }, "cost": { "type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, @@ -2233,6 +2631,72 @@ const docTemplate = `{ } } }, + "repo.NotifierCreate": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string" + } + } + }, + "repo.NotifierOut": { + "type": "object", + "properties": 
{ + "createdAt": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "userId": { + "type": "string" + } + } + }, + "repo.NotifierUpdate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string", + "x-nullable": true + } + } + }, "repo.PaginationResult-repo_ItemSummary": { "type": "object", "properties": { @@ -2361,39 +2825,6 @@ const docTemplate = `{ } } }, - "server.ErrorResponse": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "fields": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "server.Result": { - "type": "object", - "properties": { - "details": {}, - "error": { - "type": "boolean" - }, - "item": {}, - "message": { - "type": "string" - } - } - }, - "server.Results": { - "type": "object", - "properties": { - "items": {} - } - }, "services.UserRegistration": { "type": "object", "properties": { @@ -2411,15 +2842,7 @@ const docTemplate = `{ } } }, - "v1.ActionAmountResult": { - "type": "object", - "properties": { - "completed": { - "type": "integer" - } - } - }, - "v1.ApiSummary": { + "v1.APISummary": { "type": "object", "properties": { "allowRegistration": { @@ -2448,6 +2871,14 @@ const docTemplate = `{ } } }, + "v1.ActionAmountResult": { + "type": "object", + "properties": { + "completed": { + "type": "integer" + } + } + }, "v1.Build": { "type": "object", "properties": { @@ -2489,12 +2920,17 @@ const docTemplate = `{ }, "v1.GroupInvitationCreate": { "type": "object", + "required": [ + "uses" + ], "properties": { "expiresAt": { "type": "string" }, "uses": { - "type": "integer" + "type": "integer", + "maximum": 100, + "minimum": 1 } } }, @@ -2506,6 +2942,20 @@ const 
docTemplate = `{ } } }, + "v1.LoginForm": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "stayLoggedIn": { + "type": "boolean" + }, + "username": { + "type": "string" + } + } + }, "v1.TokenResponse": { "type": "object", "properties": { @@ -2519,6 +2969,23 @@ const docTemplate = `{ "type": "string" } } + }, + "v1.Wrapped": { + "type": "object", + "properties": { + "item": {} + } + }, + "validate.ErrorResponse": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "fields": { + "type": "string" + } + } } }, "securityDefinitions": { @@ -2537,10 +3004,12 @@ var SwaggerInfo = &swag.Spec{ Host: "", BasePath: "/api", Schemes: []string{}, - Title: "Go API Templates", - Description: "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.", + Title: "Homebox API", + Description: "Track, Manage, and Organize your Things.", InfoInstanceName: "swagger", SwaggerTemplate: docTemplate, + LeftDelim: "{{", + RightDelim: "}}", } func init() { diff --git a/backend/app/api/static/docs/swagger.json b/backend/app/api/static/docs/swagger.json index 3808fb5..b10c93a 100644 --- a/backend/app/api/static/docs/swagger.json +++ b/backend/app/api/static/docs/swagger.json @@ -1,14 +1,11 @@ { "swagger": "2.0", "info": { - "description": "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.", - "title": "Go API Templates", + "description": "Track, Manage, and Organize your Things.", + "title": "Homebox API", "contact": { "name": "Don't" }, - "license": { - "name": "MIT" - }, "version": "1.0" }, "basePath": "/api", @@ -20,13 +17,64 @@ "Bearer": [] } ], + "description": "Ensures all items in the database have an asset ID", "produces": [ "application/json" ], "tags": [ - "Group" + "Actions" ], - "summary": "Ensures all items in the 
database have an asset id", + "summary": "Ensure Asset IDs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/ensure-import-refs": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Ensures all items in the database have an import ref", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Ensures Import Refs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/set-primary-photos": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Sets the first photo of each item as the primary photo", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Set Primary Photos", "responses": { "200": { "description": "OK", @@ -44,13 +92,14 @@ "Bearer": [] } ], + "description": "Resets all item date fields to the beginning of the day", "produces": [ "application/json" ], "tags": [ - "Group" + "Actions" ], - "summary": "Resets all item date fields to the beginning of the day", + "summary": "Zero Out Time Fields", "responses": { "200": { "description": "OK", @@ -72,9 +121,9 @@ "application/json" ], "tags": [ - "Assets" + "Items" ], - "summary": "Gets an item by Asset ID", + "summary": "Get Item by Asset ID", "parameters": [ { "type": "string", @@ -94,6 +143,25 @@ } } }, + "/v1/currency": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Base" + ], + "summary": "Currency", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/currencies.Currency" + } + } + } + } + }, "/v1/groups": { "get": { "security": [ @@ -107,7 +175,7 @@ "tags": [ "Group" ], - "summary": "Get the current user's group", + "summary": "Get Group", "responses": { "200": { "description": "OK", @@ -129,7 +197,7 @@ "tags": [ "Group" ], - "summary": "Updates some 
fields of the current users group", + "summary": "Update Group", "parameters": [ { "description": "User Data", @@ -164,7 +232,7 @@ "tags": [ "Group" ], - "summary": "Get the current user", + "summary": "Create Group Invitation", "parameters": [ { "description": "User Data", @@ -199,7 +267,7 @@ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Group Statistics", "responses": { "200": { "description": "OK", @@ -223,7 +291,7 @@ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Label Statistics", "responses": { "200": { "description": "OK", @@ -250,7 +318,7 @@ "tags": [ "Statistics" ], - "summary": "Get the current user's group statistics", + "summary": "Get Location Statistics", "responses": { "200": { "description": "OK", @@ -277,7 +345,7 @@ "tags": [ "Statistics" ], - "summary": "Queries the changes overtime of the purchase price over time", + "summary": "Get Purchase Price Statistics", "parameters": [ { "type": "string", @@ -315,7 +383,7 @@ "tags": [ "Items" ], - "summary": "Get All Items", + "summary": "Query All Items", "parameters": [ { "type": "string", @@ -354,6 +422,16 @@ "description": "location Ids", "name": "locations", "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "parent Ids", + "name": "parentIds", + "in": "query" } ], "responses": { @@ -377,7 +455,7 @@ "tags": [ "Items" ], - "summary": "Create a new item", + "summary": "Create Item", "parameters": [ { "description": "Item Data", @@ -390,8 +468,8 @@ } ], "responses": { - "200": { - "description": "OK", + "201": { + "description": "Created", "schema": { "$ref": "#/definitions/repo.ItemSummary" } @@ -399,6 +477,27 @@ } } }, + "/v1/items/export": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Items" + ], + "summary": "Export Items", + "responses": { + "200": { + "description": "text/csv", + "schema": { + 
"type": "string" + } + } + } + } + }, "/v1/items/fields": { "get": { "security": [ @@ -412,7 +511,7 @@ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Get All Custom Field Names", "responses": { "200": { "description": "OK", @@ -439,7 +538,7 @@ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Get All Custom Field Values", "responses": { "200": { "description": "OK", @@ -466,7 +565,7 @@ "tags": [ "Items" ], - "summary": "imports items into the database", + "summary": "Import Items", "parameters": [ { "type": "file", @@ -496,7 +595,7 @@ "tags": [ "Items" ], - "summary": "Gets a item and fields", + "summary": "Get Item", "parameters": [ { "type": "string", @@ -527,7 +626,7 @@ "tags": [ "Items" ], - "summary": "updates a item", + "summary": "Update Item", "parameters": [ { "type": "string", @@ -567,7 +666,7 @@ "tags": [ "Items" ], - "summary": "deletes a item", + "summary": "Delete Item", "parameters": [ { "type": "string", @@ -582,6 +681,46 @@ "description": "No Content" } } + }, + "patch": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Update Item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemPatch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } } }, "/v1/items/{id}/attachments": { @@ -597,7 +736,7 @@ "tags": [ "Items Attachments" ], - "summary": "imports items into the database", + "summary": "Create Item Attachment", "parameters": [ { "type": "string", @@ -638,7 +777,7 @@ "422": { "description": "Unprocessable Entity", "schema": { - "$ref": "#/definitions/server.ErrorResponse" + "$ref": 
"#/definitions/validate.ErrorResponse" } } } @@ -657,7 +796,7 @@ "tags": [ "Items Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Get Item Attachment", "parameters": [ { "type": "string", @@ -692,7 +831,7 @@ "tags": [ "Items Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Update Item Attachment", "parameters": [ { "type": "string", @@ -736,7 +875,7 @@ "tags": [ "Items Attachments" ], - "summary": "retrieves an attachment for an item", + "summary": "Delete Item Attachment", "parameters": [ { "type": "string", @@ -808,8 +947,8 @@ } ], "responses": { - "200": { - "description": "OK", + "201": { + "description": "Created", "schema": { "$ref": "#/definitions/repo.MaintenanceEntry" } @@ -871,6 +1010,42 @@ } } }, + "/v1/items/{id}/path": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get the full path of an item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemPath" + } + } + } + } + } + }, "/v1/labels": { "get": { "security": [ @@ -889,22 +1064,10 @@ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.LabelOut" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.LabelOut" + } } } } @@ -921,7 +1084,7 @@ "tags": [ "Labels" ], - "summary": "Create a new label", + "summary": "Create Label", "parameters": [ { "description": "Label Data", @@ -956,7 +1119,7 @@ "tags": [ "Labels" ], - "summary": "Gets a label and fields", + "summary": "Get Label", "parameters": [ { "type": "string", @@ -987,7 +1150,7 @@ "tags": [ 
"Labels" ], - "summary": "updates a label", + "summary": "Update Label", "parameters": [ { "type": "string", @@ -1018,7 +1181,7 @@ "tags": [ "Labels" ], - "summary": "deletes a label", + "summary": "Delete Label", "parameters": [ { "type": "string", @@ -1061,22 +1224,10 @@ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.LocationOutCount" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.LocationOutCount" + } } } } @@ -1093,7 +1244,7 @@ "tags": [ "Locations" ], - "summary": "Create a new location", + "summary": "Create Location", "parameters": [ { "description": "Location Data", @@ -1128,7 +1279,7 @@ "tags": [ "Locations" ], - "summary": "Get All Locations", + "summary": "Get Locations Tree", "parameters": [ { "type": "boolean", @@ -1141,22 +1292,10 @@ "200": { "description": "OK", "schema": { - "allOf": [ - { - "$ref": "#/definitions/server.Results" - }, - { - "type": "object", - "properties": { - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.TreeItem" - } - } - } - } - ] + "type": "array", + "items": { + "$ref": "#/definitions/repo.TreeItem" + } } } } @@ -1175,7 +1314,7 @@ "tags": [ "Locations" ], - "summary": "Gets a location and fields", + "summary": "Get Location", "parameters": [ { "type": "string", @@ -1206,7 +1345,7 @@ "tags": [ "Locations" ], - "summary": "updates a location", + "summary": "Update Location", "parameters": [ { "type": "string", @@ -1246,7 +1385,7 @@ "tags": [ "Locations" ], - "summary": "deletes a location", + "summary": "Delete Location", "parameters": [ { "type": "string", @@ -1263,6 +1402,167 @@ } } }, + "/v1/notifiers": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Get Notifiers", + "responses": { + "200": { + 
"description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Create Notifier", + "parameters": [ + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "/v1/notifiers/test": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Test Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "URL", + "name": "url", + "in": "query", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/notifiers/{id}": { + "put": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Update Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Delete a Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + 
} + }, "/v1/qrcode": { "get": { "security": [ @@ -1276,7 +1576,7 @@ "tags": [ "Items" ], - "summary": "Encode data into QRCode", + "summary": "Create QR Code", "parameters": [ { "type": "string", @@ -1308,7 +1608,7 @@ "tags": [ "Reporting" ], - "summary": "Generates a Bill of Materials CSV", + "summary": "Export Bill of Materials", "responses": { "200": { "description": "text/csv", @@ -1327,12 +1627,12 @@ "tags": [ "Base" ], - "summary": "Retrieves the basic information about the API", + "summary": "Application Info", "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/v1.ApiSummary" + "$ref": "#/definitions/v1.APISummary" } } } @@ -1348,7 +1648,7 @@ "tags": [ "User" ], - "summary": "Updates the users password", + "summary": "Change Password", "parameters": [ { "description": "Password Payload", @@ -1394,6 +1694,21 @@ "description": "string", "name": "password", "in": "formData" + }, + { + "description": "Login Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1.LoginForm" + } + }, + { + "type": "string", + "description": "auth provider", + "name": "provider", + "in": "query" } ], "responses": { @@ -1451,7 +1766,7 @@ "tags": [ "User" ], - "summary": "Get the current user", + "summary": "Register New User", "parameters": [ { "description": "User Data", @@ -1483,14 +1798,14 @@ "tags": [ "User" ], - "summary": "Get the current user", + "summary": "Get User Self", "responses": { "200": { "description": "OK", "schema": { "allOf": [ { - "$ref": "#/definitions/server.Result" + "$ref": "#/definitions/v1.Wrapped" }, { "type": "object", @@ -1517,7 +1832,7 @@ "tags": [ "User" ], - "summary": "Update the current user", + "summary": "Update Account", "parameters": [ { "description": "User Data", @@ -1535,7 +1850,7 @@ "schema": { "allOf": [ { - "$ref": "#/definitions/server.Result" + "$ref": "#/definitions/v1.Wrapped" }, { "type": "object", @@ -1562,7 +1877,7 @@ "tags": [ "User" ], - "summary": 
"Deletes the user account", + "summary": "Delete Account", "responses": { "204": { "description": "No Content" @@ -1572,6 +1887,23 @@ } }, "definitions": { + "currencies.Currency": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "local": { + "type": "string" + }, + "name": { + "type": "string" + }, + "symbol": { + "type": "string" + } + } + }, "repo.DocumentOut": { "type": "object", "properties": { @@ -1652,6 +1984,9 @@ "id": { "type": "string" }, + "primary": { + "type": "boolean" + }, "type": { "type": "string" }, @@ -1663,6 +1998,9 @@ "repo.ItemAttachmentUpdate": { "type": "object", "properties": { + "primary": { + "type": "boolean" + }, "title": { "type": "string" }, @@ -1673,9 +2011,13 @@ }, "repo.ItemCreate": { "type": "object", + "required": [ + "name" + ], "properties": { "description": { - "type": "string" + "type": "string", + "maxLength": 1000 }, "labelIds": { "type": "array", @@ -1688,7 +2030,9 @@ "type": "string" }, "name": { - "type": "string" + "type": "string", + "maxLength": 255, + "minLength": 1 }, "parentId": { "type": "string", @@ -1735,12 +2079,6 @@ "$ref": "#/definitions/repo.ItemAttachment" } }, - "children": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "createdAt": { "type": "string" }, @@ -1756,6 +2094,9 @@ "id": { "type": "string" }, + "imageId": { + "type": "string" + }, "insured": { "type": "boolean" }, @@ -1771,9 +2112,13 @@ }, "location": { "description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.LocationSummary" + "x-omitempty": true }, "manufacturer": { "type": "string" @@ -1789,9 +2134,13 @@ "type": "string" }, "parent": { + "allOf": [ + { + "$ref": "#/definitions/repo.ItemSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.ItemSummary" + "x-omitempty": true }, "purchaseFrom": { "type": "string" @@ -1835,6 
+2184,33 @@ } } }, + "repo.ItemPatch": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "quantity": { + "type": "integer", + "x-nullable": true, + "x-omitempty": true + } + } + }, + "repo.ItemPath": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/repo.ItemType" + } + } + }, "repo.ItemSummary": { "type": "object", "properties": { @@ -1850,6 +2226,9 @@ "id": { "type": "string" }, + "imageId": { + "type": "string" + }, "insured": { "type": "boolean" }, @@ -1861,9 +2240,13 @@ }, "location": { "description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], "x-nullable": true, - "x-omitempty": true, - "$ref": "#/definitions/repo.LocationSummary" + "x-omitempty": true }, "name": { "type": "string" @@ -1880,6 +2263,17 @@ } } }, + "repo.ItemType": { + "type": "string", + "enum": [ + "location", + "item" + ], + "x-enum-varnames": [ + "ItemTypeLocation", + "ItemTypeItem" + ] + }, "repo.ItemUpdate": { "type": "object", "properties": { @@ -1972,22 +2366,27 @@ "type": "string" }, "warrantyExpires": { - "description": "Sold", "type": "string" } } }, "repo.LabelCreate": { "type": "object", + "required": [ + "name" + ], "properties": { "color": { "type": "string" }, "description": { - "type": "string" + "type": "string", + "maxLength": 255 }, "name": { - "type": "string" + "type": "string", + "maxLength": 255, + "minLength": 1 } } }, @@ -2003,12 +2402,6 @@ "id": { "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "name": { "type": "string" }, @@ -2070,12 +2463,6 @@ "id": { "type": "string" }, - "items": { - "type": "array", - "items": { - "$ref": "#/definitions/repo.ItemSummary" - } - }, "name": { "type": "string" }, @@ -2151,13 +2538,13 @@ "repo.MaintenanceEntry": { "type": "object", "properties": { + "completedDate": { + "type": "string" + }, "cost": { 
"type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, @@ -2166,42 +2553,54 @@ }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, "repo.MaintenanceEntryCreate": { "type": "object", + "required": [ + "name" + ], "properties": { + "completedDate": { + "type": "string" + }, "cost": { "type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, "repo.MaintenanceEntryUpdate": { "type": "object", "properties": { + "completedDate": { + "type": "string" + }, "cost": { "type": "string", "example": "0" }, - "date": { - "type": "string" - }, "description": { "type": "string" }, "name": { "type": "string" + }, + "scheduledDate": { + "type": "string" } } }, @@ -2225,6 +2624,72 @@ } } }, + "repo.NotifierCreate": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string" + } + } + }, + "repo.NotifierOut": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "userId": { + "type": "string" + } + } + }, + "repo.NotifierUpdate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string", + "x-nullable": true + } + } + }, "repo.PaginationResult-repo_ItemSummary": { "type": "object", "properties": { @@ -2353,39 +2818,6 @@ } } }, - "server.ErrorResponse": { - "type": "object", - "properties": { - "error": { - "type": "string" - }, - "fields": { - 
"type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "server.Result": { - "type": "object", - "properties": { - "details": {}, - "error": { - "type": "boolean" - }, - "item": {}, - "message": { - "type": "string" - } - } - }, - "server.Results": { - "type": "object", - "properties": { - "items": {} - } - }, "services.UserRegistration": { "type": "object", "properties": { @@ -2403,15 +2835,7 @@ } } }, - "v1.ActionAmountResult": { - "type": "object", - "properties": { - "completed": { - "type": "integer" - } - } - }, - "v1.ApiSummary": { + "v1.APISummary": { "type": "object", "properties": { "allowRegistration": { @@ -2440,6 +2864,14 @@ } } }, + "v1.ActionAmountResult": { + "type": "object", + "properties": { + "completed": { + "type": "integer" + } + } + }, "v1.Build": { "type": "object", "properties": { @@ -2481,12 +2913,17 @@ }, "v1.GroupInvitationCreate": { "type": "object", + "required": [ + "uses" + ], "properties": { "expiresAt": { "type": "string" }, "uses": { - "type": "integer" + "type": "integer", + "maximum": 100, + "minimum": 1 } } }, @@ -2498,6 +2935,20 @@ } } }, + "v1.LoginForm": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "stayLoggedIn": { + "type": "boolean" + }, + "username": { + "type": "string" + } + } + }, "v1.TokenResponse": { "type": "object", "properties": { @@ -2511,6 +2962,23 @@ "type": "string" } } + }, + "v1.Wrapped": { + "type": "object", + "properties": { + "item": {} + } + }, + "validate.ErrorResponse": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "fields": { + "type": "string" + } + } } }, "securityDefinitions": { diff --git a/backend/app/api/static/docs/swagger.yaml b/backend/app/api/static/docs/swagger.yaml index c461867..dbb31e6 100644 --- a/backend/app/api/static/docs/swagger.yaml +++ b/backend/app/api/static/docs/swagger.yaml @@ -1,5 +1,16 @@ basePath: /api definitions: + currencies.Currency: + properties: + code: + type: string 
+ local: + type: string + name: + type: string + symbol: + type: string + type: object repo.DocumentOut: properties: id: @@ -52,6 +63,8 @@ definitions: $ref: '#/definitions/repo.DocumentOut' id: type: string + primary: + type: boolean type: type: string updatedAt: @@ -59,6 +72,8 @@ definitions: type: object repo.ItemAttachmentUpdate: properties: + primary: + type: boolean title: type: string type: @@ -67,6 +82,7 @@ definitions: repo.ItemCreate: properties: description: + maxLength: 1000 type: string labelIds: items: @@ -76,10 +92,14 @@ definitions: description: Edges type: string name: + maxLength: 255 + minLength: 1 type: string parentId: type: string x-nullable: true + required: + - name type: object repo.ItemField: properties: @@ -107,10 +127,6 @@ definitions: items: $ref: '#/definitions/repo.ItemAttachment' type: array - children: - items: - $ref: '#/definitions/repo.ItemSummary' - type: array createdAt: type: string description: @@ -121,6 +137,8 @@ definitions: type: array id: type: string + imageId: + type: string insured: type: boolean labels: @@ -131,7 +149,8 @@ definitions: description: Warranty type: boolean location: - $ref: '#/definitions/repo.LocationSummary' + allOf: + - $ref: '#/definitions/repo.LocationSummary' description: Edges x-nullable: true x-omitempty: true @@ -145,7 +164,8 @@ definitions: description: Extras type: string parent: - $ref: '#/definitions/repo.ItemSummary' + allOf: + - $ref: '#/definitions/repo.ItemSummary' x-nullable: true x-omitempty: true purchaseFrom: @@ -177,6 +197,24 @@ definitions: warrantyExpires: type: string type: object + repo.ItemPatch: + properties: + id: + type: string + quantity: + type: integer + x-nullable: true + x-omitempty: true + type: object + repo.ItemPath: + properties: + id: + type: string + name: + type: string + type: + $ref: '#/definitions/repo.ItemType' + type: object repo.ItemSummary: properties: archived: @@ -187,6 +225,8 @@ definitions: type: string id: type: string + imageId: + type: string 
insured: type: boolean labels: @@ -194,7 +234,8 @@ definitions: $ref: '#/definitions/repo.LabelSummary' type: array location: - $ref: '#/definitions/repo.LocationSummary' + allOf: + - $ref: '#/definitions/repo.LocationSummary' description: Edges x-nullable: true x-omitempty: true @@ -208,6 +249,14 @@ definitions: updatedAt: type: string type: object + repo.ItemType: + enum: + - location + - item + type: string + x-enum-varnames: + - ItemTypeLocation + - ItemTypeItem repo.ItemUpdate: properties: archived: @@ -273,7 +322,6 @@ definitions: warrantyDetails: type: string warrantyExpires: - description: Sold type: string type: object repo.LabelCreate: @@ -281,9 +329,14 @@ definitions: color: type: string description: + maxLength: 255 type: string name: + maxLength: 255 + minLength: 1 type: string + required: + - name type: object repo.LabelOut: properties: @@ -293,10 +346,6 @@ definitions: type: string id: type: string - items: - items: - $ref: '#/definitions/repo.ItemSummary' - type: array name: type: string updatedAt: @@ -337,10 +386,6 @@ definitions: type: string id: type: string - items: - items: - $ref: '#/definitions/repo.ItemSummary' - type: array name: type: string parent: @@ -390,41 +435,49 @@ definitions: type: object repo.MaintenanceEntry: properties: + completedDate: + type: string cost: example: "0" type: string - date: - type: string description: type: string id: type: string name: type: string + scheduledDate: + type: string type: object repo.MaintenanceEntryCreate: properties: + completedDate: + type: string cost: example: "0" type: string - date: - type: string description: type: string name: type: string + scheduledDate: + type: string + required: + - name type: object repo.MaintenanceEntryUpdate: properties: + completedDate: + type: string cost: example: "0" type: string - date: - type: string description: type: string name: type: string + scheduledDate: + type: string type: object repo.MaintenanceLog: properties: @@ -439,6 +492,51 @@ definitions: 
itemId: type: string type: object + repo.NotifierCreate: + properties: + isActive: + type: boolean + name: + maxLength: 255 + minLength: 1 + type: string + url: + type: string + required: + - name + - url + type: object + repo.NotifierOut: + properties: + createdAt: + type: string + groupId: + type: string + id: + type: string + isActive: + type: boolean + name: + type: string + updatedAt: + type: string + userId: + type: string + type: object + repo.NotifierUpdate: + properties: + isActive: + type: boolean + name: + maxLength: 255 + minLength: 1 + type: string + url: + type: string + x-nullable: true + required: + - name + type: object repo.PaginationResult-repo_ItemSummary: properties: items: @@ -522,28 +620,6 @@ definitions: value: type: number type: object - server.ErrorResponse: - properties: - error: - type: string - fields: - additionalProperties: - type: string - type: object - type: object - server.Result: - properties: - details: {} - error: - type: boolean - item: {} - message: - type: string - type: object - server.Results: - properties: - items: {} - type: object services.UserRegistration: properties: email: @@ -555,12 +631,7 @@ definitions: token: type: string type: object - v1.ActionAmountResult: - properties: - completed: - type: integer - type: object - v1.ApiSummary: + v1.APISummary: properties: allowRegistration: type: boolean @@ -579,6 +650,11 @@ definitions: type: string type: array type: object + v1.ActionAmountResult: + properties: + completed: + type: integer + type: object v1.Build: properties: buildTime: @@ -609,13 +685,26 @@ definitions: expiresAt: type: string uses: + maximum: 100 + minimum: 1 type: integer + required: + - uses type: object v1.ItemAttachmentToken: properties: token: type: string type: object + v1.LoginForm: + properties: + password: + type: string + stayLoggedIn: + type: boolean + username: + type: string + type: object v1.TokenResponse: properties: attachmentToken: @@ -625,19 +714,27 @@ definitions: token: type: string 
type: object + v1.Wrapped: + properties: + item: {} + type: object + validate.ErrorResponse: + properties: + error: + type: string + fields: + type: string + type: object info: contact: name: Don't - description: This is a simple Rest API Server Template that implements some basic - User and Authentication patterns to help you get started and bootstrap your next - project!. - license: - name: MIT - title: Go API Templates + description: Track, Manage, and Organize your Things. + title: Homebox API version: "1.0" paths: /v1/actions/ensure-asset-ids: post: + description: Ensures all items in the database have an asset ID produces: - application/json responses: @@ -647,11 +744,42 @@ paths: $ref: '#/definitions/v1.ActionAmountResult' security: - Bearer: [] - summary: Ensures all items in the database have an asset id + summary: Ensure Asset IDs tags: - - Group + - Actions + /v1/actions/ensure-import-refs: + post: + description: Ensures all items in the database have an import ref + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/v1.ActionAmountResult' + security: + - Bearer: [] + summary: Ensures Import Refs + tags: + - Actions + /v1/actions/set-primary-photos: + post: + description: Sets the first photo of each item as the primary photo + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/v1.ActionAmountResult' + security: + - Bearer: [] + summary: Set Primary Photos + tags: + - Actions /v1/actions/zero-item-time-fields: post: + description: Resets all item date fields to the beginning of the day produces: - application/json responses: @@ -661,9 +789,9 @@ paths: $ref: '#/definitions/v1.ActionAmountResult' security: - Bearer: [] - summary: Resets all item date fields to the beginning of the day + summary: Zero Out Time Fields tags: - - Group + - Actions /v1/assets/{id}: get: parameters: @@ -681,9 +809,21 @@ paths: $ref: 
'#/definitions/repo.PaginationResult-repo_ItemSummary' security: - Bearer: [] - summary: Gets an item by Asset ID + summary: Get Item by Asset ID tags: - - Assets + - Items + /v1/currency: + get: + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/currencies.Currency' + summary: Currency + tags: + - Base /v1/groups: get: produces: @@ -695,7 +835,7 @@ paths: $ref: '#/definitions/repo.Group' security: - Bearer: [] - summary: Get the current user's group + summary: Get Group tags: - Group put: @@ -715,7 +855,7 @@ paths: $ref: '#/definitions/repo.Group' security: - Bearer: [] - summary: Updates some fields of the current users group + summary: Update Group tags: - Group /v1/groups/invitations: @@ -736,7 +876,7 @@ paths: $ref: '#/definitions/v1.GroupInvitation' security: - Bearer: [] - summary: Get the current user + summary: Create Group Invitation tags: - Group /v1/groups/statistics: @@ -750,7 +890,7 @@ paths: $ref: '#/definitions/repo.GroupStatistics' security: - Bearer: [] - summary: Get the current user's group statistics + summary: Get Group Statistics tags: - Statistics /v1/groups/statistics/labels: @@ -766,7 +906,7 @@ paths: type: array security: - Bearer: [] - summary: Get the current user's group statistics + summary: Get Label Statistics tags: - Statistics /v1/groups/statistics/locations: @@ -782,7 +922,7 @@ paths: type: array security: - Bearer: [] - summary: Get the current user's group statistics + summary: Get Location Statistics tags: - Statistics /v1/groups/statistics/purchase-price: @@ -805,7 +945,7 @@ paths: $ref: '#/definitions/repo.ValueOverTime' security: - Bearer: [] - summary: Queries the changes overtime of the purchase price over time + summary: Get Purchase Price Statistics tags: - Statistics /v1/items: @@ -837,6 +977,13 @@ paths: type: string name: locations type: array + - collectionFormat: multi + description: parent Ids + in: query + items: + type: string + name: parentIds + type: array 
produces: - application/json responses: @@ -846,7 +993,7 @@ paths: $ref: '#/definitions/repo.PaginationResult-repo_ItemSummary' security: - Bearer: [] - summary: Get All Items + summary: Query All Items tags: - Items post: @@ -860,13 +1007,13 @@ paths: produces: - application/json responses: - "200": - description: OK + "201": + description: Created schema: $ref: '#/definitions/repo.ItemSummary' security: - Bearer: [] - summary: Create a new item + summary: Create Item tags: - Items /v1/items/{id}: @@ -884,7 +1031,7 @@ paths: description: No Content security: - Bearer: [] - summary: deletes a item + summary: Delete Item tags: - Items get: @@ -903,7 +1050,32 @@ paths: $ref: '#/definitions/repo.ItemOut' security: - Bearer: [] - summary: Gets a item and fields + summary: Get Item + tags: + - Items + patch: + parameters: + - description: Item ID + in: path + name: id + required: true + type: string + - description: Item Data + in: body + name: payload + required: true + schema: + $ref: '#/definitions/repo.ItemPatch' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/repo.ItemOut' + security: + - Bearer: [] + summary: Update Item tags: - Items put: @@ -928,7 +1100,7 @@ paths: $ref: '#/definitions/repo.ItemOut' security: - Bearer: [] - summary: updates a item + summary: Update Item tags: - Items /v1/items/{id}/attachments: @@ -964,10 +1136,10 @@ paths: "422": description: Unprocessable Entity schema: - $ref: '#/definitions/server.ErrorResponse' + $ref: '#/definitions/validate.ErrorResponse' security: - Bearer: [] - summary: imports items into the database + summary: Create Item Attachment tags: - Items Attachments /v1/items/{id}/attachments/{attachment_id}: @@ -988,7 +1160,7 @@ paths: description: No Content security: - Bearer: [] - summary: retrieves an attachment for an item + summary: Delete Item Attachment tags: - Items Attachments get: @@ -1012,7 +1184,7 @@ paths: $ref: '#/definitions/v1.ItemAttachmentToken' 
security: - Bearer: [] - summary: retrieves an attachment for an item + summary: Get Item Attachment tags: - Items Attachments put: @@ -1040,7 +1212,7 @@ paths: $ref: '#/definitions/repo.ItemOut' security: - Bearer: [] - summary: retrieves an attachment for an item + summary: Update Item Attachment tags: - Items Attachments /v1/items/{id}/maintenance: @@ -1068,8 +1240,8 @@ paths: produces: - application/json responses: - "200": - description: OK + "201": + description: Created schema: $ref: '#/definitions/repo.MaintenanceEntry' security: @@ -1109,6 +1281,40 @@ paths: summary: Update Maintenance Entry tags: - Maintenance + /v1/items/{id}/path: + get: + parameters: + - description: Item ID + in: path + name: id + required: true + type: string + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/repo.ItemPath' + type: array + security: + - Bearer: [] + summary: Get the full path of an item + tags: + - Items + /v1/items/export: + get: + responses: + "200": + description: text/csv + schema: + type: string + security: + - Bearer: [] + summary: Export Items + tags: + - Items /v1/items/fields: get: produces: @@ -1122,7 +1328,7 @@ paths: type: array security: - Bearer: [] - summary: imports items into the database + summary: Get All Custom Field Names tags: - Items /v1/items/fields/values: @@ -1138,7 +1344,7 @@ paths: type: array security: - Bearer: [] - summary: imports items into the database + summary: Get All Custom Field Values tags: - Items /v1/items/import: @@ -1156,7 +1362,7 @@ paths: description: No Content security: - Bearer: [] - summary: imports items into the database + summary: Import Items tags: - Items /v1/labels: @@ -1167,14 +1373,9 @@ paths: "200": description: OK schema: - allOf: - - $ref: '#/definitions/server.Results' - - properties: - items: - items: - $ref: '#/definitions/repo.LabelOut' - type: array - type: object + items: + $ref: '#/definitions/repo.LabelOut' + type: array security: - 
Bearer: [] summary: Get All Labels @@ -1197,7 +1398,7 @@ paths: $ref: '#/definitions/repo.LabelSummary' security: - Bearer: [] - summary: Create a new label + summary: Create Label tags: - Labels /v1/labels/{id}: @@ -1215,7 +1416,7 @@ paths: description: No Content security: - Bearer: [] - summary: deletes a label + summary: Delete Label tags: - Labels get: @@ -1234,7 +1435,7 @@ paths: $ref: '#/definitions/repo.LabelOut' security: - Bearer: [] - summary: Gets a label and fields + summary: Get Label tags: - Labels put: @@ -1253,7 +1454,7 @@ paths: $ref: '#/definitions/repo.LabelOut' security: - Bearer: [] - summary: updates a label + summary: Update Label tags: - Labels /v1/locations: @@ -1269,14 +1470,9 @@ paths: "200": description: OK schema: - allOf: - - $ref: '#/definitions/server.Results' - - properties: - items: - items: - $ref: '#/definitions/repo.LocationOutCount' - type: array - type: object + items: + $ref: '#/definitions/repo.LocationOutCount' + type: array security: - Bearer: [] summary: Get All Locations @@ -1299,7 +1495,7 @@ paths: $ref: '#/definitions/repo.LocationSummary' security: - Bearer: [] - summary: Create a new location + summary: Create Location tags: - Locations /v1/locations/{id}: @@ -1317,7 +1513,7 @@ paths: description: No Content security: - Bearer: [] - summary: deletes a location + summary: Delete Location tags: - Locations get: @@ -1336,7 +1532,7 @@ paths: $ref: '#/definitions/repo.LocationOut' security: - Bearer: [] - summary: Gets a location and fields + summary: Get Location tags: - Locations put: @@ -1361,7 +1557,7 @@ paths: $ref: '#/definitions/repo.LocationOut' security: - Bearer: [] - summary: updates a location + summary: Update Location tags: - Locations /v1/locations/tree: @@ -1377,19 +1573,112 @@ paths: "200": description: OK schema: - allOf: - - $ref: '#/definitions/server.Results' - - properties: - items: - items: - $ref: '#/definitions/repo.TreeItem' - type: array - type: object + items: + $ref: 
'#/definitions/repo.TreeItem' + type: array security: - Bearer: [] - summary: Get All Locations + summary: Get Locations Tree tags: - Locations + /v1/notifiers: + get: + produces: + - application/json + responses: + "200": + description: OK + schema: + items: + $ref: '#/definitions/repo.NotifierOut' + type: array + security: + - Bearer: [] + summary: Get Notifiers + tags: + - Notifiers + post: + parameters: + - description: Notifier Data + in: body + name: payload + required: true + schema: + $ref: '#/definitions/repo.NotifierCreate' + produces: + - application/json + responses: + "200": + description: OK + schema: + $ref: '#/definitions/repo.NotifierOut' + security: + - Bearer: [] + summary: Create Notifier + tags: + - Notifiers + /v1/notifiers/{id}: + delete: + parameters: + - description: Notifier ID + in: path + name: id + required: true + type: string + responses: + "204": + description: No Content + security: + - Bearer: [] + summary: Delete a Notifier + tags: + - Notifiers + put: + parameters: + - description: Notifier ID + in: path + name: id + required: true + type: string + - description: Notifier Data + in: body + name: payload + required: true + schema: + $ref: '#/definitions/repo.NotifierUpdate' + responses: + "200": + description: OK + schema: + $ref: '#/definitions/repo.NotifierOut' + security: + - Bearer: [] + summary: Update Notifier + tags: + - Notifiers + /v1/notifiers/test: + post: + parameters: + - description: Notifier ID + in: path + name: id + required: true + type: string + - description: URL + in: query + name: url + required: true + type: string + produces: + - application/json + responses: + "204": + description: No Content + security: + - Bearer: [] + summary: Test Notifier + tags: + - Notifiers /v1/qrcode: get: parameters: @@ -1406,7 +1695,7 @@ paths: type: string security: - Bearer: [] - summary: Encode data into QRCode + summary: Create QR Code tags: - Items /v1/reporting/bill-of-materials: @@ -1420,7 +1709,7 @@ paths: type: string 
security: - Bearer: [] - summary: Generates a Bill of Materials CSV + summary: Export Bill of Materials tags: - Reporting /v1/status: @@ -1431,8 +1720,8 @@ paths: "200": description: OK schema: - $ref: '#/definitions/v1.ApiSummary' - summary: Retrieves the basic information about the API + $ref: '#/definitions/v1.APISummary' + summary: Application Info tags: - Base /v1/users/change-password: @@ -1449,7 +1738,7 @@ paths: description: No Content security: - Bearer: [] - summary: Updates the users password + summary: Change Password tags: - User /v1/users/login: @@ -1468,6 +1757,16 @@ paths: in: formData name: password type: string + - description: Login Data + in: body + name: payload + required: true + schema: + $ref: '#/definitions/v1.LoginForm' + - description: auth provider + in: query + name: provider + type: string produces: - application/json responses: @@ -1515,7 +1814,7 @@ paths: responses: "204": description: No Content - summary: Get the current user + summary: Register New User tags: - User /v1/users/self: @@ -1527,7 +1826,7 @@ paths: description: No Content security: - Bearer: [] - summary: Deletes the user account + summary: Delete Account tags: - User get: @@ -1538,14 +1837,14 @@ paths: description: OK schema: allOf: - - $ref: '#/definitions/server.Result' + - $ref: '#/definitions/v1.Wrapped' - properties: item: $ref: '#/definitions/repo.UserOut' type: object security: - Bearer: [] - summary: Get the current user + summary: Get User Self tags: - User put: @@ -1563,14 +1862,14 @@ paths: description: OK schema: allOf: - - $ref: '#/definitions/server.Result' + - $ref: '#/definitions/v1.Wrapped' - properties: item: $ref: '#/definitions/repo.UserUpdate' type: object security: - Bearer: [] - summary: Update the current user + summary: Update Account tags: - User securityDefinitions: diff --git a/scripts/process-types/main.go b/backend/app/tools/typegen/main.go similarity index 91% rename from scripts/process-types/main.go rename to 
backend/app/tools/typegen/main.go index e92a881..5f4d8da 100644 --- a/scripts/process-types/main.go +++ b/backend/app/tools/typegen/main.go @@ -54,6 +54,7 @@ func main() { NewReReplace(` Services`, " "), NewReReplace(` V1`, " "), NewReReplace(`\?:`, ":"), + NewReReplace(`(\w+):\s(.*null.*)`, "$1?: $2"), // make null union types optional NewReDate("createdAt"), NewReDate("updatedAt"), NewReDate("soldTime"), @@ -61,6 +62,8 @@ func main() { NewReDate("warrantyExpires"), NewReDate("expiresAt"), NewReDate("date"), + NewReDate("completedDate"), + NewReDate("scheduledDate"), } for _, replace := range replaces { diff --git a/backend/go.mod b/backend/go.mod index d489f28..d29a620 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,57 +1,77 @@ module github.com/hay-kot/homebox/backend -go 1.19 +go 1.23.0 + +toolchain go1.24.2 require ( - ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb - entgo.io/ent v0.11.8 - github.com/ardanlabs/conf/v3 v3.1.4 - github.com/go-chi/chi/v5 v5.0.8 - github.com/go-playground/validator/v10 v10.11.2 - github.com/gocarina/gocsv v0.0.0-20230123225133-763e25b40669 - github.com/google/uuid v1.3.0 - github.com/mattn/go-sqlite3 v1.14.16 - github.com/rs/zerolog v1.29.0 - github.com/stretchr/testify v1.8.1 - github.com/swaggo/http-swagger v1.3.3 - github.com/swaggo/swag v1.8.10 - github.com/yeqown/go-qrcode/v2 v2.2.1 - github.com/yeqown/go-qrcode/writer/standard v1.2.1 - golang.org/x/crypto v0.6.0 + ariga.io/atlas v0.32.0 + entgo.io/ent v0.14.4 + github.com/ardanlabs/conf/v3 v3.7.1 + github.com/containrrr/shoutrrr v0.8.0 + github.com/go-chi/chi/v5 v5.2.1 + github.com/go-playground/validator/v10 v10.26.0 + github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1 + github.com/google/uuid v1.6.0 + github.com/gorilla/schema v1.4.1 + github.com/hay-kot/httpkit v0.0.11 + github.com/mattn/go-sqlite3 v1.14.27 + github.com/olahol/melody v1.2.1 + github.com/pkg/errors v0.9.1 + github.com/rs/zerolog v1.34.0 + github.com/stretchr/testify v1.10.0 + 
github.com/swaggo/http-swagger/v2 v2.0.2 + github.com/swaggo/swag v1.16.4 + github.com/yeqown/go-qrcode/v2 v2.2.5 + github.com/yeqown/go-qrcode/writer/standard v1.2.5 + golang.org/x/crypto v0.37.0 + modernc.org/sqlite v1.37.0 ) require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fogleman/gg v1.3.0 // indirect - github.com/go-openapi/inflect v0.19.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/spec v0.20.7 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/go-openapi/inflect v0.21.2 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/hashicorp/hcl/v2 v2.23.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/leodido/go-urn v1.2.1 // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/leodido/go-urn v1.4.0 // 
indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/swaggo/files v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/swaggo/files/v2 v2.0.2 // indirect github.com/yeqown/reedsolomon v1.0.0 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.6.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/tools v0.4.0 // indirect + github.com/zclconf/go-cty v1.16.2 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect + golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect + golang.org/x/image v0.26.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/sync v0.13.0 // indirect + golang.org/x/sys v0.32.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.32.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.62.1 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.9.1 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index e0fca76..6e0ebbf 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,174 +1,190 @@ -ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb h1:mbsFtavDqGdYwdDpP50LGOOZ2hgyGoJcZeOpbgKMyu4= -ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU= -entgo.io/ent v0.11.8 h1:M/M0QL1CYCUSdqGRXUrXhFYSDRJPsOOrr+RLEej/gyQ= -entgo.io/ent v0.11.8/go.mod h1:ericBi6Q8l3wBH1wEIDfKxw7rcQEuRPyBfbIzjtxJ18= +ariga.io/atlas 
v0.32.0 h1:y+77nueMrExLiKlz1CcPKh/nU7VSlWfBbwCShsJyvCw= +ariga.io/atlas v0.32.0/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +entgo.io/ent v0.14.4 h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI= +entgo.io/ent v0.14.4/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/ardanlabs/conf/v3 v3.1.4 h1:c0jJYbqHJcrR/uYImbGC1q7quH3DYxH49zGCT7WLJH4= -github.com/ardanlabs/conf/v3 v3.1.4/go.mod h1:bIacyuGeZjkTdtszdbvOcuq49VhHpV3+IPZ2ewOAK4I= -github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/ardanlabs/conf/v3 v3.7.1 h1:GIV7ylesF/0NexhnJdLmzsi2NIVYY2wVhR0UfvpmAeQ= +github.com/ardanlabs/conf/v3 v3.7.1/go.mod h1:IIucqD+601gt3jfhMXVukxoT16LnoGVd2DzRC2GhHiA= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar 
v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/containrrr/shoutrrr v0.8.0 h1:mfG2ATzIS7NR2Ec6XL+xyoHzN97H8WPjir8aYzJUSec= +github.com/containrrr/shoutrrr v0.8.0/go.mod h1:ioyQAyu1LJY6sILuNyKaQaw+9Ttik5QePU8atnAdO2o= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= -github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= -github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= -github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= -github.com/go-openapi/spec v0.20.7/go.mod 
h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8= +github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/inflect v0.21.2 h1:0gClGlGcxifcJR56zwvhaOulnNgnhc4qTAkob5ObnSM= +github.com/go-openapi/inflect v0.21.2/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod 
h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.11.2 h1:q3SHpufmypg+erIExEKUmsgmhDTyhcJ38oeKGACXohU= -github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s= +github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= +github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/gocarina/gocsv v0.0.0-20230123225133-763e25b40669 h1:MvZzCA/mduVWoBSVKJeMdv+AqXQmZZ8i6p8889ejt/Y= -github.com/gocarina/gocsv v0.0.0-20230123225133-763e25b40669/go.mod h1:5YoVOkjYAQumqlV356Hj3xeYh4BdZuLE0/nRkf2NKkI= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1 h1:FWNFq4fM1wPfcK40yHE5UO3RUdSNPaBC+j3PokzA6OQ= +github.com/gocarina/gocsv v0.0.0-20240520201108-78e41c74b4b1/go.mod h1:5YoVOkjYAQumqlV356Hj3xeYh4BdZuLE0/nRkf2NKkI= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod 
h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= +github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/hay-kot/httpkit v0.0.11 h1:ZdB2uqsFBSDpfUoClGK5c5orjBjQkEVSXh7fZX5FKEk= 
+github.com/hay-kot/httpkit v0.0.11/go.mod h1:0kZdk5/swzdfqfg2c6pBWimcgeJ9PTyO97EbHnYl2Sw= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= +github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= +github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod 
h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/olahol/melody v1.2.1 h1:xdwRkzHxf+B0w4TKbGpUSSkV516ZucQZJIWLztOWICQ= +github.com/olahol/melody v1.2.1/go.mod h1:GgkTl6Y7yWj/HtfD48Q5vLKPVoZOH+Qqgfa7CvJgJM4= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= -github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= -github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/swaggo/files v1.0.0 h1:1gGXVIeUFCS/dta17rnP0iOpr6CXFwKD7EO5ID233e4= -github.com/swaggo/files v1.0.0/go.mod h1:N59U6URJLyU1PQgFqPM7wXLMhJx7QAolnvfQkqO13kc= -github.com/swaggo/http-swagger v1.3.3 h1:Hu5Z0L9ssyBLofaama21iYaF2VbWyA8jdohaaCGpHsc= -github.com/swaggo/http-swagger v1.3.3/go.mod h1:sE+4PjD89IxMPm77FnkDz0sdO+p5lbXzrVWT6OTVVGo= -github.com/swaggo/swag v1.8.10 h1:eExW4bFa52WOjqRzRD58bgWsWfdFJso50lpbeTcmTfo= -github.com/swaggo/swag v1.8.10/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk= -github.com/yeqown/go-qrcode/v2 v2.2.1 h1:Jc1Q916fwC05R8C7mpWDbrT9tyLPaLLKDABoC5XBCe8= -github.com/yeqown/go-qrcode/v2 v2.2.1/go.mod h1:2Qsk2APUCPne0TsRo40DIkI5MYnbzYKCnKGEFWrxd24= -github.com/yeqown/go-qrcode/writer/standard v1.2.1 h1:FMRZiur5yApUIe4fqtqmcdl/XQTZAZWt2DhkPx4VIW0= -github.com/yeqown/go-qrcode/writer/standard v1.2.1/go.mod h1:ZelyDFiVymrauRjUn454iF7bjsabmB1vixkDA5kq2bw= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= 
+github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/swaggo/files/v2 v2.0.2 h1:Bq4tgS/yxLB/3nwOMcul5oLEUKa877Ykgz3CJMVbQKU= +github.com/swaggo/files/v2 v2.0.2/go.mod h1:TVqetIzZsO9OhHX1Am9sRf9LdrFZqoK49N37KON/jr0= +github.com/swaggo/http-swagger/v2 v2.0.2 h1:FKCdLsl+sFCx60KFsyM0rDarwiUSZ8DqbfSyIKC9OBg= +github.com/swaggo/http-swagger/v2 v2.0.2/go.mod h1:r7/GBkAWIfK6E/OLnE8fXnviHiDeAHmgIyooa4xm3AQ= +github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A= +github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg= +github.com/yeqown/go-qrcode/v2 v2.2.5 h1:HCOe2bSjkhZyYoyyNaXNzh4DJZll6inVJQQw+8228Zk= +github.com/yeqown/go-qrcode/v2 v2.2.5/go.mod h1:uHpt9CM0V1HeXLz+Wg5MN50/sI/fQhfkZlOM+cOTHxw= +github.com/yeqown/go-qrcode/writer/standard v1.2.5 h1:m+5BUIcbsaG2md76FIqI/oZULrAju8tsk47eOohovQ0= +github.com/yeqown/go-qrcode/writer/standard v1.2.5/go.mod h1:O4MbzsotGCvy8upYPCR91j81dr5XLT7heuljcNXW+oQ= github.com/yeqown/reedsolomon v1.0.0 h1:x1h/Ej/uJnNu8jaX7GLHBWmZKCAWjEJTetkqaabr4B0= github.com/yeqown/reedsolomon v1.0.0/go.mod h1:P76zpcn2TCuL0ul1Fso373qHRc69LKwAw/Iy6g1WiiM= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= 
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM= -golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= +golang.org/x/image v0.26.0 h1:4XjIFEZWQmCZi6Wv8BoxsDhRU3RVnLX04dToTDAEPlY= +golang.org/x/image v0.26.0/go.mod h1:lcxbMFAovzpnJxzXS3nyL83K27tmqtKzIJpctK8YO5c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod 
h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic= +modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU= +modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= 
+modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s= +modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.9.1 h1:V/Z1solwAVmMW1yttq3nDdZPJqV1rM05Ccq6KMSZ34g= +modernc.org/memory v1.9.1/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI= +modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/backend/internal/core/currencies/currencies.go b/backend/internal/core/currencies/currencies.go new file mode 100644 index 0000000..4cc8766 --- /dev/null +++ b/backend/internal/core/currencies/currencies.go @@ -0,0 +1,104 @@ +// Package currencies provides a shared definition of currencies. This uses a global +// variable to hold the currencies. 
+package currencies + +import ( + "bytes" + _ "embed" + "encoding/json" + "io" + "slices" + "strings" + "sync" +) + +//go:embed currencies.json +var defaults []byte + +type CollectorFunc func() ([]Currency, error) + +func CollectJSON(reader io.Reader) CollectorFunc { + return func() ([]Currency, error) { + var currencies []Currency + err := json.NewDecoder(reader).Decode(¤cies) + if err != nil { + return nil, err + } + + return currencies, nil + } +} + +func CollectDefaults() CollectorFunc { + return CollectJSON(bytes.NewReader(defaults)) +} + +func CollectionCurrencies(collectors ...CollectorFunc) ([]Currency, error) { + out := make([]Currency, 0, len(collectors)) + for i := range collectors { + c, err := collectors[i]() + if err != nil { + return nil, err + } + + out = append(out, c...) + } + + return out, nil +} + +type Currency struct { + Name string `json:"name"` + Code string `json:"code"` + Local string `json:"local"` + Symbol string `json:"symbol"` +} + +type CurrencyRegistry struct { + mu sync.RWMutex + registry map[string]Currency +} + +func NewCurrencyService(currencies []Currency) *CurrencyRegistry { + registry := make(map[string]Currency, len(currencies)) + for i := range currencies { + registry[currencies[i].Code] = currencies[i] + } + + return &CurrencyRegistry{ + registry: registry, + } +} + +func (cs *CurrencyRegistry) Slice() []Currency { + cs.mu.RLock() + defer cs.mu.RUnlock() + + out := make([]Currency, 0, len(cs.registry)) + for key := range cs.registry { + out = append(out, cs.registry[key]) + } + + slices.SortFunc(out, func(a, b Currency) int { + if a.Name < b.Name { + return -1 + } + + if a.Name > b.Name { + return 1 + } + + return 0 + }) + + return out +} + +func (cs *CurrencyRegistry) IsSupported(code string) bool { + upper := strings.ToUpper(code) + + cs.mu.RLock() + defer cs.mu.RUnlock() + _, ok := cs.registry[upper] + return ok +} diff --git a/backend/internal/core/currencies/currencies.json 
b/backend/internal/core/currencies/currencies.json new file mode 100644 index 0000000..c7b2630 --- /dev/null +++ b/backend/internal/core/currencies/currencies.json @@ -0,0 +1,638 @@ +[ + { + "code": "USD", + "local": "United States", + "symbol": "$", + "name": "United States Dollar" + }, + { + "code": "AED", + "local": "United Arab Emirates", + "symbol": "د.إ", + "name": "United Arab Emirates Dirham" + }, + { + "code": "AFN", + "local": "Afghanistan", + "symbol": "؋", + "name": "Afghan Afghani" + }, + { + "code": "ALL", + "local": "Albania", + "symbol": "L", + "name": "Albanian Lek" + }, + { + "code": "AMD", + "local": "Armenia", + "symbol": "֏", + "name": "Armenian Dram" + }, + { + "code": "ANG", + "local": "Netherlands Antilles", + "symbol": "ƒ", + "name": "Netherlands Antillean Guilder" + }, + { + "code": "AOA", + "local": "Angola", + "symbol": "Kz", + "name": "Angolan Kwanza" + }, + { + "code": "ARS", + "local": "Argentina", + "symbol": "$", + "name": "Argentine Peso" + }, + { + "code": "AUD", + "local": "Australia", + "symbol": "A$", + "name": "Australian Dollar" + }, + { + "code": "AWG", + "local": "Aruba", + "symbol": "ƒ", + "name": "Aruban Florin" + }, + { + "code": "AZN", + "local": "Azerbaijan", + "symbol": "₼", + "name": "Azerbaijani Manat" + }, + { + "code": "BAM", + "local": "Bosnia and Herzegovina", + "symbol": "KM", + "name": "Bosnia and Herzegovina Convertible Mark" + }, + { + "code": "BBD", + "local": "Barbados", + "symbol": "Bds$", + "name": "Barbadian Dollar" + }, + { + "code": "BDT", + "local": "Bangladesh", + "symbol": "৳", + "name": "Bangladeshi Taka" + }, + { + "code": "BGN", + "local": "Bulgaria", + "symbol": "лв", + "name": "Bulgarian lev" + }, + { + "code": "BHD", + "local": "Bahrain", + "symbol": "ب.د", + "name": "Bahraini Dinar" + }, + { + "code": "BIF", + "local": "Burundi", + "symbol": "FBu", + "name": "Burundian Franc" + }, + { + "code": "BMD", + "local": "Bermuda", + "symbol": "BD$", + "name": "Bermudian Dollar" + }, + { + "code": 
"BND", + "local": "Brunei", + "symbol": "B$", + "name": "Brunei Dollar" + }, + { + "code": "BOB", + "local": "Bolivia", + "symbol": "Bs.", + "name": "Bolivian Boliviano" + }, + { + "code": "BRL", + "local": "Brazil", + "symbol": "R$", + "name": "Brazilian Real" + }, + { + "code": "BSD", + "local": "Bahamas", + "symbol": "B$", + "name": "Bahamian Dollar" + }, + { + "code": "BTN", + "local": "Bhutan", + "symbol": "Nu.", + "name": "Bhutanese Ngultrum" + }, + { + "code": "BWP", + "local": "Botswana", + "symbol": "P", + "name": "Botswana Pula" + }, + { + "code": "BYN", + "local": "Belarus", + "symbol": "Br", + "name": "Belarusian Ruble" + }, + { + "code": "BZD", + "local": "Belize", + "symbol": "BZ$", + "name": "Belize Dollar" + }, + { + "code": "CAD", + "local": "Canada", + "symbol": "C$", + "name": "Canadian Dollar" + }, + { + "code": "CDF", + "local": "Democratic Republic of the Congo", + "symbol": "FC", + "name": "Congolese Franc" + }, + { + "code": "CHF", + "local": "Switzerland", + "symbol": "CHF", + "name": "Swiss Franc" + }, + { + "code": "CLP", + "local": "Chile", + "symbol": "CL$", + "name": "Chilean Peso" + }, + { + "code": "CNY", + "local": "China", + "symbol": "¥", + "name": "Chinese Yuan" + }, + { + "code": "COP", + "local": "Colombia", + "symbol": "COL$", + "name": "Colombian Peso" + }, + { + "code": "CRC", + "local": "Costa Rica", + "symbol": "₡", + "name": "Costa Rican Colón" + }, + { + "code": "CUP", + "local": "Cuba", + "symbol": "₱", + "name": "Cuban Peso" + }, + { + "code": "CVE", + "local": "Cape Verde", + "symbol": "$", + "name": "Cape Verdean Escudo" + }, + { + "code": "CZK", + "local": "Czech Republic", + "symbol": "Kč", + "name": "Czech Koruna" + }, + { + "code": "DJF", + "local": "Djibouti", + "symbol": "Fdj", + "name": "Djiboutian Franc" + }, + { + "code": "DKK", + "local": "Denmark", + "symbol": "kr", + "name": "Danish Krone" + }, + { + "code": "DOP", + "local": "Dominican Republic", + "symbol": "RD$", + "name": "Dominican Peso" + }, + { + 
"code": "DZD", + "local": "Algeria", + "symbol": "د.ج", + "name": "Algerian Dinar" + }, + { + "code": "EGP", + "local": "Egypt", + "symbol": "£", + "name": "Egyptian Pound" + }, + { + "code": "ERN", + "local": "Eritrea", + "symbol": "Nfk", + "name": "Eritrean Nakfa" + }, + { + "code": "ETB", + "local": "Ethiopia", + "symbol": "Br", + "name": "Ethiopian Birr" + }, + { + "code": "EUR", + "local": "Eurozone", + "symbol": "€", + "name": "Euro" + }, + { + "code": "FJD", + "local": "Fiji", + "symbol": "FJ$", + "name": "Fijian Dollar" + }, + { + "code": "FKP", + "local": "Falkland Islands", + "symbol": "£", + "name": "Falkland Islands Pound" + }, + { + "code": "FOK", + "local": "Faroe Islands", + "symbol": "kr", + "name": "Faroese Króna" + }, + { + "code": "GBP", + "local": "United Kingdom", + "symbol": "£", + "name": "British Pound Sterling" + }, + { + "code": "GEL", + "local": "Georgia", + "symbol": "₾", + "name": "Georgian Lari" + }, + { + "code": "GGP", + "local": "Guernsey", + "symbol": "£", + "name": "Guernsey Pound" + }, + { + "code": "GHS", + "local": "Ghana", + "symbol": "GH₵", + "name": "Ghanaian Cedi" + }, + { + "code": "GIP", + "local": "Gibraltar", + "symbol": "£", + "name": "Gibraltar Pound" + }, + { + "code": "GMD", + "local": "Gambia", + "symbol": "D", + "name": "Gambian Dalasi" + }, + { + "code": "GNF", + "local": "Guinea", + "symbol": "FG", + "name": "Guinean Franc" + }, + { + "code": "GTQ", + "local": "Guatemala", + "symbol": "Q", + "name": "Guatemalan Quetzal" + }, + { + "code": "GYD", + "local": "Guyana", + "symbol": "GY$", + "name": "Guyanese Dollar" + }, + { + "code": "HKD", + "local": "Hong Kong", + "symbol": "HK$", + "name": "Hong Kong Dollar" + }, + { + "code": "HNL", + "local": "Honduras", + "symbol": "L", + "name": "Honduran Lempira" + }, + { + "code": "HRK", + "local": "Croatia", + "symbol": "kn", + "name": "Croatian Kuna" + }, + { + "code": "HTG", + "local": "Haiti", + "symbol": "G", + "name": "Haitian Gourde" + }, + { + "code": "HUF", + 
"local": "Hungary", + "symbol": "Ft", + "name": "Hungarian Forint" + }, + { + "code": "IDR", + "local": "Indonesia", + "symbol": "Rp", + "name": "Indonesian Rupiah" + }, + { + "code": "ILS", + "local": "Israel", + "symbol": "₪", + "name": "Israeli New Shekel" + }, + { + "code": "IMP", + "local": "Isle of Man", + "symbol": "£", + "name": "Manx Pound" + }, + { + "code": "INR", + "local": "India", + "symbol": "₹", + "name": "Indian Rupee" + }, + { + "code": "IQD", + "local": "Iraq", + "symbol": "ع.د", + "name": "Iraqi Dinar" + }, + { + "code": "IRR", + "local": "Iran", + "symbol": "﷼", + "name": "Iranian Rial" + }, + { + "code": "ISK", + "local": "Iceland", + "symbol": "kr", + "name": "Icelandic Króna" + }, + { + "code": "JEP", + "local": "Jersey", + "symbol": "£", + "name": "Jersey Pound" + }, + { + "code": "JMD", + "local": "Jamaica", + "symbol": "J$", + "name": "Jamaican Dollar" + }, + { + "code": "JOD", + "local": "Jordan", + "symbol": "د.ا", + "name": "Jordanian Dinar" + }, + { + "code": "JPY", + "local": "Japan", + "symbol": "¥", + "name": "Japanese Yen" + }, + { + "code": "KES", + "local": "Kenya", + "symbol": "KSh", + "name": "Kenyan Shilling" + }, + { + "code": "KGS", + "local": "Kyrgyzstan", + "symbol": "с", + "name": "Kyrgyzstani Som" + }, + { + "code": "KHR", + "local": "Cambodia", + "symbol": "៛", + "name": "Cambodian Riel" + }, + { + "code": "KID", + "local": "Kiribati", + "symbol": "$", + "name": "Kiribati Dollar" + }, + { + "code": "KMF", + "local": "Comoros", + "symbol": "CF", + "name": "Comorian Franc" + }, + { + "code": "KRW", + "local": "South Korea", + "symbol": "₩", + "name": "South Korean Won" + }, + { + "code": "KWD", + "local": "Kuwait", + "symbol": "د.ك", + "name": "Kuwaiti Dinar" + }, + { + "code": "KYD", + "local": "Cayman Islands", + "symbol": "CI$", + "name": "Cayman Islands Dollar" + }, + { + "code": "KZT", + "local": "Kazakhstan", + "symbol": "₸", + "name": "Kazakhstani Tenge" + }, + { + "code": "LAK", + "local": "Laos", + "symbol": 
"₭", + "name": "Lao Kip" + }, + { + "code": "LBP", + "local": "Lebanon", + "symbol": "ل.ل", + "name": "Lebanese Pound" + }, + { + "code": "LKR", + "local": "Sri Lanka", + "symbol": "₨", + "name": "Sri Lankan Rupee" + }, + { + "code": "LRD", + "local": "Liberia", + "symbol": "L$", + "name": "Liberian Dollar" + }, + { + "code": "LSL", + "local": "Lesotho", + "symbol": "M", + "name": "Lesotho Loti" + }, + { + "code": "LYD", + "local": "Libya", + "symbol": "ل.د", + "name": "Libyan Dinar" + }, + { + "code": "MAD", + "local": "Morocco", + "symbol": "د.م.", + "name": "Moroccan Dirham" + }, + { + "code": "MDL", + "local": "Moldova", + "symbol": "lei", + "name": "Moldovan Leu" + }, + { + "code": "MGA", + "local": "Madagascar", + "symbol": "Ar", + "name": "Malagasy Ariary" + }, + { + "code": "MKD", + "local": "North Macedonia", + "symbol": "ден", + "name": "Macedonian Denar" + }, + { + "code": "MMK", + "local": "Myanmar", + "symbol": "K", + "name": "Myanmar Kyat" + }, + { + "code": "MNT", + "local": "Mongolia", + "symbol": "₮", + "name": "Mongolian Tugrik" + }, + { + "code": "MOP", + "local": "Macau", + "symbol": "MOP$", + "name": "Macanese Pataca" + }, + { + "code": "MRU", + "local": "Mauritania", + "symbol": "UM", + "name": "Mauritanian Ouguiya" + }, + { + "code": "MUR", + "local": "Mauritius", + "symbol": "₨", + "name": "Mauritian Rupee" + }, + { + "code": "MVR", + "local": "Maldives", + "symbol": "Rf", + "name": "Maldivian Rufiyaa" + }, + { + "code": "MWK", + "local": "Malawi", + "symbol": "MK", + "name": "Malawian Kwacha" + }, + { + "code": "MXN", + "local": "Mexico", + "symbol": "Mex$", + "name": "Mexican Peso" + }, + { + "code": "MYR", + "local": "Malaysia", + "symbol": "RM", + "name": "Malaysian Ringgit" + }, + { + "code": "MZN", + "local": "Mozambique", + "symbol": "MT", + "name": "Mozambican Metical" + }, + { + "code": "NAD", + "local": "Namibia", + "symbol": "N$", + "name": "Namibian Dollar" + }, + { + "code": "NGN", + "local": "Nigeria", + "symbol": "₦", + 
"name": "Nigerian Naira" + }, + { + "code": "NIO", + "local": "Nicaragua", + "symbol": "C$", + "name": "Nicaraguan Córdoba" + }, + { + "code": "NOK", + "local": "Norway", + "symbol": "kr", + "name": "Norwegian Krone" + }, + { + "code": "UAH", + "local": "Ukraine", + "symbol": "₴", + "name": "Ukrainian Hryvnia" + } +] diff --git a/backend/internal/core/services/.testdata/import.csv b/backend/internal/core/services/.testdata/import.csv deleted file mode 100644 index 08bd9c8..0000000 --- a/backend/internal/core/services/.testdata/import.csv +++ /dev/null @@ -1,7 +0,0 @@ -Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Mode Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes -A,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,Description 1,TRUE,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021, -B,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,Description 2,FALSE,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,10/15/2021,,,,10/15/2021, -C,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,Description 3,TRUE,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021, -D,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,Description 4,FALSE,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,10/21/2020,,,,10/21/2020, -E,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,Description 5,TRUE,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,10/14/2020,,,,10/14/2020, -F,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,Description 6,FALSE,,39351,Honeywell,,Amazon,65.98,09/30/2020,,09/30/2020,,,,09/30/2020, \ No newline at end of file diff --git a/backend/internal/core/services/.testdata/import.tsv b/backend/internal/core/services/.testdata/import.tsv deleted file mode 100644 index 503c777..0000000 --- 
a/backend/internal/core/services/.testdata/import.tsv +++ /dev/null @@ -1,7 +0,0 @@ -Import Ref Location Labels Quantity Name Description Insured Serial Number Mode Number Manufacturer Notes Purchase From Purchased Price Purchased Time Lifetime Warranty Warranty Expires Warranty Details Sold To Sold Price Sold Time Sold Notes -A Garage IOT;Home Assistant; Z-Wave 1 Zooz Universal Relay ZEN17 Description 1 TRUE ZEN17 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021 -B Living Room IOT;Home Assistant; Z-Wave 1 Zooz Motion Sensor Description 2 FALSE ZSE18 Zooz Amazon 29.95 10/15/2021 10/15/2021 10/15/2021 -C Office IOT;Home Assistant; Z-Wave 1 Zooz 110v Power Switch Description 3 TRUE ZEN15 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021 -D Downstairs IOT;Home Assistant; Z-Wave 1 Ecolink Z-Wave PIR Motion Sensor Description 4 FALSE PIRZWAVE2.5-ECO Ecolink Amazon 35.58 10/21/2020 10/21/2020 10/21/2020 -E Entry IOT;Home Assistant; Z-Wave 1 Yale Security Touchscreen Deadbolt Description 5 TRUE YRD226ZW2619 Yale Amazon 120.39 10/14/2020 10/14/2020 10/14/2020 -F Kitchen IOT;Home Assistant; Z-Wave 1 Smart Rocker Light Dimmer Description 6 FALSE 39351 Honeywell Amazon 65.98 09/30/2020 09/30/2020 09/30/2020 \ No newline at end of file diff --git a/backend/internal/core/services/all.go b/backend/internal/core/services/all.go index 2997095..3c03a4e 100644 --- a/backend/internal/core/services/all.go +++ b/backend/internal/core/services/all.go @@ -1,22 +1,24 @@ +// Package services provides the core business logic for the application. 
package services import ( - "github.com/hay-kot/homebox/backend/internal/core/services/reporting" + "github.com/hay-kot/homebox/backend/internal/core/currencies" "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/rs/zerolog/log" ) type AllServices struct { - User *UserService - Group *GroupService - Items *ItemService - Reporting *reporting.ReportingService + User *UserService + Group *GroupService + Items *ItemService + BackgroundService *BackgroundService + Currencies *currencies.CurrencyRegistry } type OptionsFunc func(*options) type options struct { autoIncrementAssetID bool + currencies []currencies.Currency } func WithAutoIncrementAssetID(v bool) func(*options) { @@ -25,13 +27,27 @@ func WithAutoIncrementAssetID(v bool) func(*options) { } } +func WithCurrencies(v []currencies.Currency) func(*options) { + return func(o *options) { + o.currencies = v + } +} + func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices { if repos == nil { panic("repos cannot be nil") } + defaultCurrencies, err := currencies.CollectionCurrencies( + currencies.CollectDefaults(), + ) + if err != nil { + panic("failed to collect default currencies") + } + options := &options{ autoIncrementAssetID: true, + currencies: defaultCurrencies, } for _, opt := range opts { @@ -45,7 +61,7 @@ func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices { repo: repos, autoIncrementAssetID: options.autoIncrementAssetID, }, - // TODO: don't use global logger - Reporting: reporting.NewReportingService(repos, &log.Logger), + BackgroundService: &BackgroundService{repos}, + Currencies: currencies.NewCurrencyService(options.currencies), } } diff --git a/backend/internal/core/services/main_test.go b/backend/internal/core/services/main_test.go index e1f7282..ecb07b0 100644 --- a/backend/internal/core/services/main_test.go +++ b/backend/internal/core/services/main_test.go @@ -3,11 +3,11 @@ package services import ( "context" "log" - "math/rand" "os" "testing" - "time" + 
"github.com/hay-kot/homebox/backend/internal/core/currencies" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/hay-kot/homebox/backend/pkgs/faker" @@ -15,7 +15,8 @@ import ( ) var ( - fk = faker.NewFaker() + fk = faker.NewFaker() + tbus = eventbus.New() tCtx = Context{} tClient *ent.Client @@ -49,8 +50,6 @@ func bootstrap() { } func TestMain(m *testing.M) { - rand.Seed(int64(time.Now().Unix())) - client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") if err != nil { log.Fatalf("failed opening connection to sqlite: %v", err) @@ -62,9 +61,14 @@ func TestMain(m *testing.M) { } tClient = client - tRepos = repo.New(tClient, os.TempDir()+"/homebox") - tSvc = New(tRepos) - defer client.Close() + tRepos = repo.New(tClient, tbus, os.TempDir()+"/homebox") + + defaults, _ := currencies.CollectionCurrencies( + currencies.CollectDefaults(), + ) + + tSvc = New(tRepos, WithCurrencies(defaults)) + defer func() { _ = client.Close() }() bootstrap() tCtx = Context{ diff --git a/backend/internal/core/services/reporting/.testdata/import/fields.csv b/backend/internal/core/services/reporting/.testdata/import/fields.csv new file mode 100644 index 0000000..28c3c17 --- /dev/null +++ b/backend/internal/core/services/reporting/.testdata/import/fields.csv @@ -0,0 +1,5 @@ +HB.location,HB.name,HB.quantity,HB.description,HB.field.Custom Field 1,HB.field.Custom Field 2,HB.field.Custom Field 3 +loc,Item 1,1,Description 1,Value 1[1],Value 1[2],Value 1[3] +loc,Item 2,2,Description 2,Value 2[1],Value 2[2],Value 2[3] +loc,Item 3,3,Description 3,Value 3[1],Value 3[2],Value 3[3] + diff --git a/backend/internal/core/services/reporting/.testdata/import/minimal.csv b/backend/internal/core/services/reporting/.testdata/import/minimal.csv new file mode 100644 index 0000000..be39ad2 --- /dev/null +++ 
b/backend/internal/core/services/reporting/.testdata/import/minimal.csv @@ -0,0 +1,4 @@ +HB.location,HB.name,HB.quantity,HB.description +loc,Item 1,1,Description 1 +loc,Item 2,2,Description 2 +loc,Item 3,3,Description 3 \ No newline at end of file diff --git a/backend/internal/core/services/reporting/.testdata/import/types.csv b/backend/internal/core/services/reporting/.testdata/import/types.csv new file mode 100644 index 0000000..96ff236 --- /dev/null +++ b/backend/internal/core/services/reporting/.testdata/import/types.csv @@ -0,0 +1,4 @@ +HB.name,HB.asset_id,HB.location,HB.labels +Item 1,1,Path / To / Location 1,L1 ; L2 ; L3 +Item 2,000-002,Path /To/ Location 2,L1;L2;L3 +Item 3,1000-003,Path / To /Location 3 , L1;L2; L3 \ No newline at end of file diff --git a/backend/internal/core/services/reporting/bill_of_materials.go b/backend/internal/core/services/reporting/bill_of_materials.go new file mode 100644 index 0000000..4147d4b --- /dev/null +++ b/backend/internal/core/services/reporting/bill_of_materials.go @@ -0,0 +1,42 @@ +package reporting + +import ( + "github.com/gocarina/gocsv" + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/hay-kot/homebox/backend/internal/data/types" +) + +// ================================================================================================= + +type BillOfMaterialsEntry struct { + PurchaseDate types.Date `csv:"Purchase Date"` + Name string `csv:"Name"` + Description string `csv:"Description"` + Manufacturer string `csv:"Manufacturer"` + SerialNumber string `csv:"Serial Number"` + ModelNumber string `csv:"Model Number"` + Quantity int `csv:"Quantity"` + Price float64 `csv:"Price"` + TotalPrice float64 `csv:"Total Price"` +} + +// BillOfMaterialsTSV returns a byte slice of the Bill of Materials for a given GID in TSV format +// See BillOfMaterialsEntry for the format of the output +func BillOfMaterialsTSV(entities []repo.ItemOut) ([]byte, error) { + bomEntries := make([]BillOfMaterialsEntry, 
len(entities)) + for i, entity := range entities { + bomEntries[i] = BillOfMaterialsEntry{ + PurchaseDate: entity.PurchaseTime, + Name: entity.Name, + Description: entity.Description, + Manufacturer: entity.Manufacturer, + SerialNumber: entity.SerialNumber, + ModelNumber: entity.ModelNumber, + Quantity: entity.Quantity, + Price: entity.PurchasePrice, + TotalPrice: entity.PurchasePrice * float64(entity.Quantity), + } + } + + return gocsv.MarshalBytes(&bomEntries) +} diff --git a/backend/internal/core/services/reporting/eventbus/eventbus.go b/backend/internal/core/services/reporting/eventbus/eventbus.go new file mode 100644 index 0000000..581bc38 --- /dev/null +++ b/backend/internal/core/services/reporting/eventbus/eventbus.go @@ -0,0 +1,91 @@ +// Package eventbus provides an interface for event bus. +package eventbus + +import ( + "context" + "sync" + + "github.com/google/uuid" +) + +type Event string + +const ( + EventLabelMutation Event = "label.mutation" + EventLocationMutation Event = "location.mutation" + EventItemMutation Event = "item.mutation" +) + +type GroupMutationEvent struct { + GID uuid.UUID +} + +type eventData struct { + event Event + data any +} + +type EventBus struct { + started bool + ch chan eventData + + mu sync.RWMutex + subscribers map[Event][]func(any) +} + +func New() *EventBus { + return &EventBus{ + ch: make(chan eventData, 100), + subscribers: map[Event][]func(any){ + EventLabelMutation: {}, + EventLocationMutation: {}, + EventItemMutation: {}, + }, + } +} + +func (e *EventBus) Run(ctx context.Context) error { + if e.started { + panic("event bus already started") + } + + e.started = true + + for { + select { + case <-ctx.Done(): + return nil + case event := <-e.ch: + e.mu.RLock() + arr, ok := e.subscribers[event.event] + e.mu.RUnlock() + + if !ok { + continue + } + + for _, fn := range arr { + fn(event.data) + } + } + } +} + +func (e *EventBus) Publish(event Event, data any) { + e.ch <- eventData{ + event: event, + data: data, + } +} + 
+func (e *EventBus) Subscribe(event Event, fn func(any)) { + e.mu.Lock() + defer e.mu.Unlock() + + arr, ok := e.subscribers[event] + if !ok { + panic("event not found") + } + + e.subscribers[event] = append(arr, fn) +} diff --git a/backend/internal/core/services/reporting/import.go b/backend/internal/core/services/reporting/import.go new file mode 100644 index 0000000..6f01b1b --- /dev/null +++ b/backend/internal/core/services/reporting/import.go @@ -0,0 +1,94 @@ +// Package reporting provides a way to import CSV files into the database. +package reporting + +import ( + "bytes" + "encoding/csv" + "errors" + "io" + "strings" +) + +var ( + ErrNoHomeboxHeaders = errors.New("no headers found") + ErrMissingRequiredHeaders = errors.New("missing required headers `HB.location` or `HB.name`") +) + +// determineSeparator determines the separator used in the CSV file +// It returns the separator as a rune and an error if it could not be determined +// +// It is assumed that the first row is the header row and that the separator is the same +// for all rows. 
+// +// Supported separators are `,` and `\t` +func determineSeparator(data []byte) (rune, error) { + // First row + firstRow := bytes.Split(data, []byte("\n"))[0] + + // find first comma or /t + comma := bytes.IndexByte(firstRow, ',') + tab := bytes.IndexByte(firstRow, '\t') + + switch { + case comma == -1 && tab == -1: + return 0, errors.New("could not determine separator") + case tab > comma: + return '\t', nil + default: + return ',', nil + } +} + +// readRawCsv reads a CSV file and returns the raw data as a 2D string array +// It determines the separator used in the CSV file and returns an error if +// it could not be determined +func readRawCsv(r io.Reader) ([][]string, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + reader := csv.NewReader(bytes.NewReader(data)) + + // Determine separator + sep, err := determineSeparator(data) + if err != nil { + return nil, err + } + + reader.Comma = sep + + return reader.ReadAll() +} + +// parseHeaders parses the homebox headers from the CSV file and returns a map of the headers +// and their column index as well as a list of the field headers (HB.field.*) in the order +// they appear in the CSV file +// +// It returns an error if no homebox headers are found +func parseHeaders(headers []string) (hbHeaders map[string]int, fieldHeaders []string, err error) { + hbHeaders = map[string]int{} // initialize map + + for col, h := range headers { + if strings.HasPrefix(h, "HB.field.") { + fieldHeaders = append(fieldHeaders, h) + } + + if strings.HasPrefix(h, "HB.") { + hbHeaders[h] = col + } + } + + required := []string{"HB.location", "HB.name"} + for _, h := range required { + if _, ok := hbHeaders[h]; !ok { + return nil, nil, ErrMissingRequiredHeaders + } + } + + if len(hbHeaders) == 0 { + return nil, nil, ErrNoHomeboxHeaders + } + + return hbHeaders, fieldHeaders, nil +} diff --git a/backend/internal/core/services/reporting/io_row.go b/backend/internal/core/services/reporting/io_row.go new file 
mode 100644 index 0000000..c80e00d --- /dev/null +++ b/backend/internal/core/services/reporting/io_row.go @@ -0,0 +1,95 @@ +package reporting + +import ( + "strings" + + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/hay-kot/homebox/backend/internal/data/types" +) + +type ExportItemFields struct { + Name string + Value string +} + +type ExportTSVRow struct { + ImportRef string `csv:"HB.import_ref"` + Location LocationString `csv:"HB.location"` + LabelStr LabelString `csv:"HB.labels"` + AssetID repo.AssetID `csv:"HB.asset_id"` + Archived bool `csv:"HB.archived"` + + Name string `csv:"HB.name"` + Quantity int `csv:"HB.quantity"` + Description string `csv:"HB.description"` + Insured bool `csv:"HB.insured"` + Notes string `csv:"HB.notes"` + + PurchasePrice float64 `csv:"HB.purchase_price"` + PurchaseFrom string `csv:"HB.purchase_from"` + PurchaseTime types.Date `csv:"HB.purchase_time"` + + Manufacturer string `csv:"HB.manufacturer"` + ModelNumber string `csv:"HB.model_number"` + SerialNumber string `csv:"HB.serial_number"` + + LifetimeWarranty bool `csv:"HB.lifetime_warranty"` + WarrantyExpires types.Date `csv:"HB.warranty_expires"` + WarrantyDetails string `csv:"HB.warranty_details"` + + SoldTo string `csv:"HB.sold_to"` + SoldPrice float64 `csv:"HB.sold_price"` + SoldTime types.Date `csv:"HB.sold_time"` + SoldNotes string `csv:"HB.sold_notes"` + + Fields []ExportItemFields `csv:"-"` +} + +// ============================================================================ + +// LabelString is a string slice that is used to represent a list of labels. 
+// +// For example, a list of labels "Important; Work" would be represented as a +// LabelString with the following values: +// +// LabelString{"Important", "Work"} +type LabelString []string + +func parseLabelString(s string) LabelString { + v, _ := parseSeparatedString(s, ";") + return v +} + +func (ls LabelString) String() string { + return strings.Join(ls, "; ") +} + +// ============================================================================ + +// LocationString is a string slice that is used to represent a location +// hierarchy. +// +// For example, a location hierarchy of "Home / Bedroom / Desk" would be +// represented as a LocationString with the following values: +// +// LocationString{"Home", "Bedroom", "Desk"} +type LocationString []string + +func parseLocationString(s string) LocationString { + v, _ := parseSeparatedString(s, "/") + return v +} + +func (csf LocationString) String() string { + return strings.Join(csf, " / ") +} + +func fromPathSlice(s []repo.ItemPath) LocationString { + v := make(LocationString, len(s)) + + for i := range s { + v[i] = s[i].Name + } + + return v +} diff --git a/backend/internal/core/services/reporting/io_sheet.go b/backend/internal/core/services/reporting/io_sheet.go new file mode 100644 index 0000000..5877f3e --- /dev/null +++ b/backend/internal/core/services/reporting/io_sheet.go @@ -0,0 +1,322 @@ +package reporting + +import ( + "context" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/hay-kot/homebox/backend/internal/data/types" + "github.com/rs/zerolog/log" +) + +// IOSheet is the representation of a CSV/TSV sheet that is used for importing/exporting +// items from homebox. It is used to read/write the data from/to a CSV/TSV file given +// the standard format of the file. +// +// See ExportTSVRow for the format of the data in the sheet. 
+type IOSheet struct { + headers []string + custom []int + index map[string]int + Rows []ExportTSVRow +} + +func (s *IOSheet) indexHeaders() { + s.index = make(map[string]int) + + for i, h := range s.headers { + if strings.HasPrefix(h, "HB.field") { + s.custom = append(s.custom, i) + } + + if strings.HasPrefix(h, "HB.") { + s.index[h] = i + } + } +} + +func (s *IOSheet) GetColumn(str string) (col int, ok bool) { + if s.index == nil { + s.indexHeaders() + } + + col, ok = s.index[str] + return +} + +// Read reads a CSV/TSV and populates the "Rows" field with the data from the sheet +// Custom Fields are supported via the `HB.field.*` headers. The `HB.field.*` the "Name" +// of the field is the part after the `HB.field.` prefix. Additionally, Custom Fields with +// no value are excluded from the row.Fields slice, this includes empty strings. +// +// Note That +// - the first row is assumed to be the header +// - at least 1 row of data is required +// - rows and columns must be rectangular (i.e. 
all rows must have the same number of columns) +func (s *IOSheet) Read(data io.Reader) error { + sheet, err := readRawCsv(data) + if err != nil { + return err + } + + if len(sheet) < 2 { + return fmt.Errorf("sheet must have at least 1 row of data (header + 1)") + } + + s.headers = sheet[0] + s.Rows = make([]ExportTSVRow, len(sheet)-1) + + for i, row := range sheet[1:] { + if len(row) != len(s.headers) { + return fmt.Errorf("row has %d columns, expected %d", len(row), len(s.headers)) + } + + rowData := ExportTSVRow{} + + st := reflect.TypeOf(ExportTSVRow{}) + + for i := 0; i < st.NumField(); i++ { + field := st.Field(i) + tag := field.Tag.Get("csv") + if tag == "" || tag == "-" { + continue + } + + col, ok := s.GetColumn(tag) + if !ok { + continue + } + + val := row[col] + + var v interface{} + + switch field.Type { + case reflect.TypeOf(""): + v = val + case reflect.TypeOf(int(0)): + v = parseInt(val) + case reflect.TypeOf(bool(false)): + v = parseBool(val) + case reflect.TypeOf(float64(0)): + v = parseFloat(val) + + // Custom Types + case reflect.TypeOf(types.Date{}): + v = types.DateFromString(val) + case reflect.TypeOf(repo.AssetID(0)): + v, _ = repo.ParseAssetID(val) + case reflect.TypeOf(LocationString{}): + v = parseLocationString(val) + case reflect.TypeOf(LabelString{}): + v = parseLabelString(val) + } + + log.Debug(). + Str("tag", tag). + Interface("val", v). + Str("type", fmt.Sprintf("%T", v)). + Msg("parsed value") + + // Nil values are not allowed at the moment. This may change. 
+ if v == nil { + return fmt.Errorf("could not convert %q to %s", val, field.Type) + } + + ptrField := reflect.ValueOf(&rowData).Elem().Field(i) + ptrField.Set(reflect.ValueOf(v)) + } + + for _, col := range s.custom { + colName := strings.TrimPrefix(s.headers[col], "HB.field.") + customVal := row[col] + if customVal == "" { + continue + } + + rowData.Fields = append(rowData.Fields, ExportItemFields{ + Name: colName, + Value: customVal, + }) + } + + s.Rows[i] = rowData + } + + return nil +} + +// ReadItems writes the sheet to a writer. +func (s *IOSheet) ReadItems(ctx context.Context, items []repo.ItemOut, GID uuid.UUID, repos *repo.AllRepos) error { + s.Rows = make([]ExportTSVRow, len(items)) + + extraHeaders := map[string]struct{}{} + + for i := range items { + item := items[i] + + // TODO: Support fetching nested locations + locID := item.Location.ID + + locPaths, err := repos.Locations.PathForLoc(context.Background(), GID, locID) + if err != nil { + log.Error().Err(err).Msg("could not get location path") + return err + } + + locString := fromPathSlice(locPaths) + + labelString := make([]string, len(item.Labels)) + + for i, l := range item.Labels { + labelString[i] = l.Name + } + + customFields := make([]ExportItemFields, len(item.Fields)) + + for i, f := range item.Fields { + extraHeaders[f.Name] = struct{}{} + + customFields[i] = ExportItemFields{ + Name: f.Name, + Value: f.TextValue, + } + } + + s.Rows[i] = ExportTSVRow{ + // fill struct + Location: locString, + LabelStr: labelString, + + ImportRef: item.ImportRef, + AssetID: item.AssetID, + Name: item.Name, + Quantity: item.Quantity, + Description: item.Description, + Insured: item.Insured, + Archived: item.Archived, + + PurchasePrice: item.PurchasePrice, + PurchaseFrom: item.PurchaseFrom, + PurchaseTime: item.PurchaseTime, + + Manufacturer: item.Manufacturer, + ModelNumber: item.ModelNumber, + SerialNumber: item.SerialNumber, + + LifetimeWarranty: item.LifetimeWarranty, + WarrantyExpires: 
item.WarrantyExpires, + WarrantyDetails: item.WarrantyDetails, + + SoldTo: item.SoldTo, + SoldTime: item.SoldTime, + SoldPrice: item.SoldPrice, + SoldNotes: item.SoldNotes, + + Fields: customFields, + } + } + + // Extract and sort additional headers for deterministic output + customHeaders := make([]string, 0, len(extraHeaders)) + + for k := range extraHeaders { + customHeaders = append(customHeaders, k) + } + + sort.Strings(customHeaders) + + st := reflect.TypeOf(ExportTSVRow{}) + + // Write headers + for i := 0; i < st.NumField(); i++ { + field := st.Field(i) + tag := field.Tag.Get("csv") + if tag == "" || tag == "-" { + continue + } + + s.headers = append(s.headers, tag) + } + + for _, h := range customHeaders { + s.headers = append(s.headers, "HB.field."+h) + } + + return nil +} + +// TSV writes the current sheet to a writer in TSV format. +func (s *IOSheet) TSV() ([][]string, error) { + memcsv := make([][]string, len(s.Rows)+1) + + memcsv[0] = s.headers + + // use struct tags in rows to dertmine column order + for i, row := range s.Rows { + rowIdx := i + 1 + + memcsv[rowIdx] = make([]string, len(s.headers)) + + st := reflect.TypeOf(row) + + for i := 0; i < st.NumField(); i++ { + field := st.Field(i) + tag := field.Tag.Get("csv") + if tag == "" || tag == "-" { + continue + } + + col, ok := s.GetColumn(tag) + if !ok { + continue + } + + val := reflect.ValueOf(row).Field(i) + + var v string + + switch field.Type { + case reflect.TypeOf(""): + v = val.String() + case reflect.TypeOf(int(0)): + v = strconv.Itoa(int(val.Int())) + case reflect.TypeOf(bool(false)): + v = strconv.FormatBool(val.Bool()) + case reflect.TypeOf(float64(0)): + v = strconv.FormatFloat(val.Float(), 'f', -1, 64) + + // Custom Types + case reflect.TypeOf(types.Date{}): + v = val.Interface().(types.Date).String() + case reflect.TypeOf(repo.AssetID(0)): + v = val.Interface().(repo.AssetID).String() + case reflect.TypeOf(LocationString{}): + v = val.Interface().(LocationString).String() + case 
reflect.TypeOf(LabelString{}): + v = val.Interface().(LabelString).String() + default: + log.Debug().Str("type", field.Type.String()).Msg("unknown type") + } + + memcsv[rowIdx][col] = v + } + + for _, f := range row.Fields { + col, ok := s.GetColumn("HB.field." + f.Name) + if !ok { + continue + } + + memcsv[i+1][col] = f.Value + } + } + + return memcsv, nil +} diff --git a/backend/internal/core/services/reporting/io_sheet_test.go b/backend/internal/core/services/reporting/io_sheet_test.go new file mode 100644 index 0000000..f056e31 --- /dev/null +++ b/backend/internal/core/services/reporting/io_sheet_test.go @@ -0,0 +1,221 @@ +package reporting + +import ( + "bytes" + "reflect" + "testing" + + _ "embed" + + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + //go:embed .testdata/import/minimal.csv + minimalImportCSV []byte + + //go:embed .testdata/import/fields.csv + customFieldImportCSV []byte + + //go:embed .testdata/import/types.csv + customTypesImportCSV []byte +) + +func TestSheet_Read(t *testing.T) { + tests := []struct { + name string + data []byte + want []ExportTSVRow + wantErr bool + }{ + { + name: "minimal import", + data: minimalImportCSV, + want: []ExportTSVRow{ + {Location: LocationString{"loc"}, Name: "Item 1", Quantity: 1, Description: "Description 1"}, + {Location: LocationString{"loc"}, Name: "Item 2", Quantity: 2, Description: "Description 2"}, + {Location: LocationString{"loc"}, Name: "Item 3", Quantity: 3, Description: "Description 3"}, + }, + }, + { + name: "custom field import", + data: customFieldImportCSV, + want: []ExportTSVRow{ + { + Location: LocationString{"loc"}, Name: "Item 1", Quantity: 1, Description: "Description 1", + Fields: []ExportItemFields{ + {Name: "Custom Field 1", Value: "Value 1[1]"}, + {Name: "Custom Field 2", Value: "Value 1[2]"}, + {Name: "Custom Field 3", Value: "Value 1[3]"}, + }, + }, + { + Location: 
LocationString{"loc"}, Name: "Item 2", Quantity: 2, Description: "Description 2", + Fields: []ExportItemFields{ + {Name: "Custom Field 1", Value: "Value 2[1]"}, + {Name: "Custom Field 2", Value: "Value 2[2]"}, + {Name: "Custom Field 3", Value: "Value 2[3]"}, + }, + }, + { + Location: LocationString{"loc"}, Name: "Item 3", Quantity: 3, Description: "Description 3", + Fields: []ExportItemFields{ + {Name: "Custom Field 1", Value: "Value 3[1]"}, + {Name: "Custom Field 2", Value: "Value 3[2]"}, + {Name: "Custom Field 3", Value: "Value 3[3]"}, + }, + }, + }, + }, + { + name: "custom types import", + data: customTypesImportCSV, + want: []ExportTSVRow{ + { + Name: "Item 1", + AssetID: repo.AssetID(1), + Location: LocationString{"Path", "To", "Location 1"}, + LabelStr: LabelString{"L1", "L2", "L3"}, + }, + { + Name: "Item 2", + AssetID: repo.AssetID(2), + Location: LocationString{"Path", "To", "Location 2"}, + LabelStr: LabelString{"L1", "L2", "L3"}, + }, + { + Name: "Item 3", + AssetID: repo.AssetID(1000003), + Location: LocationString{"Path", "To", "Location 3"}, + LabelStr: LabelString{"L1", "L2", "L3"}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := bytes.NewReader(tt.data) + + sheet := &IOSheet{} + err := sheet.Read(reader) + + switch { + case tt.wantErr: + require.Error(t, err) + default: + require.NoError(t, err) + assert.ElementsMatch(t, tt.want, sheet.Rows) + } + }) + } +} + +func Test_parseHeaders(t *testing.T) { + tests := []struct { + name string + rawHeaders []string + wantHbHeaders map[string]int + wantFieldHeaders []string + wantErr bool + }{ + { + name: "no hombox headers", + rawHeaders: []string{"Header 1", "Header 2", "Header 3"}, + wantHbHeaders: nil, + wantFieldHeaders: nil, + wantErr: true, + }, + { + name: "field headers only", + rawHeaders: []string{"HB.location", "HB.name", "HB.field.1", "HB.field.2", "HB.field.3"}, + wantHbHeaders: map[string]int{ + "HB.location": 0, + "HB.name": 1, + 
"HB.field.1": 2, + "HB.field.2": 3, + "HB.field.3": 4, + }, + wantFieldHeaders: []string{"HB.field.1", "HB.field.2", "HB.field.3"}, + wantErr: false, + }, + { + name: "mixed headers", + rawHeaders: []string{"Header 1", "HB.name", "Header 2", "HB.field.2", "Header 3", "HB.field.3", "HB.location"}, + wantHbHeaders: map[string]int{ + "HB.name": 1, + "HB.field.2": 3, + "HB.field.3": 5, + "HB.location": 6, + }, + wantFieldHeaders: []string{"HB.field.2", "HB.field.3"}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotHbHeaders, gotFieldHeaders, err := parseHeaders(tt.rawHeaders) + if (err != nil) != tt.wantErr { + t.Errorf("parseHeaders() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotHbHeaders, tt.wantHbHeaders) { + t.Errorf("parseHeaders() gotHbHeaders = %v, want %v", gotHbHeaders, tt.wantHbHeaders) + } + if !reflect.DeepEqual(gotFieldHeaders, tt.wantFieldHeaders) { + t.Errorf("parseHeaders() gotFieldHeaders = %v, want %v", gotFieldHeaders, tt.wantFieldHeaders) + } + }) + } +} + +func Test_determineSeparator(t *testing.T) { + type args struct { + data []byte + } + tests := []struct { + name string + args args + want rune + wantErr bool + }{ + { + name: "comma", + args: args{ + data: []byte("a,b,c"), + }, + want: ',', + wantErr: false, + }, + { + name: "tab", + args: args{ + data: []byte("a\tb\tc"), + }, + want: '\t', + wantErr: false, + }, + { + name: "invalid", + args: args{ + data: []byte("a;b;c"), + }, + want: 0, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := determineSeparator(tt.args.data) + if (err != nil) != tt.wantErr { + t.Errorf("determineSeparator() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("determineSeparator() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/backend/internal/core/services/reporting/reporting.go 
b/backend/internal/core/services/reporting/reporting.go deleted file mode 100644 index 4ba408b..0000000 --- a/backend/internal/core/services/reporting/reporting.go +++ /dev/null @@ -1,85 +0,0 @@ -package reporting - -import ( - "context" - "encoding/csv" - "io" - "time" - - "github.com/gocarina/gocsv" - "github.com/google/uuid" - "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/rs/zerolog" -) - -type ReportingService struct { - repos *repo.AllRepos - l *zerolog.Logger -} - -func NewReportingService(repos *repo.AllRepos, l *zerolog.Logger) *ReportingService { - gocsv.SetCSVWriter(func(out io.Writer) *gocsv.SafeCSVWriter { - writer := csv.NewWriter(out) - writer.Comma = '\t' - return gocsv.NewSafeCSVWriter(writer) - }) - - return &ReportingService{ - repos: repos, - l: l, - } -} - -// ================================================================================================= - -// NullableTime is a custom type that implements the MarshalCSV interface -// to allow for nullable time.Time fields in the CSV output to be empty -// and not "0001-01-01". 
It also overrides the default CSV output format -type NullableTime time.Time - -func (t NullableTime) MarshalCSV() (string, error) { - if time.Time(t).IsZero() { - return "", nil - } - // YYYY-MM-DD - return time.Time(t).Format("2006-01-02"), nil -} - -type BillOfMaterialsEntry struct { - PurchaseDate NullableTime `csv:"Purchase Date"` - Name string `csv:"Name"` - Description string `csv:"Description"` - Manufacturer string `csv:"Manufacturer"` - SerialNumber string `csv:"Serial Number"` - ModelNumber string `csv:"Model Number"` - Quantity int `csv:"Quantity"` - Price float64 `csv:"Price"` - TotalPrice float64 `csv:"Total Price"` -} - -// BillOfMaterialsTSV returns a byte slice of the Bill of Materials for a given GID in TSV format -// See BillOfMaterialsEntry for the format of the output -func (rs *ReportingService) BillOfMaterialsTSV(ctx context.Context, GID uuid.UUID) ([]byte, error) { - entities, err := rs.repos.Items.GetAll(ctx, GID) - if err != nil { - rs.l.Debug().Err(err).Msg("failed to get all items for BOM Csv Reporting") - return nil, err - } - - bomEntries := make([]BillOfMaterialsEntry, len(entities)) - for i, entity := range entities { - bomEntries[i] = BillOfMaterialsEntry{ - PurchaseDate: NullableTime(entity.PurchaseTime), - Name: entity.Name, - Description: entity.Description, - Manufacturer: entity.Manufacturer, - SerialNumber: entity.SerialNumber, - ModelNumber: entity.ModelNumber, - Quantity: entity.Quantity, - Price: entity.PurchasePrice, - TotalPrice: entity.PurchasePrice * float64(entity.Quantity), - } - } - - return gocsv.MarshalBytes(&bomEntries) -} diff --git a/backend/internal/core/services/reporting/value_parsers.go b/backend/internal/core/services/reporting/value_parsers.go new file mode 100644 index 0000000..7410396 --- /dev/null +++ b/backend/internal/core/services/reporting/value_parsers.go @@ -0,0 +1,38 @@ +package reporting + +import ( + "strconv" + "strings" +) + +func parseSeparatedString(s string, sep string) ([]string, error) { 
+ list := strings.Split(s, sep) + + csf := make([]string, 0, len(list)) + for _, s := range list { + trimmed := strings.TrimSpace(s) + if trimmed != "" { + csf = append(csf, trimmed) + } + } + + return csf, nil +} + +func parseFloat(s string) float64 { + if s == "" { + return 0 + } + f, _ := strconv.ParseFloat(s, 64) + return f +} + +func parseBool(s string) bool { + b, _ := strconv.ParseBool(s) + return b +} + +func parseInt(s string) int { + i, _ := strconv.Atoi(s) + return i +} diff --git a/backend/internal/core/services/reporting/value_parsers_test.go b/backend/internal/core/services/reporting/value_parsers_test.go new file mode 100644 index 0000000..bcd7431 --- /dev/null +++ b/backend/internal/core/services/reporting/value_parsers_test.go @@ -0,0 +1,65 @@ +package reporting + +import ( + "reflect" + "testing" +) + +func Test_parseSeparatedString(t *testing.T) { + type args struct { + s string + sep string + } + tests := []struct { + name string + args args + want []string + wantErr bool + }{ + { + name: "comma", + args: args{ + s: "a,b,c", + sep: ",", + }, + want: []string{"a", "b", "c"}, + wantErr: false, + }, + { + name: "trimmed comma", + args: args{ + s: "a, b, c", + sep: ",", + }, + want: []string{"a", "b", "c"}, + }, + { + name: "excessive whitespace", + args: args{ + s: " a, b, c ", + sep: ",", + }, + want: []string{"a", "b", "c"}, + }, + { + name: "empty", + args: args{ + s: "", + sep: ",", + }, + want: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseSeparatedString(tt.args.s, tt.args.sep) + if (err != nil) != tt.wantErr { + t.Errorf("parseSeparatedString() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("parseSeparatedString() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/backend/internal/core/services/service_background.go b/backend/internal/core/services/service_background.go new file mode 100644 index 0000000..21ae4c3 
--- /dev/null +++ b/backend/internal/core/services/service_background.go @@ -0,0 +1,81 @@ +package services + +import ( + "context" + "strings" + "time" + + "github.com/containrrr/shoutrrr" + "github.com/hay-kot/homebox/backend/internal/data/repo" + "github.com/hay-kot/homebox/backend/internal/data/types" + "github.com/rs/zerolog/log" +) + +type BackgroundService struct { + repos *repo.AllRepos +} + +func (svc *BackgroundService) SendNotifiersToday(ctx context.Context) error { + // Get All Groups + groups, err := svc.repos.Groups.GetAllGroups(ctx) + if err != nil { + return err + } + + today := types.DateFromTime(time.Now()) + + for i := range groups { + group := groups[i] + + entries, err := svc.repos.MaintEntry.GetScheduled(ctx, group.ID, today) + if err != nil { + return err + } + + if len(entries) == 0 { + log.Debug(). + Str("group_name", group.Name). + Str("group_id", group.ID.String()). + Msg("No scheduled maintenance for today") + continue + } + + notifiers, err := svc.repos.Notifiers.GetByGroup(ctx, group.ID) + if err != nil { + return err + } + + urls := make([]string, len(notifiers)) + for i := range notifiers { + urls[i] = notifiers[i].URL + } + + bldr := strings.Builder{} + + bldr.WriteString("Homebox Maintenance for (") + bldr.WriteString(today.String()) + bldr.WriteString("):\n") + + for i := range entries { + entry := entries[i] + bldr.WriteString(" - ") + bldr.WriteString(entry.Name) + bldr.WriteString("\n") + } + + var sendErrs []error + for i := range urls { + err := shoutrrr.Send(urls[i], bldr.String()) + + if err != nil { + sendErrs = append(sendErrs, err) + } + } + + if len(sendErrs) > 0 { + return sendErrs[0] + } + } + + return nil +} diff --git a/backend/internal/core/services/service_items.go b/backend/internal/core/services/service_items.go index df37e67..4d510e5 100644 --- a/backend/internal/core/services/service_items.go +++ b/backend/internal/core/services/service_items.go @@ -3,10 +3,13 @@ package services import ( "context" "errors" + 
"fmt" + "io" + "strings" "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting" "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/rs/zerolog/log" ) var ( @@ -29,7 +32,7 @@ func (svc *ItemService) Create(ctx Context, item repo.ItemCreate) (repo.ItemOut, return repo.ItemOut{}, err } - item.AssetID = repo.AssetID(highest + 1) + item.AssetID = highest + 1 } return svc.repo.Items.Create(ctx, ctx.GID, item) @@ -37,7 +40,6 @@ func (svc *ItemService) Create(ctx Context, item repo.ItemCreate) (repo.ItemOut, func (svc *ItemService) EnsureAssetID(ctx context.Context, GID uuid.UUID) (int, error) { items, err := svc.repo.Items.GetAllZeroAssetID(ctx, GID) - if err != nil { return 0, err } @@ -51,7 +53,7 @@ func (svc *ItemService) EnsureAssetID(ctx context.Context, GID uuid.UUID) (int, for _, item := range items { highest++ - err = svc.repo.Items.SetAssetID(ctx, GID, item.ID, repo.AssetID(highest)) + err = svc.repo.Items.SetAssetID(ctx, GID, item.ID, highest) if err != nil { return 0, err } @@ -61,190 +63,293 @@ func (svc *ItemService) EnsureAssetID(ctx context.Context, GID uuid.UUID) (int, return finished, nil } -func (svc *ItemService) CsvImport(ctx context.Context, GID uuid.UUID, data [][]string) (int, error) { - loaded := []csvRow{} - // Skip first row - for _, row := range data[1:] { - // Skip empty rows - if len(row) == 0 { - continue - } - - if len(row) != NumOfCols { - return 0, ErrInvalidCsv - } - - r := newCsvRow(row) - loaded = append(loaded, r) - } - - // validate rows - var errMap = map[int][]error{} - var hasErr bool - for i, r := range loaded { - - errs := r.validate() - - if len(errs) > 0 { - hasErr = true - lineNum := i + 2 - - errMap[lineNum] = errs - } - } - - if hasErr { - for lineNum, errs := range errMap { - for _, err := range errs { - log.Error().Err(err).Int("line", lineNum).Msg("csv import error") - } - } - } - - // Bootstrap the locations and labels so we can reuse the created IDs for the 
items - locations := map[string]uuid.UUID{} - existingLocation, err := svc.repo.Locations.GetAll(ctx, GID, repo.LocationQuery{}) +func (svc *ItemService) EnsureImportRef(ctx context.Context, GID uuid.UUID) (int, error) { + ids, err := svc.repo.Items.GetAllZeroImportRef(ctx, GID) if err != nil { return 0, err } - for _, loc := range existingLocation { - locations[loc.Name] = loc.ID + + finished := 0 + for _, itemID := range ids { + ref := uuid.New().String()[0:8] + + err = svc.repo.Items.Patch(ctx, GID, itemID, repo.ItemPatch{ImportRef: &ref}) + if err != nil { + return 0, err + } + + finished++ } - labels := map[string]uuid.UUID{} - existingLabels, err := svc.repo.Labels.GetAll(ctx, GID) + return finished, nil +} + +func serializeLocation[T ~[]string](location T) string { + return strings.Join(location, "/") +} + +// CsvImport imports items from a CSV file. using the standard defined format. +// +// CsvImport applies the following rules/operations +// +// 1. If the item does not exist, it is created. +// 2. If the item has a ImportRef and it exists it is skipped +// 3. Locations and Labels are created if they do not exist. 
+func (svc *ItemService) CsvImport(ctx context.Context, GID uuid.UUID, data io.Reader) (int, error) { + sheet := reporting.IOSheet{} + + err := sheet.Read(data) if err != nil { return 0, err } - for _, label := range existingLabels { - labels[label.Name] = label.ID - } - for _, row := range loaded { + // ======================================== + // Labels - // Locations - if _, exists := locations[row.Location]; !exists { - result, err := svc.repo.Locations.Create(ctx, GID, repo.LocationCreate{ - Name: row.Location, - Description: "", - }) - if err != nil { - return 0, err - } - locations[row.Location] = result.ID + labelMap := make(map[string]uuid.UUID) + { + labels, err := svc.repo.Labels.GetAll(ctx, GID) + if err != nil { + return 0, err } - // Labels - - for _, label := range row.getLabels() { - if _, exists := labels[label]; exists { - continue - } - result, err := svc.repo.Labels.Create(ctx, GID, repo.LabelCreate{ - Name: label, - Description: "", - }) - if err != nil { - return 0, err - } - labels[label] = result.ID + for _, label := range labels { + labelMap[label.Name] = label.ID } } - highest := repo.AssetID(-1) + // ======================================== + // Locations + + locationMap := make(map[string]uuid.UUID) + { + locations, err := svc.repo.Locations.Tree(ctx, GID, repo.TreeQuery{WithItems: false}) + if err != nil { + return 0, err + } + + // Traverse the tree and build a map of location full paths to IDs + // where the full path is the location name joined by slashes. 
+ var traverse func(location *repo.TreeItem, path []string) + traverse = func(location *repo.TreeItem, path []string) { + path = append(path, location.Name) + + locationMap[serializeLocation(path)] = location.ID + + for _, child := range location.Children { + traverse(child, path) + } + } + + for _, location := range locations { + traverse(&location, []string{}) + } + } + + // ======================================== + // Import items + + // Asset ID Pre-Check + highestAID := repo.AssetID(-1) if svc.autoIncrementAssetID { - highest, err = svc.repo.Items.GetHighestAssetID(ctx, GID) + highestAID, err = svc.repo.Items.GetHighestAssetID(ctx, GID) if err != nil { return 0, err } } - // Create the items - var count int - for _, row := range loaded { - // Check Import Ref - if row.Item.ImportRef != "" { - exists, err := svc.repo.Items.CheckRef(ctx, GID, row.Item.ImportRef) - if exists { - continue - } + finished := 0 + + for i := range sheet.Rows { + row := sheet.Rows[i] + + createRequired := true + + // ======================================== + // Preflight check for existing item + if row.ImportRef != "" { + exists, err := svc.repo.Items.CheckRef(ctx, GID, row.ImportRef) if err != nil { - log.Err(err).Msg("error checking import ref") + return 0, fmt.Errorf("error checking for existing item with ref %q: %w", row.ImportRef, err) + } + + if exists { + createRequired = false } } - locationID := locations[row.Location] - labelIDs := []uuid.UUID{} - for _, label := range row.getLabels() { - labelIDs = append(labelIDs, labels[label]) + // ======================================== + // Pre-Create Labels as necessary + labelIds := make([]uuid.UUID, len(row.LabelStr)) + + for j := range row.LabelStr { + label := row.LabelStr[j] + + id, ok := labelMap[label] + if !ok { + newLabel, err := svc.repo.Labels.Create(ctx, GID, repo.LabelCreate{Name: label}) + if err != nil { + return 0, err + } + id = newLabel.ID + } + + labelIds[j] = id + labelMap[label] = id } - log.Info(). 
- Str("name", row.Item.Name). - Str("location", row.Location). - Msgf("Creating Item: %s", row.Item.Name) + // ======================================== + // Pre-Create Locations as necessary + path := serializeLocation(row.Location) - data := repo.ItemCreate{ - ImportRef: row.Item.ImportRef, - Name: row.Item.Name, - Description: row.Item.Description, - LabelIDs: labelIDs, - LocationID: locationID, + locationID, ok := locationMap[path] + if !ok { // Traverse the path of LocationStr and check each path element to see if it exists already, if not create it. + paths := []string{} + for i, pathElement := range row.Location { + paths = append(paths, pathElement) + path := serializeLocation(paths) + + locationID, ok = locationMap[path] + if !ok { + parentID := uuid.Nil + + // Get the parent ID + if i > 0 { + parentPath := serializeLocation(row.Location[:i]) + parentID = locationMap[parentPath] + } + + newLocation, err := svc.repo.Locations.Create(ctx, GID, repo.LocationCreate{ + ParentID: parentID, + Name: pathElement, + }) + if err != nil { + return 0, err + } + locationID = newLocation.ID + } + + locationMap[path] = locationID + } + + locationID, ok = locationMap[path] + if !ok { + return 0, errors.New("failed to create location") + } } - if svc.autoIncrementAssetID { - highest++ - data.AssetID = highest + var effAID repo.AssetID + if svc.autoIncrementAssetID && row.AssetID.Nil() { + effAID = highestAID + 1 + highestAID++ + } else { + effAID = row.AssetID } - result, err := svc.repo.Items.Create(ctx, GID, data) + // ======================================== + // Create Item + var item repo.ItemOut + switch { + case createRequired: + newItem := repo.ItemCreate{ + ImportRef: row.ImportRef, + Name: row.Name, + Description: row.Description, + AssetID: effAID, + LocationID: locationID, + LabelIDs: labelIds, + } - if err != nil { - return count, err + item, err = svc.repo.Items.Create(ctx, GID, newItem) + if err != nil { + return 0, err + } + default: + item, err = 
svc.repo.Items.GetByRef(ctx, GID, row.ImportRef) + if err != nil { + return 0, err + } } - // Update the item with the rest of the data - _, err = svc.repo.Items.UpdateByGroup(ctx, GID, repo.ItemUpdate{ - // Edges + if item.ID == uuid.Nil { + panic("item ID is nil on import - this should never happen") + } + + fields := make([]repo.ItemField, len(row.Fields)) + for i := range row.Fields { + fields[i] = repo.ItemField{ + Name: row.Fields[i].Name, + Type: "text", + TextValue: row.Fields[i].Value, + } + } + + updateItem := repo.ItemUpdate{ + ID: item.ID, + LabelIDs: labelIds, LocationID: locationID, - LabelIDs: labelIDs, - AssetID: data.AssetID, - // General Fields - ID: result.ID, - Name: result.Name, - Description: result.Description, - Insured: row.Item.Insured, - Notes: row.Item.Notes, - Quantity: row.Item.Quantity, + Name: row.Name, + Description: row.Description, + AssetID: effAID, + Insured: row.Insured, + Quantity: row.Quantity, + Archived: row.Archived, - // Identifies the item as imported - SerialNumber: row.Item.SerialNumber, - ModelNumber: row.Item.ModelNumber, - Manufacturer: row.Item.Manufacturer, + PurchasePrice: row.PurchasePrice, + PurchaseFrom: row.PurchaseFrom, + PurchaseTime: row.PurchaseTime, - // Purchase - PurchaseFrom: row.Item.PurchaseFrom, - PurchasePrice: row.Item.PurchasePrice, - PurchaseTime: row.Item.PurchaseTime, + Manufacturer: row.Manufacturer, + ModelNumber: row.ModelNumber, + SerialNumber: row.SerialNumber, - // Warranty - LifetimeWarranty: row.Item.LifetimeWarranty, - WarrantyExpires: row.Item.WarrantyExpires, - WarrantyDetails: row.Item.WarrantyDetails, + LifetimeWarranty: row.LifetimeWarranty, + WarrantyExpires: row.WarrantyExpires, + WarrantyDetails: row.WarrantyDetails, - SoldTo: row.Item.SoldTo, - SoldPrice: row.Item.SoldPrice, - SoldTime: row.Item.SoldTime, - SoldNotes: row.Item.SoldNotes, - }) + SoldTo: row.SoldTo, + SoldTime: row.SoldTime, + SoldPrice: row.SoldPrice, + SoldNotes: row.SoldNotes, - if err != nil { - return 
count, err + Notes: row.Notes, + Fields: fields, } - count++ + item, err = svc.repo.Items.UpdateByGroup(ctx, GID, updateItem) + if err != nil { + return 0, err + } + + finished++ } - return count, nil + + return finished, nil +} + +func (svc *ItemService) ExportTSV(ctx context.Context, GID uuid.UUID) ([][]string, error) { + items, err := svc.repo.Items.GetAll(ctx, GID) + if err != nil { + return nil, err + } + + sheet := reporting.IOSheet{} + + err = sheet.ReadItems(ctx, items, GID, svc.repo) + if err != nil { + return nil, err + } + + return sheet.TSV() +} + +func (svc *ItemService) ExportBillOfMaterialsTSV(ctx context.Context, GID uuid.UUID) ([]byte, error) { + items, err := svc.repo.Items.GetAll(ctx, GID) + if err != nil { + return nil, err + } + + return reporting.BillOfMaterialsTSV(items) } diff --git a/backend/internal/core/services/service_items_attachments.go b/backend/internal/core/services/service_items_attachments.go index 4a7b197..43835c6 100644 --- a/backend/internal/core/services/service_items_attachments.go +++ b/backend/internal/core/services/service_items_attachments.go @@ -12,8 +12,8 @@ import ( "github.com/rs/zerolog/log" ) -func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentId uuid.UUID) (*ent.Document, error) { - attachment, err := svc.repo.Attachments.Get(ctx, attachmentId) +func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentID uuid.UUID) (*ent.Document, error) { + attachment, err := svc.repo.Attachments.Get(ctx, attachmentID) if err != nil { return nil, err } @@ -21,9 +21,9 @@ func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentId uuid.UU return attachment.Edges.Document, nil } -func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) { +func (svc *ItemService) AttachmentUpdate(ctx Context, itemID uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) { // Update Attachment - attachment, err := 
svc.repo.Attachments.Update(ctx, data.ID, attachment.Type(data.Type)) + attachment, err := svc.repo.Attachments.Update(ctx, data.ID, data) if err != nil { return repo.ItemOut{}, err } @@ -35,15 +35,15 @@ func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *re return repo.ItemOut{}, err } - return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId) + return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID) } // AttachmentAdd adds an attachment to an item by creating an entry in the Documents table and linking it to the Attachment // Table and Items table. The file provided via the reader is stored on the file system based on the provided // relative path during construction of the service. -func (svc *ItemService) AttachmentAdd(ctx Context, itemId uuid.UUID, filename string, attachmentType attachment.Type, file io.Reader) (repo.ItemOut, error) { +func (svc *ItemService) AttachmentAdd(ctx Context, itemID uuid.UUID, filename string, attachmentType attachment.Type, file io.Reader) (repo.ItemOut, error) { // Get the Item - _, err := svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId) + _, err := svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID) if err != nil { return repo.ItemOut{}, err } @@ -56,29 +56,29 @@ func (svc *ItemService) AttachmentAdd(ctx Context, itemId uuid.UUID, filename st } // Create the attachment - _, err = svc.repo.Attachments.Create(ctx, itemId, doc.ID, attachmentType) + _, err = svc.repo.Attachments.Create(ctx, itemID, doc.ID, attachmentType) if err != nil { log.Err(err).Msg("failed to create attachment") return repo.ItemOut{}, err } - return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemId) + return svc.repo.Items.GetOneByGroup(ctx, ctx.GID, itemID) } -func (svc *ItemService) AttachmentDelete(ctx context.Context, gid, itemId, attachmentId uuid.UUID) error { +func (svc *ItemService) AttachmentDelete(ctx context.Context, gid, itemID, attachmentID uuid.UUID) error { // Get the Item - _, err := 
svc.repo.Items.GetOneByGroup(ctx, gid, itemId) + _, err := svc.repo.Items.GetOneByGroup(ctx, gid, itemID) if err != nil { return err } - attachment, err := svc.repo.Attachments.Get(ctx, attachmentId) + attachment, err := svc.repo.Attachments.Get(ctx, attachmentID) if err != nil { return err } // Delete the attachment - err = svc.repo.Attachments.Delete(ctx, attachmentId) + err = svc.repo.Attachments.Delete(ctx, attachmentID) if err != nil { return err } diff --git a/backend/internal/core/services/service_items_attachments_test.go b/backend/internal/core/services/service_items_attachments_test.go index f9db0f6..4e2315e 100644 --- a/backend/internal/core/services/service_items_attachments_test.go +++ b/backend/internal/core/services/service_items_attachments_test.go @@ -9,6 +9,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/repo" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestItemService_AddAttachment(t *testing.T) { @@ -23,7 +24,7 @@ func TestItemService_AddAttachment(t *testing.T) { Description: "test", Name: "test", }) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, loc) itmC := repo.ItemCreate{ @@ -33,11 +34,11 @@ func TestItemService_AddAttachment(t *testing.T) { } itm, err := svc.repo.Items.Create(context.Background(), tGroup.ID, itmC) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, itm) t.Cleanup(func() { err := svc.repo.Items.Delete(context.Background(), itm.ID) - assert.NoError(t, err) + require.NoError(t, err) }) contents := fk.Str(1000) @@ -45,7 +46,7 @@ func TestItemService_AddAttachment(t *testing.T) { // Setup afterAttachment, err := svc.AttachmentAdd(tCtx, itm.ID, "testfile.txt", "attachment", reader) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, afterAttachment) // Check that the file exists @@ -56,6 +57,6 @@ func TestItemService_AddAttachment(t *testing.T) { // Check that the file contents are correct bts, err := 
os.ReadFile(storedPath) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, contents, string(bts)) } diff --git a/backend/internal/core/services/service_items_csv.go b/backend/internal/core/services/service_items_csv.go deleted file mode 100644 index 2d93f6e..0000000 --- a/backend/internal/core/services/service_items_csv.go +++ /dev/null @@ -1,151 +0,0 @@ -package services - -import ( - "bytes" - "encoding/csv" - "errors" - "io" - "strconv" - "strings" - - "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/hay-kot/homebox/backend/internal/data/types" -) - -func determineSeparator(data []byte) (rune, error) { - // First row - firstRow := bytes.Split(data, []byte("\n"))[0] - - // find first comma or /t - comma := bytes.IndexByte(firstRow, ',') - tab := bytes.IndexByte(firstRow, '\t') - - switch { - case comma == -1 && tab == -1: - return 0, errors.New("could not determine separator") - case tab > comma: - return '\t', nil - default: - return ',', nil - } -} - -func ReadCsv(r io.Reader) ([][]string, error) { - data, err := io.ReadAll(r) - if err != nil { - return nil, err - } - - reader := csv.NewReader(bytes.NewReader(data)) - - // Determine separator - sep, err := determineSeparator(data) - - if err != nil { - return nil, err - } - - reader.Comma = sep - - return reader.ReadAll() -} - -var ErrInvalidCsv = errors.New("invalid csv") - -const NumOfCols = 21 - -func parseFloat(s string) float64 { - if s == "" { - return 0 - } - f, _ := strconv.ParseFloat(s, 64) - return f -} - -func parseBool(s string) bool { - switch strings.ToLower(s) { - case "true", "yes", "1": - return true - default: - return false - } -} - -func parseInt(s string) int { - i, _ := strconv.Atoi(s) - return i -} - -type csvRow struct { - Item repo.ItemOut - Location string - LabelStr string -} - -func newCsvRow(row []string) csvRow { - - return csvRow{ - Location: row[1], - LabelStr: row[2], - Item: repo.ItemOut{ - ItemSummary: repo.ItemSummary{ - ImportRef: row[0], 
- Quantity: parseInt(row[3]), - Name: row[4], - Description: row[5], - Insured: parseBool(row[6]), - PurchasePrice: parseFloat(row[12]), - }, - SerialNumber: row[7], - ModelNumber: row[8], - Manufacturer: row[9], - Notes: row[10], - PurchaseFrom: row[11], - PurchaseTime: types.DateFromString(row[13]), - LifetimeWarranty: parseBool(row[14]), - WarrantyExpires: types.DateFromString(row[15]), - WarrantyDetails: row[16], - SoldTo: row[17], - SoldPrice: parseFloat(row[18]), - SoldTime: types.DateFromString(row[19]), - SoldNotes: row[20], - }, - } -} - -func (c csvRow) getLabels() []string { - split := strings.Split(c.LabelStr, ";") - - // Trim each - for i, s := range split { - split[i] = strings.TrimSpace(s) - } - - // Remove empty - for i, s := range split { - if s == "" { - split = append(split[:i], split[i+1:]...) - } - } - - return split -} - -func (c csvRow) validate() []error { - var errs []error - - add := func(err error) { - errs = append(errs, err) - } - - required := func(s string, name string) { - if s == "" { - add(errors.New(name + " is required")) - } - } - - required(c.Location, "Location") - required(c.Item.Name, "Name") - - return errs -} diff --git a/backend/internal/core/services/service_items_csv_test.go b/backend/internal/core/services/service_items_csv_test.go deleted file mode 100644 index af3056c..0000000 --- a/backend/internal/core/services/service_items_csv_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package services - -import ( - "bytes" - _ "embed" - "encoding/csv" - "fmt" - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -//go:embed .testdata/import.csv -var CSVData_Comma []byte - -//go:embed .testdata/import.tsv -var CSVData_Tab []byte - -func loadcsv() [][]string { - reader := csv.NewReader(bytes.NewReader(CSVData_Comma)) - - records, err := reader.ReadAll() - if err != nil { - panic(err) - } - - return records -} - -func Test_CorrectDateParsing(t *testing.T) { - t.Parallel() - - expected := []time.Time{ - 
time.Date(2021, 10, 13, 0, 0, 0, 0, time.UTC), - time.Date(2021, 10, 15, 0, 0, 0, 0, time.UTC), - time.Date(2021, 10, 13, 0, 0, 0, 0, time.UTC), - time.Date(2020, 10, 21, 0, 0, 0, 0, time.UTC), - time.Date(2020, 10, 14, 0, 0, 0, 0, time.UTC), - time.Date(2020, 9, 30, 0, 0, 0, 0, time.UTC), - } - - records := loadcsv() - - for i, record := range records { - if i == 0 { - continue - } - entity := newCsvRow(record) - expected := expected[i-1] - - assert.Equal(t, expected, entity.Item.PurchaseTime.Time(), fmt.Sprintf("Failed on row %d", i)) - assert.Equal(t, expected, entity.Item.WarrantyExpires.Time(), fmt.Sprintf("Failed on row %d", i)) - assert.Equal(t, expected, entity.Item.SoldTime.Time(), fmt.Sprintf("Failed on row %d", i)) - } -} - -func Test_csvRow_getLabels(t *testing.T) { - type fields struct { - LabelStr string - } - tests := []struct { - name string - fields fields - want []string - }{ - { - name: "basic test", - fields: fields{ - LabelStr: "IOT;Home Assistant;Z-Wave", - }, - want: []string{"IOT", "Home Assistant", "Z-Wave"}, - }, - { - name: "no labels", - fields: fields{ - LabelStr: "", - }, - want: []string{}, - }, - { - name: "single label", - fields: fields{ - LabelStr: "IOT", - }, - want: []string{"IOT"}, - }, - { - name: "trailing semicolon", - fields: fields{ - LabelStr: "IOT;", - }, - want: []string{"IOT"}, - }, - - { - name: "whitespace", - fields: fields{ - LabelStr: " IOT; Home Assistant; Z-Wave ", - }, - want: []string{"IOT", "Home Assistant", "Z-Wave"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := csvRow{ - LabelStr: tt.fields.LabelStr, - } - if got := c.getLabels(); !reflect.DeepEqual(got, tt.want) { - t.Errorf("csvRow.getLabels() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_determineSeparator(t *testing.T) { - type args struct { - data []byte - } - tests := []struct { - name string - args args - want rune - wantErr bool - }{ - { - name: "comma", - args: args{ - data: CSVData_Comma, - }, - 
want: ',', - wantErr: false, - }, - { - name: "tab", - args: args{ - data: CSVData_Tab, - }, - want: '\t', - wantErr: false, - }, - { - name: "invalid", - args: args{ - data: []byte("a;b;c"), - }, - want: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := determineSeparator(tt.args.data) - if (err != nil) != tt.wantErr { - t.Errorf("determineSeparator() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("determineSeparator() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/backend/internal/core/services/service_items_test.go b/backend/internal/core/services/service_items_test.go deleted file mode 100644 index 105c842..0000000 --- a/backend/internal/core/services/service_items_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package services - -import ( - "context" - "testing" - - "github.com/google/uuid" - "github.com/hay-kot/homebox/backend/internal/data/repo" - "github.com/stretchr/testify/assert" -) - -func TestItemService_CsvImport(t *testing.T) { - data := loadcsv() - svc := &ItemService{ - repo: tRepos, - } - count, err := svc.CsvImport(context.Background(), tGroup.ID, data) - assert.Equal(t, 6, count) - assert.NoError(t, err) - - // Check import refs are deduplicated - count, err = svc.CsvImport(context.Background(), tGroup.ID, data) - assert.Equal(t, 0, count) - assert.NoError(t, err) - - items, err := svc.repo.Items.GetAll(context.Background(), tGroup.ID) - assert.NoError(t, err) - t.Cleanup(func() { - for _, item := range items { - err := svc.repo.Items.Delete(context.Background(), item.ID) - assert.NoError(t, err) - } - }) - - assert.Equal(t, len(items), 6) - - dataCsv := []csvRow{} - for _, item := range data { - dataCsv = append(dataCsv, newCsvRow(item)) - } - - allLocation, err := tRepos.Locations.GetAll(context.Background(), tGroup.ID, repo.LocationQuery{}) - assert.NoError(t, err) - locNames := []string{} - for _, loc := range allLocation { - locNames = 
append(locNames, loc.Name) - } - - allLabels, err := tRepos.Labels.GetAll(context.Background(), tGroup.ID) - assert.NoError(t, err) - labelNames := []string{} - for _, label := range allLabels { - labelNames = append(labelNames, label.Name) - } - - ids := []uuid.UUID{} - t.Cleanup((func() { - for _, id := range ids { - err := svc.repo.Items.Delete(context.Background(), id) - assert.NoError(t, err) - } - })) - - for _, item := range items { - assert.Contains(t, locNames, item.Location.Name) - for _, label := range item.Labels { - assert.Contains(t, labelNames, label.Name) - } - - for _, csvRow := range dataCsv { - if csvRow.Item.Name == item.Name { - assert.Equal(t, csvRow.Item.Description, item.Description) - assert.Equal(t, csvRow.Item.Quantity, item.Quantity) - assert.Equal(t, csvRow.Item.Insured, item.Insured) - } - } - } -} diff --git a/backend/internal/core/services/service_user.go b/backend/internal/core/services/service_user.go index bed3adb..d86c39b 100644 --- a/backend/internal/core/services/service_user.go +++ b/backend/internal/core/services/service_user.go @@ -16,7 +16,7 @@ var ( oneWeek = time.Hour * 24 * 7 ErrorInvalidLogin = errors.New("invalid username or password") ErrorInvalidToken = errors.New("invalid token") - ErrorTokenIdMismatch = errors.New("token id mismatch") + ErrorTokenIDMismatch = errors.New("token id mismatch") ) type UserService struct { @@ -61,6 +61,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration) switch data.GroupToken { case "": + log.Debug().Msg("creating new group") creatingGroup = true group, err = svc.repos.Groups.GroupCreate(ctx, "Home") if err != nil { @@ -68,6 +69,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration) return repo.UserOut{}, err } default: + log.Debug().Msg("joining existing group") token, err = svc.repos.Groups.InvitationGet(ctx, hasher.HashToken(data.GroupToken)) if err != nil { log.Err(err).Msg("Failed to get invitation token") @@ -90,18 
+92,21 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration) if err != nil { return repo.UserOut{}, err } + log.Debug().Msg("user created") // Create the default labels and locations for the group. if creatingGroup { + log.Debug().Msg("creating default labels") for _, label := range defaultLabels() { - _, err := svc.repos.Labels.Create(ctx, group.ID, label) + _, err := svc.repos.Labels.Create(ctx, usr.GroupID, label) if err != nil { return repo.UserOut{}, err } } + log.Debug().Msg("creating default locations") for _, location := range defaultLocations() { - _, err := svc.repos.Locations.Create(ctx, group.ID, location) + _, err := svc.repos.Locations.Create(ctx, usr.GroupID, location) if err != nil { return repo.UserOut{}, err } @@ -110,6 +115,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration) // Decrement the invitation token if it was used. if token.ID != uuid.Nil { + log.Debug().Msg("decrementing invitation token") err = svc.repos.Groups.InvitationUpdate(ctx, token.ID, token.Uses-1) if err != nil { log.Err(err).Msg("Failed to update invitation token") @@ -132,18 +138,24 @@ func (svc *UserService) UpdateSelf(ctx context.Context, ID uuid.UUID, data repo. 
return repo.UserOut{}, err } - return svc.repos.Users.GetOneId(ctx, ID) + return svc.repos.Users.GetOneID(ctx, ID) } // ============================================================================ // User Authentication -func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID) (UserAuthTokenDetail, error) { +func (svc *UserService) createSessionToken(ctx context.Context, userID uuid.UUID, extendedSession bool) (UserAuthTokenDetail, error) { attachmentToken := hasher.GenerateToken() + + expiresAt := time.Now().Add(oneWeek) + if extendedSession { + expiresAt = time.Now().Add(oneWeek * 4) + } + attachmentData := repo.UserAuthTokenCreate{ - UserID: userId, + UserID: userID, TokenHash: attachmentToken.Hash, - ExpiresAt: time.Now().Add(oneWeek), + ExpiresAt: expiresAt, } _, err := svc.repos.AuthTokens.CreateToken(ctx, attachmentData, authroles.RoleAttachments) @@ -153,9 +165,9 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID userToken := hasher.GenerateToken() data := repo.UserAuthTokenCreate{ - UserID: userId, + UserID: userID, TokenHash: userToken.Hash, - ExpiresAt: time.Now().Add(oneWeek), + ExpiresAt: expiresAt, } created, err := svc.repos.AuthTokens.CreateToken(ctx, data, authroles.RoleUser) @@ -170,7 +182,7 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID }, nil } -func (svc *UserService) Login(ctx context.Context, username, password string) (UserAuthTokenDetail, error) { +func (svc *UserService) Login(ctx context.Context, username, password string, extendedSession bool) (UserAuthTokenDetail, error) { usr, err := svc.repos.Users.GetOneEmail(ctx, username) if err != nil { // SECURITY: Perform hash to ensure response times are the same @@ -182,7 +194,7 @@ func (svc *UserService) Login(ctx context.Context, username, password string) (U return UserAuthTokenDetail{}, ErrorInvalidLogin } - return svc.createSessionToken(ctx, usr.ID) + return svc.createSessionToken(ctx, usr.ID, 
extendedSession) } func (svc *UserService) Logout(ctx context.Context, token string) error { @@ -199,7 +211,7 @@ func (svc *UserService) RenewToken(ctx context.Context, token string) (UserAuthT return UserAuthTokenDetail{}, ErrorInvalidToken } - return svc.createSessionToken(ctx, dbToken.ID) + return svc.createSessionToken(ctx, dbToken.ID, false) } // DeleteSelf deletes the user that is currently logged based of the provided UUID @@ -210,7 +222,7 @@ func (svc *UserService) DeleteSelf(ctx context.Context, ID uuid.UUID) error { } func (svc *UserService) ChangePassword(ctx Context, current string, new string) (ok bool) { - usr, err := svc.repos.Users.GetOneId(ctx, ctx.UID) + usr, err := svc.repos.Users.GetOneID(ctx, ctx.UID) if err != nil { return false } diff --git a/backend/internal/data/ent/attachment.go b/backend/internal/data/ent/attachment.go index 25d2df4..bfb7de2 100644 --- a/backend/internal/data/ent/attachment.go +++ b/backend/internal/data/ent/attachment.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" @@ -25,11 +26,14 @@ type Attachment struct { UpdatedAt time.Time `json:"updated_at,omitempty"` // Type holds the value of the "type" field. Type attachment.Type `json:"type,omitempty"` + // Primary holds the value of the "primary" field. + Primary bool `json:"primary,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the AttachmentQuery when eager-loading is set. Edges AttachmentEdges `json:"edges"` document_attachments *uuid.UUID item_attachments *uuid.UUID + selectValues sql.SelectValues } // AttachmentEdges holds the relations/edges for other nodes in the graph. 
@@ -74,6 +78,8 @@ func (*Attachment) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case attachment.FieldPrimary: + values[i] = new(sql.NullBool) case attachment.FieldType: values[i] = new(sql.NullString) case attachment.FieldCreatedAt, attachment.FieldUpdatedAt: @@ -85,7 +91,7 @@ func (*Attachment) scanValues(columns []string) ([]any, error) { case attachment.ForeignKeys[1]: // item_attachments values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Attachment", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -123,6 +129,12 @@ func (a *Attachment) assignValues(columns []string, values []any) error { } else if value.Valid { a.Type = attachment.Type(value.String) } + case attachment.FieldPrimary: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field primary", values[i]) + } else if value.Valid { + a.Primary = value.Bool + } case attachment.ForeignKeys[0]: if value, ok := values[i].(*sql.NullScanner); !ok { return fmt.Errorf("unexpected type %T for field document_attachments", values[i]) @@ -137,11 +149,19 @@ func (a *Attachment) assignValues(columns []string, values []any) error { a.item_attachments = new(uuid.UUID) *a.item_attachments = *value.S.(*uuid.UUID) } + default: + a.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Attachment. +// This includes values selected through modifiers, order, etc. +func (a *Attachment) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) +} + // QueryItem queries the "item" edge of the Attachment entity. 
func (a *Attachment) QueryItem() *ItemQuery { return NewAttachmentClient(a.config).QueryItem(a) @@ -183,6 +203,9 @@ func (a *Attachment) String() string { builder.WriteString(", ") builder.WriteString("type=") builder.WriteString(fmt.Sprintf("%v", a.Type)) + builder.WriteString(", ") + builder.WriteString("primary=") + builder.WriteString(fmt.Sprintf("%v", a.Primary)) builder.WriteByte(')') return builder.String() } diff --git a/backend/internal/data/ent/attachment/attachment.go b/backend/internal/data/ent/attachment/attachment.go index f7aef63..4bbac72 100644 --- a/backend/internal/data/ent/attachment/attachment.go +++ b/backend/internal/data/ent/attachment/attachment.go @@ -6,6 +6,8 @@ import ( "fmt" "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -20,6 +22,8 @@ const ( FieldUpdatedAt = "updated_at" // FieldType holds the string denoting the type field in the database. FieldType = "type" + // FieldPrimary holds the string denoting the primary field in the database. + FieldPrimary = "primary" // EdgeItem holds the string denoting the item edge name in mutations. EdgeItem = "item" // EdgeDocument holds the string denoting the document edge name in mutations. @@ -48,6 +52,7 @@ var Columns = []string{ FieldCreatedAt, FieldUpdatedAt, FieldType, + FieldPrimary, } // ForeignKeys holds the SQL foreign-keys that are owned by the "attachments" @@ -79,6 +84,8 @@ var ( DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time + // DefaultPrimary holds the default value on creation for the "primary" field. + DefaultPrimary bool // DefaultID holds the default value on creation for the "id" field. 
DefaultID func() uuid.UUID ) @@ -111,3 +118,59 @@ func TypeValidator(_type Type) error { return fmt.Errorf("attachment: invalid enum value for type field: %q", _type) } } + +// OrderOption defines the ordering options for the Attachment queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByPrimary orders the results by the primary field. +func ByPrimary(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrimary, opts...).ToFunc() +} + +// ByItemField orders the results by item field. +func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...)) + } +} + +// ByDocumentField orders the results by document field. 
+func ByDocumentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDocumentStep(), sql.OrderByField(field, opts...)) + } +} +func newItemStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), + ) +} +func newDocumentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), + ) +} diff --git a/backend/internal/data/ent/attachment/where.go b/backend/internal/data/ent/attachment/where.go index dd1981f..f6950f3 100644 --- a/backend/internal/data/ent/attachment/where.go +++ b/backend/internal/data/ent/attachment/where.go @@ -66,6 +66,11 @@ func UpdatedAt(v time.Time) predicate.Attachment { return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v)) } +// Primary applies equality check predicate on the "primary" field. It's identical to PrimaryEQ. +func Primary(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldPrimary, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Attachment { return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v)) @@ -166,6 +171,16 @@ func TypeNotIn(vs ...Type) predicate.Attachment { return predicate.Attachment(sql.FieldNotIn(FieldType, vs...)) } +// PrimaryEQ applies the EQ predicate on the "primary" field. +func PrimaryEQ(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldPrimary, v)) +} + +// PrimaryNEQ applies the NEQ predicate on the "primary" field. +func PrimaryNEQ(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldPrimary, v)) +} + // HasItem applies the HasEdge predicate on the "item" edge. 
func HasItem() predicate.Attachment { return predicate.Attachment(func(s *sql.Selector) { @@ -180,11 +195,7 @@ func HasItem() predicate.Attachment { // HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates). func HasItemWith(preds ...predicate.Item) predicate.Attachment { return predicate.Attachment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), - ) + step := newItemStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -207,11 +218,7 @@ func HasDocument() predicate.Attachment { // HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates). func HasDocumentWith(preds ...predicate.Document) predicate.Attachment { return predicate.Attachment(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DocumentInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn), - ) + step := newDocumentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -222,32 +229,15 @@ func HasDocumentWith(preds ...predicate.Document) predicate.Attachment { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Attachment) predicate.Attachment { - return predicate.Attachment(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Attachment(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Attachment) predicate.Attachment { - return predicate.Attachment(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Attachment(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Attachment) predicate.Attachment { - return predicate.Attachment(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Attachment(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/attachment_create.go b/backend/internal/data/ent/attachment_create.go index 5ce7ef3..d1a0b5b 100644 --- a/backend/internal/data/ent/attachment_create.go +++ b/backend/internal/data/ent/attachment_create.go @@ -65,6 +65,20 @@ func (ac *AttachmentCreate) SetNillableType(a *attachment.Type) *AttachmentCreat return ac } +// SetPrimary sets the "primary" field. +func (ac *AttachmentCreate) SetPrimary(b bool) *AttachmentCreate { + ac.mutation.SetPrimary(b) + return ac +} + +// SetNillablePrimary sets the "primary" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillablePrimary(b *bool) *AttachmentCreate { + if b != nil { + ac.SetPrimary(*b) + } + return ac +} + // SetID sets the "id" field. func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate { ac.mutation.SetID(u) @@ -109,7 +123,7 @@ func (ac *AttachmentCreate) Mutation() *AttachmentMutation { // Save creates the Attachment in the database. func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) { ac.defaults() - return withHooks[*Attachment, AttachmentMutation](ctx, ac.sqlSave, ac.mutation, ac.hooks) + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -148,6 +162,10 @@ func (ac *AttachmentCreate) defaults() { v := attachment.DefaultType ac.mutation.SetType(v) } + if _, ok := ac.mutation.Primary(); !ok { + v := attachment.DefaultPrimary + ac.mutation.SetPrimary(v) + } if _, ok := ac.mutation.ID(); !ok { v := attachment.DefaultID() ac.mutation.SetID(v) @@ -170,6 +188,9 @@ func (ac *AttachmentCreate) check() error { return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)} } } + if _, ok := ac.mutation.Primary(); !ok { + return &ValidationError{Name: "primary", err: errors.New(`ent: missing required field "Attachment.primary"`)} + } if _, ok := ac.mutation.ItemID(); !ok { return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "Attachment.item"`)} } @@ -223,6 +244,10 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { _spec.SetField(attachment.FieldType, field.TypeEnum, value) _node.Type = value } + if value, ok := ac.mutation.Primary(); ok { + _spec.SetField(attachment.FieldPrimary, field.TypeBool, value) + _node.Primary = value + } if nodes := ac.mutation.ItemIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -231,10 +256,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { Columns: []string{attachment.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -251,10 +273,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { Columns: []string{attachment.DocumentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -269,11 +288,15 @@ func (ac 
*AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { // AttachmentCreateBulk is the builder for creating many Attachment entities in bulk. type AttachmentCreateBulk struct { config + err error builders []*AttachmentCreate } // Save creates the Attachment entities in the database. func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) { + if acb.err != nil { + return nil, acb.err + } specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) nodes := make([]*Attachment, len(acb.builders)) mutators := make([]Mutator, len(acb.builders)) @@ -290,8 +313,8 @@ func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/attachment_delete.go b/backend/internal/data/ent/attachment_delete.go index 8185ac1..1be608a 100644 --- a/backend/internal/data/ent/attachment_delete.go +++ b/backend/internal/data/ent/attachment_delete.go @@ -27,7 +27,7 @@ func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete // Exec executes the deletion query and returns how many vertices were deleted. func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, AttachmentMutation](ctx, ad.sqlExec, ad.mutation, ad.hooks) + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) } // ExecX is like Exec, but panics if an error occurs. 
diff --git a/backend/internal/data/ent/attachment_query.go b/backend/internal/data/ent/attachment_query.go index de5821a..976e436 100644 --- a/backend/internal/data/ent/attachment_query.go +++ b/backend/internal/data/ent/attachment_query.go @@ -21,7 +21,7 @@ import ( type AttachmentQuery struct { config ctx *QueryContext - order []OrderFunc + order []attachment.OrderOption inters []Interceptor predicates []predicate.Attachment withItem *ItemQuery @@ -58,7 +58,7 @@ func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery { } // Order specifies how the records should be ordered. -func (aq *AttachmentQuery) Order(o ...OrderFunc) *AttachmentQuery { +func (aq *AttachmentQuery) Order(o ...attachment.OrderOption) *AttachmentQuery { aq.order = append(aq.order, o...) return aq } @@ -296,7 +296,7 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery { return &AttachmentQuery{ config: aq.config, ctx: aq.ctx.Clone(), - order: append([]OrderFunc{}, aq.order...), + order: append([]attachment.OrderOption{}, aq.order...), inters: append([]Interceptor{}, aq.inters...), predicates: append([]predicate.Attachment{}, aq.predicates...), withItem: aq.withItem.Clone(), diff --git a/backend/internal/data/ent/attachment_update.go b/backend/internal/data/ent/attachment_update.go index e4b747d..bdf10a5 100644 --- a/backend/internal/data/ent/attachment_update.go +++ b/backend/internal/data/ent/attachment_update.go @@ -51,6 +51,20 @@ func (au *AttachmentUpdate) SetNillableType(a *attachment.Type) *AttachmentUpdat return au } +// SetPrimary sets the "primary" field. +func (au *AttachmentUpdate) SetPrimary(b bool) *AttachmentUpdate { + au.mutation.SetPrimary(b) + return au +} + +// SetNillablePrimary sets the "primary" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillablePrimary(b *bool) *AttachmentUpdate { + if b != nil { + au.SetPrimary(*b) + } + return au +} + // SetItemID sets the "item" edge to the Item entity by ID. 
func (au *AttachmentUpdate) SetItemID(id uuid.UUID) *AttachmentUpdate { au.mutation.SetItemID(id) @@ -93,7 +107,7 @@ func (au *AttachmentUpdate) ClearDocument() *AttachmentUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) { au.defaults() - return withHooks[int, AttachmentMutation](ctx, au.sqlSave, au.mutation, au.hooks) + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -160,6 +174,9 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := au.mutation.GetType(); ok { _spec.SetField(attachment.FieldType, field.TypeEnum, value) } + if value, ok := au.mutation.Primary(); ok { + _spec.SetField(attachment.FieldPrimary, field.TypeBool, value) + } if au.mutation.ItemCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -168,10 +185,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{attachment.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -184,10 +198,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{attachment.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -203,10 +214,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{attachment.DocumentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -219,10 +227,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{attachment.DocumentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -270,6 +275,20 @@ func (auo *AttachmentUpdateOne) SetNillableType(a *attachment.Type) *AttachmentU return auo } +// SetPrimary sets the "primary" field. +func (auo *AttachmentUpdateOne) SetPrimary(b bool) *AttachmentUpdateOne { + auo.mutation.SetPrimary(b) + return auo +} + +// SetNillablePrimary sets the "primary" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillablePrimary(b *bool) *AttachmentUpdateOne { + if b != nil { + auo.SetPrimary(*b) + } + return auo +} + // SetItemID sets the "item" edge to the Item entity by ID. func (auo *AttachmentUpdateOne) SetItemID(id uuid.UUID) *AttachmentUpdateOne { auo.mutation.SetItemID(id) @@ -325,7 +344,7 @@ func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *Attachme // Save executes the query and returns the updated Attachment entity. func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) { auo.defaults() - return withHooks[*Attachment, AttachmentMutation](ctx, auo.sqlSave, auo.mutation, auo.hooks) + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -409,6 +428,9 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, if value, ok := auo.mutation.GetType(); ok { _spec.SetField(attachment.FieldType, field.TypeEnum, value) } + if value, ok := auo.mutation.Primary(); ok { + _spec.SetField(attachment.FieldPrimary, field.TypeBool, value) + } if auo.mutation.ItemCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -417,10 +439,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, Columns: []string{attachment.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -433,10 +452,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, Columns: []string{attachment.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -452,10 +468,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, Columns: []string{attachment.DocumentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -468,10 +481,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, Columns: []string{attachment.DocumentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/authroles.go 
b/backend/internal/data/ent/authroles.go index 4dcd733..4daa0f6 100644 --- a/backend/internal/data/ent/authroles.go +++ b/backend/internal/data/ent/authroles.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/authroles" @@ -23,6 +24,7 @@ type AuthRoles struct { // The values are being populated by the AuthRolesQuery when eager-loading is set. Edges AuthRolesEdges `json:"edges"` auth_tokens_roles *uuid.UUID + selectValues sql.SelectValues } // AuthRolesEdges holds the relations/edges for other nodes in the graph. @@ -59,7 +61,7 @@ func (*AuthRoles) scanValues(columns []string) ([]any, error) { case authroles.ForeignKeys[0]: // auth_tokens_roles values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type AuthRoles", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -92,11 +94,19 @@ func (ar *AuthRoles) assignValues(columns []string, values []any) error { ar.auth_tokens_roles = new(uuid.UUID) *ar.auth_tokens_roles = *value.S.(*uuid.UUID) } + default: + ar.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AuthRoles. +// This includes values selected through modifiers, order, etc. +func (ar *AuthRoles) Value(name string) (ent.Value, error) { + return ar.selectValues.Get(name) +} + // QueryToken queries the "token" edge of the AuthRoles entity. 
func (ar *AuthRoles) QueryToken() *AuthTokensQuery { return NewAuthRolesClient(ar.config).QueryToken(ar) diff --git a/backend/internal/data/ent/authroles/authroles.go b/backend/internal/data/ent/authroles/authroles.go index b414e60..bb5e87a 100644 --- a/backend/internal/data/ent/authroles/authroles.go +++ b/backend/internal/data/ent/authroles/authroles.go @@ -4,6 +4,9 @@ package authroles import ( "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -79,3 +82,30 @@ func RoleValidator(r Role) error { return fmt.Errorf("authroles: invalid enum value for role field: %q", r) } } + +// OrderOption defines the ordering options for the AuthRoles queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByRole orders the results by the role field. +func ByRole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRole, opts...).ToFunc() +} + +// ByTokenField orders the results by token field. +func ByTokenField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newTokenStep(), sql.OrderByField(field, opts...)) + } +} +func newTokenStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(TokenInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn), + ) +} diff --git a/backend/internal/data/ent/authroles/where.go b/backend/internal/data/ent/authroles/where.go index 53978b0..bb5b54a 100644 --- a/backend/internal/data/ent/authroles/where.go +++ b/backend/internal/data/ent/authroles/where.go @@ -87,11 +87,7 @@ func HasToken() predicate.AuthRoles { // HasTokenWith applies the HasEdge predicate on the "token" edge with a given conditions (other predicates). 
func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles { return predicate.AuthRoles(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(TokenInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn), - ) + step := newTokenStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -102,32 +98,15 @@ func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles { // And groups predicates with the AND operator between them. func And(predicates ...predicate.AuthRoles) predicate.AuthRoles { - return predicate.AuthRoles(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthRoles(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.AuthRoles) predicate.AuthRoles { - return predicate.AuthRoles(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthRoles(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.AuthRoles) predicate.AuthRoles { - return predicate.AuthRoles(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AuthRoles(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/authroles_create.go b/backend/internal/data/ent/authroles_create.go index 566d107..19e594f 100644 --- a/backend/internal/data/ent/authroles_create.go +++ b/backend/internal/data/ent/authroles_create.go @@ -62,7 +62,7 @@ func (arc *AuthRolesCreate) Mutation() *AuthRolesMutation { // Save creates the AuthRoles in the database. 
func (arc *AuthRolesCreate) Save(ctx context.Context) (*AuthRoles, error) { arc.defaults() - return withHooks[*AuthRoles, AuthRolesMutation](ctx, arc.sqlSave, arc.mutation, arc.hooks) + return withHooks(ctx, arc.sqlSave, arc.mutation, arc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -143,10 +143,7 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) { Columns: []string{authroles.TokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -161,11 +158,15 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) { // AuthRolesCreateBulk is the builder for creating many AuthRoles entities in bulk. type AuthRolesCreateBulk struct { config + err error builders []*AuthRolesCreate } // Save creates the AuthRoles entities in the database. 
func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error) { + if arcb.err != nil { + return nil, arcb.err + } specs := make([]*sqlgraph.CreateSpec, len(arcb.builders)) nodes := make([]*AuthRoles, len(arcb.builders)) mutators := make([]Mutator, len(arcb.builders)) @@ -182,8 +183,8 @@ func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error) return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, arcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/authroles_delete.go b/backend/internal/data/ent/authroles_delete.go index 13a2518..68a0dfc 100644 --- a/backend/internal/data/ent/authroles_delete.go +++ b/backend/internal/data/ent/authroles_delete.go @@ -27,7 +27,7 @@ func (ard *AuthRolesDelete) Where(ps ...predicate.AuthRoles) *AuthRolesDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ard *AuthRolesDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, AuthRolesMutation](ctx, ard.sqlExec, ard.mutation, ard.hooks) + return withHooks(ctx, ard.sqlExec, ard.mutation, ard.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/authroles_query.go b/backend/internal/data/ent/authroles_query.go index 14042ff..bf47577 100644 --- a/backend/internal/data/ent/authroles_query.go +++ b/backend/internal/data/ent/authroles_query.go @@ -20,7 +20,7 @@ import ( type AuthRolesQuery struct { config ctx *QueryContext - order []OrderFunc + order []authroles.OrderOption inters []Interceptor predicates []predicate.AuthRoles withToken *AuthTokensQuery @@ -56,7 +56,7 @@ func (arq *AuthRolesQuery) Unique(unique bool) *AuthRolesQuery { } // Order specifies how the records should be ordered. 
-func (arq *AuthRolesQuery) Order(o ...OrderFunc) *AuthRolesQuery { +func (arq *AuthRolesQuery) Order(o ...authroles.OrderOption) *AuthRolesQuery { arq.order = append(arq.order, o...) return arq } @@ -272,7 +272,7 @@ func (arq *AuthRolesQuery) Clone() *AuthRolesQuery { return &AuthRolesQuery{ config: arq.config, ctx: arq.ctx.Clone(), - order: append([]OrderFunc{}, arq.order...), + order: append([]authroles.OrderOption{}, arq.order...), inters: append([]Interceptor{}, arq.inters...), predicates: append([]predicate.AuthRoles{}, arq.predicates...), withToken: arq.withToken.Clone(), diff --git a/backend/internal/data/ent/authroles_update.go b/backend/internal/data/ent/authroles_update.go index ca222e4..fbec4f9 100644 --- a/backend/internal/data/ent/authroles_update.go +++ b/backend/internal/data/ent/authroles_update.go @@ -75,7 +75,7 @@ func (aru *AuthRolesUpdate) ClearToken() *AuthRolesUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (aru *AuthRolesUpdate) Save(ctx context.Context) (int, error) { - return withHooks[int, AuthRolesMutation](ctx, aru.sqlSave, aru.mutation, aru.hooks) + return withHooks(ctx, aru.sqlSave, aru.mutation, aru.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -133,10 +133,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authroles.TokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -149,10 +146,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authroles.TokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -239,7 +233,7 @@ func (aruo *AuthRolesUpdateOne) Select(field string, fields ...string) *AuthRole // Save executes the query and returns the updated AuthRoles entity. func (aruo *AuthRolesUpdateOne) Save(ctx context.Context) (*AuthRoles, error) { - return withHooks[*AuthRoles, AuthRolesMutation](ctx, aruo.sqlSave, aruo.mutation, aruo.hooks) + return withHooks(ctx, aruo.sqlSave, aruo.mutation, aruo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -314,10 +308,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles, Columns: []string{authroles.TokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -330,10 +321,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles, Columns: []string{authroles.TokenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/authtokens.go b/backend/internal/data/ent/authtokens.go index a2b6589..14299ba 100644 --- a/backend/internal/data/ent/authtokens.go +++ b/backend/internal/data/ent/authtokens.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/authroles" @@ -31,6 +32,7 @@ type AuthTokens struct { // The values are being populated by the AuthTokensQuery when eager-loading is set. Edges AuthTokensEdges `json:"edges"` user_auth_tokens *uuid.UUID + selectValues sql.SelectValues } // AuthTokensEdges holds the relations/edges for other nodes in the graph. 
@@ -84,7 +86,7 @@ func (*AuthTokens) scanValues(columns []string) ([]any, error) { case authtokens.ForeignKeys[0]: // user_auth_tokens values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type AuthTokens", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -135,11 +137,19 @@ func (at *AuthTokens) assignValues(columns []string, values []any) error { at.user_auth_tokens = new(uuid.UUID) *at.user_auth_tokens = *value.S.(*uuid.UUID) } + default: + at.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the AuthTokens. +// This includes values selected through modifiers, order, etc. +func (at *AuthTokens) Value(name string) (ent.Value, error) { + return at.selectValues.Get(name) +} + // QueryUser queries the "user" edge of the AuthTokens entity. func (at *AuthTokens) QueryUser() *UserQuery { return NewAuthTokensClient(at.config).QueryUser(at) diff --git a/backend/internal/data/ent/authtokens/authtokens.go b/backend/internal/data/ent/authtokens/authtokens.go index 2d809f4..ff555df 100644 --- a/backend/internal/data/ent/authtokens/authtokens.go +++ b/backend/internal/data/ent/authtokens/authtokens.go @@ -5,6 +5,8 @@ package authtokens import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -85,3 +87,54 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the AuthTokens queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByRolesField orders the results by roles field. +func ByRolesField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRolesStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newRolesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RolesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn), + ) +} diff --git a/backend/internal/data/ent/authtokens/where.go b/backend/internal/data/ent/authtokens/where.go index fc2983f..d3642d8 100644 --- a/backend/internal/data/ent/authtokens/where.go +++ b/backend/internal/data/ent/authtokens/where.go @@ -250,11 +250,7 @@ func HasUser() predicate.AuthTokens { // HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). 
func HasUserWith(preds ...predicate.User) predicate.AuthTokens { return predicate.AuthTokens(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(UserInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), - ) + step := newUserStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -277,11 +273,7 @@ func HasRoles() predicate.AuthTokens { // HasRolesWith applies the HasEdge predicate on the "roles" edge with a given conditions (other predicates). func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens { return predicate.AuthTokens(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(RolesInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn), - ) + step := newRolesStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -292,32 +284,15 @@ func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens { // And groups predicates with the AND operator between them. func And(predicates ...predicate.AuthTokens) predicate.AuthTokens { - return predicate.AuthTokens(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthTokens(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.AuthTokens) predicate.AuthTokens { - return predicate.AuthTokens(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.AuthTokens(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.AuthTokens) predicate.AuthTokens { - return predicate.AuthTokens(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.AuthTokens(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/authtokens_create.go b/backend/internal/data/ent/authtokens_create.go index a8f2971..afddb3b 100644 --- a/backend/internal/data/ent/authtokens_create.go +++ b/backend/internal/data/ent/authtokens_create.go @@ -131,7 +131,7 @@ func (atc *AuthTokensCreate) Mutation() *AuthTokensMutation { // Save creates the AuthTokens in the database. func (atc *AuthTokensCreate) Save(ctx context.Context) (*AuthTokens, error) { atc.defaults() - return withHooks[*AuthTokens, AuthTokensMutation](ctx, atc.sqlSave, atc.mutation, atc.hooks) + return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -249,10 +249,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) { Columns: []string{authtokens.UserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -269,10 +266,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) { Columns: []string{authtokens.RolesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: authroles.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -286,11 +280,15 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) { // AuthTokensCreateBulk is the builder for creating many AuthTokens entities in bulk. type AuthTokensCreateBulk struct { config + err error builders []*AuthTokensCreate } // Save creates the AuthTokens entities in the database. 
func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, error) { + if atcb.err != nil { + return nil, atcb.err + } specs := make([]*sqlgraph.CreateSpec, len(atcb.builders)) nodes := make([]*AuthTokens, len(atcb.builders)) mutators := make([]Mutator, len(atcb.builders)) @@ -307,8 +305,8 @@ func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/authtokens_delete.go b/backend/internal/data/ent/authtokens_delete.go index 1d46fe3..4c29851 100644 --- a/backend/internal/data/ent/authtokens_delete.go +++ b/backend/internal/data/ent/authtokens_delete.go @@ -27,7 +27,7 @@ func (atd *AuthTokensDelete) Where(ps ...predicate.AuthTokens) *AuthTokensDelete // Exec executes the deletion query and returns how many vertices were deleted. func (atd *AuthTokensDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, AuthTokensMutation](ctx, atd.sqlExec, atd.mutation, atd.hooks) + return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/authtokens_query.go b/backend/internal/data/ent/authtokens_query.go index 5b4302a..238ab88 100644 --- a/backend/internal/data/ent/authtokens_query.go +++ b/backend/internal/data/ent/authtokens_query.go @@ -22,7 +22,7 @@ import ( type AuthTokensQuery struct { config ctx *QueryContext - order []OrderFunc + order []authtokens.OrderOption inters []Interceptor predicates []predicate.AuthTokens withUser *UserQuery @@ -59,7 +59,7 @@ func (atq *AuthTokensQuery) Unique(unique bool) *AuthTokensQuery { } // Order specifies how the records should be ordered. 
-func (atq *AuthTokensQuery) Order(o ...OrderFunc) *AuthTokensQuery { +func (atq *AuthTokensQuery) Order(o ...authtokens.OrderOption) *AuthTokensQuery { atq.order = append(atq.order, o...) return atq } @@ -297,7 +297,7 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery { return &AuthTokensQuery{ config: atq.config, ctx: atq.ctx.Clone(), - order: append([]OrderFunc{}, atq.order...), + order: append([]authtokens.OrderOption{}, atq.order...), inters: append([]Interceptor{}, atq.inters...), predicates: append([]predicate.AuthTokens{}, atq.predicates...), withUser: atq.withUser.Clone(), @@ -494,7 +494,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery } query.withFKs = true query.Where(predicate.AuthRoles(func(s *sql.Selector) { - s.Where(sql.InValues(authtokens.RolesColumn, fks...)) + s.Where(sql.InValues(s.C(authtokens.RolesColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -507,7 +507,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID) } assign(node, n) } diff --git a/backend/internal/data/ent/authtokens_update.go b/backend/internal/data/ent/authtokens_update.go index 11f34db..776888e 100644 --- a/backend/internal/data/ent/authtokens_update.go +++ b/backend/internal/data/ent/authtokens_update.go @@ -115,7 +115,7 @@ func (atu *AuthTokensUpdate) ClearRoles() *AuthTokensUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (atu *AuthTokensUpdate) Save(ctx context.Context) (int, error) { atu.defaults() - return withHooks[int, AuthTokensMutation](ctx, atu.sqlSave, atu.mutation, atu.hooks) + return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -174,10 +174,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authtokens.UserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -190,10 +187,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authtokens.UserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -209,10 +203,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authtokens.RolesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: authroles.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -225,10 +216,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{authtokens.RolesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: authroles.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -353,7 +341,7 @@ func (atuo *AuthTokensUpdateOne) Select(field string, fields ...string) *AuthTok // Save executes the query and returns the updated AuthTokens entity. 
func (atuo *AuthTokensUpdateOne) Save(ctx context.Context) (*AuthTokens, error) { atuo.defaults() - return withHooks[*AuthTokens, AuthTokensMutation](ctx, atuo.sqlSave, atuo.mutation, atuo.hooks) + return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -429,10 +417,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens Columns: []string{authtokens.UserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -445,10 +430,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens Columns: []string{authtokens.UserColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -464,10 +446,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens Columns: []string{authtokens.RolesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: authroles.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -480,10 +459,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens Columns: []string{authtokens.RolesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: authroles.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/client.go b/backend/internal/data/ent/client.go index 44755c8..2fb9b53 100644 --- 
a/backend/internal/data/ent/client.go +++ b/backend/internal/data/ent/client.go @@ -7,10 +7,15 @@ import ( "errors" "fmt" "log" + "reflect" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/migrate" + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" "github.com/hay-kot/homebox/backend/internal/data/ent/authroles" "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens" @@ -22,11 +27,8 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/user" - - "entgo.io/ent/dialect" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" ) // Client is the client that holds all ent builders. @@ -56,15 +58,15 @@ type Client struct { Location *LocationClient // MaintenanceEntry is the client for interacting with the MaintenanceEntry builders. MaintenanceEntry *MaintenanceEntryClient + // Notifier is the client for interacting with the Notifier builders. + Notifier *NotifierClient // User is the client for interacting with the User builders. User *UserClient } // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} - cfg.options(opts...) 
- client := &Client{config: cfg} + client := &Client{config: newConfig(opts...)} client.init() return client } @@ -82,9 +84,66 @@ func (c *Client) init() { c.Label = NewLabelClient(c.config) c.Location = NewLocationClient(c.config) c.MaintenanceEntry = NewMaintenanceEntryClient(c.config) + c.Notifier = NewNotifierClient(c.config) c.User = NewUserClient(c.config) } +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + // Open opens a database/sql.DB specified by the driver name and // the data source name, and returns a new client attached to it. // Optional parameters can be added for configuring the client. 
@@ -101,11 +160,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error) } } +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + // Tx returns a new transactional client. The provided context // is used until the transaction is committed or rolled back. func (c *Client) Tx(ctx context.Context) (*Tx, error) { if _, ok := c.driver.(*txDriver); ok { - return nil, errors.New("ent: cannot start a transaction within a transaction") + return nil, ErrTxStarted } tx, err := newTx(ctx, c.driver) if err != nil { @@ -127,6 +189,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Label: NewLabelClient(cfg), Location: NewLocationClient(cfg), MaintenanceEntry: NewMaintenanceEntryClient(cfg), + Notifier: NewNotifierClient(cfg), User: NewUserClient(cfg), }, nil } @@ -158,6 +221,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Label: NewLabelClient(cfg), Location: NewLocationClient(cfg), MaintenanceEntry: NewMaintenanceEntryClient(cfg), + Notifier: NewNotifierClient(cfg), User: NewUserClient(cfg), }, nil } @@ -187,35 +251,25 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { - c.Attachment.Use(hooks...) - c.AuthRoles.Use(hooks...) - c.AuthTokens.Use(hooks...) - c.Document.Use(hooks...) - c.Group.Use(hooks...) - c.GroupInvitationToken.Use(hooks...) - c.Item.Use(hooks...) - c.ItemField.Use(hooks...) - c.Label.Use(hooks...) - c.Location.Use(hooks...) - c.MaintenanceEntry.Use(hooks...) - c.User.Use(hooks...) 
+ for _, n := range []interface{ Use(...Hook) }{ + c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group, + c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location, + c.MaintenanceEntry, c.Notifier, c.User, + } { + n.Use(hooks...) + } } // Intercept adds the query interceptors to all the entity clients. // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { - c.Attachment.Intercept(interceptors...) - c.AuthRoles.Intercept(interceptors...) - c.AuthTokens.Intercept(interceptors...) - c.Document.Intercept(interceptors...) - c.Group.Intercept(interceptors...) - c.GroupInvitationToken.Intercept(interceptors...) - c.Item.Intercept(interceptors...) - c.ItemField.Intercept(interceptors...) - c.Label.Intercept(interceptors...) - c.Location.Intercept(interceptors...) - c.MaintenanceEntry.Intercept(interceptors...) - c.User.Intercept(interceptors...) + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group, + c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location, + c.MaintenanceEntry, c.Notifier, c.User, + } { + n.Intercept(interceptors...) + } } // Mutate implements the ent.Mutator interface. @@ -243,6 +297,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Location.mutate(ctx, m) case *MaintenanceEntryMutation: return c.MaintenanceEntry.mutate(ctx, m) + case *NotifierMutation: + return c.Notifier.mutate(ctx, m) case *UserMutation: return c.User.mutate(ctx, m) default: @@ -283,6 +339,21 @@ func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *Attachment return &AttachmentCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *AttachmentClient) MapCreateBulk(slice any, setFunc func(*AttachmentCreate, int)) *AttachmentCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AttachmentCreateBulk{err: fmt.Errorf("calling to AttachmentClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AttachmentCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AttachmentCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Attachment. func (c *AttachmentClient) Update() *AttachmentUpdate { mutation := newAttachmentMutation(c.config, OpUpdate) @@ -433,6 +504,21 @@ func (c *AuthRolesClient) CreateBulk(builders ...*AuthRolesCreate) *AuthRolesCre return &AuthRolesCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AuthRolesClient) MapCreateBulk(slice any, setFunc func(*AuthRolesCreate, int)) *AuthRolesCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AuthRolesCreateBulk{err: fmt.Errorf("calling to AuthRolesClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AuthRolesCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AuthRolesCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AuthRoles. func (c *AuthRolesClient) Update() *AuthRolesUpdate { mutation := newAuthRolesMutation(c.config, OpUpdate) @@ -567,6 +653,21 @@ func (c *AuthTokensClient) CreateBulk(builders ...*AuthTokensCreate) *AuthTokens return &AuthTokensCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. 
For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AuthTokensClient) MapCreateBulk(slice any, setFunc func(*AuthTokensCreate, int)) *AuthTokensCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AuthTokensCreateBulk{err: fmt.Errorf("calling to AuthTokensClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AuthTokensCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AuthTokensCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for AuthTokens. func (c *AuthTokensClient) Update() *AuthTokensUpdate { mutation := newAuthTokensMutation(c.config, OpUpdate) @@ -717,6 +818,21 @@ func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreate return &DocumentCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *DocumentClient) MapCreateBulk(slice any, setFunc func(*DocumentCreate, int)) *DocumentCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DocumentCreateBulk{err: fmt.Errorf("calling to DocumentClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DocumentCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DocumentCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Document. 
func (c *DocumentClient) Update() *DocumentUpdate { mutation := newDocumentMutation(c.config, OpUpdate) @@ -867,6 +983,21 @@ func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk { return &GroupCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GroupCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Group. func (c *GroupClient) Update() *GroupUpdate { mutation := newGroupMutation(c.config, OpUpdate) @@ -1023,6 +1154,22 @@ func (c *GroupClient) QueryInvitationTokens(gr *Group) *GroupInvitationTokenQuer return query } +// QueryNotifiers queries the notifiers edge of a Group. +func (c *GroupClient) QueryNotifiers(gr *Group) *NotifierQuery { + query := (&NotifierClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := gr.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(notifier.Table, notifier.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn), + ) + fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. 
func (c *GroupClient) Hooks() []Hook { return c.hooks.Group @@ -1081,6 +1228,21 @@ func (c *GroupInvitationTokenClient) CreateBulk(builders ...*GroupInvitationToke return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GroupInvitationTokenClient) MapCreateBulk(slice any, setFunc func(*GroupInvitationTokenCreate, int)) *GroupInvitationTokenCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GroupInvitationTokenCreateBulk{err: fmt.Errorf("calling to GroupInvitationTokenClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GroupInvitationTokenCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for GroupInvitationToken. func (c *GroupInvitationTokenClient) Update() *GroupInvitationTokenUpdate { mutation := newGroupInvitationTokenMutation(c.config, OpUpdate) @@ -1215,6 +1377,21 @@ func (c *ItemClient) CreateBulk(builders ...*ItemCreate) *ItemCreateBulk { return &ItemCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ItemClient) MapCreateBulk(slice any, setFunc func(*ItemCreate, int)) *ItemCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ItemCreateBulk{err: fmt.Errorf("calling to ItemClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ItemCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ItemCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Item. func (c *ItemClient) Update() *ItemUpdate { mutation := newItemMutation(c.config, OpUpdate) @@ -1275,6 +1452,22 @@ func (c *ItemClient) GetX(ctx context.Context, id uuid.UUID) *Item { return obj } +// QueryGroup queries the group edge of a Item. +func (c *ItemClient) QueryGroup(i *Item) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := i.ID + step := sqlgraph.NewStep( + sqlgraph.From(item.Table, item.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn), + ) + fromV = sqlgraph.Neighbors(i.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryParent queries the parent edge of a Item. func (c *ItemClient) QueryParent(i *Item) *ItemQuery { query := (&ItemClient{config: c.config}).Query() @@ -1307,22 +1500,6 @@ func (c *ItemClient) QueryChildren(i *Item) *ItemQuery { return query } -// QueryGroup queries the group edge of a Item. 
-func (c *ItemClient) QueryGroup(i *Item) *GroupQuery { - query := (&GroupClient{config: c.config}).Query() - query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := i.ID - step := sqlgraph.NewStep( - sqlgraph.From(item.Table, item.FieldID, id), - sqlgraph.To(group.Table, group.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn), - ) - fromV = sqlgraph.Neighbors(i.driver.Dialect(), step) - return fromV, nil - } - return query -} - // QueryLabel queries the label edge of a Item. func (c *ItemClient) QueryLabel(i *Item) *LabelQuery { query := (&LabelClient{config: c.config}).Query() @@ -1461,6 +1638,21 @@ func (c *ItemFieldClient) CreateBulk(builders ...*ItemFieldCreate) *ItemFieldCre return &ItemFieldCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ItemFieldClient) MapCreateBulk(slice any, setFunc func(*ItemFieldCreate, int)) *ItemFieldCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ItemFieldCreateBulk{err: fmt.Errorf("calling to ItemFieldClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ItemFieldCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ItemFieldCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ItemField. func (c *ItemFieldClient) Update() *ItemFieldUpdate { mutation := newItemFieldMutation(c.config, OpUpdate) @@ -1595,6 +1787,21 @@ func (c *LabelClient) CreateBulk(builders ...*LabelCreate) *LabelCreateBulk { return &LabelCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *LabelClient) MapCreateBulk(slice any, setFunc func(*LabelCreate, int)) *LabelCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &LabelCreateBulk{err: fmt.Errorf("calling to LabelClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*LabelCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &LabelCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Label. func (c *LabelClient) Update() *LabelUpdate { mutation := newLabelMutation(c.config, OpUpdate) @@ -1745,6 +1952,21 @@ func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreate return &LocationCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*LocationCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &LocationCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Location. func (c *LocationClient) Update() *LocationUpdate { mutation := newLocationMutation(c.config, OpUpdate) @@ -1805,6 +2027,22 @@ func (c *LocationClient) GetX(ctx context.Context, id uuid.UUID) *Location { return obj } +// QueryGroup queries the group edge of a Location. 
+func (c *LocationClient) QueryGroup(l *Location) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := l.ID + step := sqlgraph.NewStep( + sqlgraph.From(location.Table, location.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn), + ) + fromV = sqlgraph.Neighbors(l.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryParent queries the parent edge of a Location. func (c *LocationClient) QueryParent(l *Location) *LocationQuery { query := (&LocationClient{config: c.config}).Query() @@ -1837,22 +2075,6 @@ func (c *LocationClient) QueryChildren(l *Location) *LocationQuery { return query } -// QueryGroup queries the group edge of a Location. -func (c *LocationClient) QueryGroup(l *Location) *GroupQuery { - query := (&GroupClient{config: c.config}).Query() - query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := l.ID - step := sqlgraph.NewStep( - sqlgraph.From(location.Table, location.FieldID, id), - sqlgraph.To(group.Table, group.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn), - ) - fromV = sqlgraph.Neighbors(l.driver.Dialect(), step) - return fromV, nil - } - return query -} - // QueryItems queries the items edge of a Location. func (c *LocationClient) QueryItems(l *Location) *ItemQuery { query := (&ItemClient{config: c.config}).Query() @@ -1927,6 +2149,21 @@ func (c *MaintenanceEntryClient) CreateBulk(builders ...*MaintenanceEntryCreate) return &MaintenanceEntryCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *MaintenanceEntryClient) MapCreateBulk(slice any, setFunc func(*MaintenanceEntryCreate, int)) *MaintenanceEntryCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MaintenanceEntryCreateBulk{err: fmt.Errorf("calling to MaintenanceEntryClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MaintenanceEntryCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MaintenanceEntryCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for MaintenanceEntry. func (c *MaintenanceEntryClient) Update() *MaintenanceEntryUpdate { mutation := newMaintenanceEntryMutation(c.config, OpUpdate) @@ -2028,6 +2265,171 @@ func (c *MaintenanceEntryClient) mutate(ctx context.Context, m *MaintenanceEntry } } +// NotifierClient is a client for the Notifier schema. +type NotifierClient struct { + config +} + +// NewNotifierClient returns a client for the Notifier from the given config. +func NewNotifierClient(c config) *NotifierClient { + return &NotifierClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `notifier.Hooks(f(g(h())))`. +func (c *NotifierClient) Use(hooks ...Hook) { + c.hooks.Notifier = append(c.hooks.Notifier, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `notifier.Intercept(f(g(h())))`. +func (c *NotifierClient) Intercept(interceptors ...Interceptor) { + c.inters.Notifier = append(c.inters.Notifier, interceptors...) +} + +// Create returns a builder for creating a Notifier entity. +func (c *NotifierClient) Create() *NotifierCreate { + mutation := newNotifierMutation(c.config, OpCreate) + return &NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Notifier entities. 
+func (c *NotifierClient) CreateBulk(builders ...*NotifierCreate) *NotifierCreateBulk { + return &NotifierCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *NotifierClient) MapCreateBulk(slice any, setFunc func(*NotifierCreate, int)) *NotifierCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &NotifierCreateBulk{err: fmt.Errorf("calling to NotifierClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*NotifierCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &NotifierCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Notifier. +func (c *NotifierClient) Update() *NotifierUpdate { + mutation := newNotifierMutation(c.config, OpUpdate) + return &NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *NotifierClient) UpdateOne(n *Notifier) *NotifierUpdateOne { + mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifier(n)) + return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *NotifierClient) UpdateOneID(id uuid.UUID) *NotifierUpdateOne { + mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifierID(id)) + return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Notifier. +func (c *NotifierClient) Delete() *NotifierDelete { + mutation := newNotifierMutation(c.config, OpDelete) + return &NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *NotifierClient) DeleteOne(n *Notifier) *NotifierDeleteOne { + return c.DeleteOneID(n.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *NotifierClient) DeleteOneID(id uuid.UUID) *NotifierDeleteOne { + builder := c.Delete().Where(notifier.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &NotifierDeleteOne{builder} +} + +// Query returns a query builder for Notifier. +func (c *NotifierClient) Query() *NotifierQuery { + return &NotifierQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeNotifier}, + inters: c.Interceptors(), + } +} + +// Get returns a Notifier entity by its id. +func (c *NotifierClient) Get(ctx context.Context, id uuid.UUID) (*Notifier, error) { + return c.Query().Where(notifier.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *NotifierClient) GetX(ctx context.Context, id uuid.UUID) *Notifier { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroup queries the group edge of a Notifier. +func (c *NotifierClient) QueryGroup(n *Notifier) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(notifier.Table, notifier.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUser queries the user edge of a Notifier. 
+func (c *NotifierClient) QueryUser(n *Notifier) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(notifier.Table, notifier.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *NotifierClient) Hooks() []Hook { + return c.hooks.Notifier +} + +// Interceptors returns the client interceptors. +func (c *NotifierClient) Interceptors() []Interceptor { + return c.inters.Notifier +} + +func (c *NotifierClient) mutate(ctx context.Context, m *NotifierMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Notifier mutation op: %q", m.Op()) + } +} + // UserClient is a client for the User schema. type UserClient struct { config @@ -2061,6 +2463,21 @@ func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { return &UserCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for User. func (c *UserClient) Update() *UserUpdate { mutation := newUserMutation(c.config, OpUpdate) @@ -2153,6 +2570,22 @@ func (c *UserClient) QueryAuthTokens(u *User) *AuthTokensQuery { return query } +// QueryNotifiers queries the notifiers edge of a User. +func (c *UserClient) QueryNotifiers(u *User) *NotifierQuery { + query := (&NotifierClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(notifier.Table, notifier.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.NotifiersTable, user.NotifiersColumn), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *UserClient) Hooks() []Hook { return c.hooks.User @@ -2177,3 +2610,15 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) } } + +// hooks and interceptors per client, for fast access. 
+type ( + hooks struct { + Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item, + ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Hook + } + inters struct { + Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item, + ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Interceptor + } +) diff --git a/backend/internal/data/ent/config.go b/backend/internal/data/ent/config.go deleted file mode 100644 index c4dc769..0000000 --- a/backend/internal/data/ent/config.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "entgo.io/ent" - "entgo.io/ent/dialect" -) - -// Option function to configure the client. -type Option func(*config) - -// Config is the configuration for the client and its builder. -type config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...any) - // hooks to execute on mutations. - hooks *hooks - // interceptors to execute on queries. - inters *inters -} - -// hooks and interceptors per client, for fast access. -type ( - hooks struct { - Attachment []ent.Hook - AuthRoles []ent.Hook - AuthTokens []ent.Hook - Document []ent.Hook - Group []ent.Hook - GroupInvitationToken []ent.Hook - Item []ent.Hook - ItemField []ent.Hook - Label []ent.Hook - Location []ent.Hook - MaintenanceEntry []ent.Hook - User []ent.Hook - } - inters struct { - Attachment []ent.Interceptor - AuthRoles []ent.Interceptor - AuthTokens []ent.Interceptor - Document []ent.Interceptor - Group []ent.Interceptor - GroupInvitationToken []ent.Interceptor - Item []ent.Interceptor - ItemField []ent.Interceptor - Label []ent.Interceptor - Location []ent.Interceptor - MaintenanceEntry []ent.Interceptor - User []ent.Interceptor - } -) - -// Options applies the options on the config object. 
-func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...any)) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. -func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} diff --git a/backend/internal/data/ent/context.go b/backend/internal/data/ent/context.go deleted file mode 100644 index 7811bfa..0000000 --- a/backend/internal/data/ent/context.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. 
-func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} diff --git a/backend/internal/data/ent/document.go b/backend/internal/data/ent/document.go index bcaae9a..3141bac 100644 --- a/backend/internal/data/ent/document.go +++ b/backend/internal/data/ent/document.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/document" @@ -30,6 +31,7 @@ type Document struct { // The values are being populated by the DocumentQuery when eager-loading is set. Edges DocumentEdges `json:"edges"` group_documents *uuid.UUID + selectValues sql.SelectValues } // DocumentEdges holds the relations/edges for other nodes in the graph. @@ -79,7 +81,7 @@ func (*Document) scanValues(columns []string) ([]any, error) { case document.ForeignKeys[0]: // group_documents values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Document", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -130,11 +132,19 @@ func (d *Document) assignValues(columns []string, values []any) error { d.group_documents = new(uuid.UUID) *d.group_documents = *value.S.(*uuid.UUID) } + default: + d.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Document. +// This includes values selected through modifiers, order, etc. +func (d *Document) Value(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + // QueryGroup queries the "group" edge of the Document entity. 
func (d *Document) QueryGroup() *GroupQuery { return NewDocumentClient(d.config).QueryGroup(d) diff --git a/backend/internal/data/ent/document/document.go b/backend/internal/data/ent/document/document.go index b6a15eb..95380f4 100644 --- a/backend/internal/data/ent/document/document.go +++ b/backend/internal/data/ent/document/document.go @@ -5,6 +5,8 @@ package document import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -87,3 +89,66 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Document queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByTitle orders the results by the title field. +func ByTitle(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTitle, opts...).ToFunc() +} + +// ByPath orders the results by the path field. +func ByPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPath, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAttachmentsCount orders the results by attachments count. 
+func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...) + } +} + +// ByAttachments orders the results by attachments terms. +func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newAttachmentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) +} diff --git a/backend/internal/data/ent/document/where.go b/backend/internal/data/ent/document/where.go index 614cf4e..3e491ad 100644 --- a/backend/internal/data/ent/document/where.go +++ b/backend/internal/data/ent/document/where.go @@ -300,11 +300,7 @@ func HasGroup() predicate.Document { // HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). func HasGroupWith(preds ...predicate.Group) predicate.Document { return predicate.Document(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newGroupStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -327,11 +323,7 @@ func HasAttachments() predicate.Document { // HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates). 
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document { return predicate.Document(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AttachmentsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), - ) + step := newAttachmentsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -342,32 +334,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Document) predicate.Document { - return predicate.Document(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Document(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Document) predicate.Document { - return predicate.Document(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Document(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Document) predicate.Document { - return predicate.Document(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Document(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/document_create.go b/backend/internal/data/ent/document_create.go index eabfc0f..fe61e98 100644 --- a/backend/internal/data/ent/document_create.go +++ b/backend/internal/data/ent/document_create.go @@ -111,7 +111,7 @@ func (dc *DocumentCreate) Mutation() *DocumentMutation { // Save creates the Document in the database. 
func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) { dc.defaults() - return withHooks[*Document, DocumentMutation](ctx, dc.sqlSave, dc.mutation, dc.hooks) + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -238,10 +238,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) { Columns: []string{document.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -258,10 +255,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) { Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -275,11 +269,15 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) { // DocumentCreateBulk is the builder for creating many Document entities in bulk. type DocumentCreateBulk struct { config + err error builders []*DocumentCreate } // Save creates the Document entities in the database. 
func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) { + if dcb.err != nil { + return nil, dcb.err + } specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) nodes := make([]*Document, len(dcb.builders)) mutators := make([]Mutator, len(dcb.builders)) @@ -296,8 +294,8 @@ func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/document_delete.go b/backend/internal/data/ent/document_delete.go index d0481d3..5901c03 100644 --- a/backend/internal/data/ent/document_delete.go +++ b/backend/internal/data/ent/document_delete.go @@ -27,7 +27,7 @@ func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, DocumentMutation](ctx, dd.sqlExec, dd.mutation, dd.hooks) + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/document_query.go b/backend/internal/data/ent/document_query.go index e22abe2..34f4801 100644 --- a/backend/internal/data/ent/document_query.go +++ b/backend/internal/data/ent/document_query.go @@ -22,7 +22,7 @@ import ( type DocumentQuery struct { config ctx *QueryContext - order []OrderFunc + order []document.OrderOption inters []Interceptor predicates []predicate.Document withGroup *GroupQuery @@ -59,7 +59,7 @@ func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery { } // Order specifies how the records should be ordered. 
-func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery { +func (dq *DocumentQuery) Order(o ...document.OrderOption) *DocumentQuery { dq.order = append(dq.order, o...) return dq } @@ -297,7 +297,7 @@ func (dq *DocumentQuery) Clone() *DocumentQuery { return &DocumentQuery{ config: dq.config, ctx: dq.ctx.Clone(), - order: append([]OrderFunc{}, dq.order...), + order: append([]document.OrderOption{}, dq.order...), inters: append([]Interceptor{}, dq.inters...), predicates: append([]predicate.Document{}, dq.predicates...), withGroup: dq.withGroup.Clone(), @@ -498,7 +498,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ } query.withFKs = true query.Where(predicate.Attachment(func(s *sql.Selector) { - s.Where(sql.InValues(document.AttachmentsColumn, fks...)) + s.Where(sql.InValues(s.C(document.AttachmentsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -511,7 +511,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID) } assign(node, n) } diff --git a/backend/internal/data/ent/document_update.go b/backend/internal/data/ent/document_update.go index a172e5f..23e6d9c 100644 --- a/backend/internal/data/ent/document_update.go +++ b/backend/internal/data/ent/document_update.go @@ -43,12 +43,28 @@ func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate { return du } +// SetNillableTitle sets the "title" field if the given value is not nil. +func (du *DocumentUpdate) SetNillableTitle(s *string) *DocumentUpdate { + if s != nil { + du.SetTitle(*s) + } + return du +} + // SetPath sets the "path" field. 
func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate { du.mutation.SetPath(s) return du } +// SetNillablePath sets the "path" field if the given value is not nil. +func (du *DocumentUpdate) SetNillablePath(s *string) *DocumentUpdate { + if s != nil { + du.SetPath(*s) + } + return du +} + // SetGroupID sets the "group" edge to the Group entity by ID. func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate { du.mutation.SetGroupID(id) @@ -110,7 +126,7 @@ func (du *DocumentUpdate) RemoveAttachments(a ...*Attachment) *DocumentUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (du *DocumentUpdate) Save(ctx context.Context) (int, error) { du.defaults() - return withHooks[int, DocumentMutation](ctx, du.sqlSave, du.mutation, du.hooks) + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -190,10 +206,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{document.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -206,10 +219,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{document.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -225,10 +235,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -241,10 +248,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -260,10 +264,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -303,12 +304,28 @@ func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne { return duo } +// SetNillableTitle sets the "title" field if the given value is not nil. +func (duo *DocumentUpdateOne) SetNillableTitle(s *string) *DocumentUpdateOne { + if s != nil { + duo.SetTitle(*s) + } + return duo +} + // SetPath sets the "path" field. func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne { duo.mutation.SetPath(s) return duo } +// SetNillablePath sets the "path" field if the given value is not nil. +func (duo *DocumentUpdateOne) SetNillablePath(s *string) *DocumentUpdateOne { + if s != nil { + duo.SetPath(*s) + } + return duo +} + // SetGroupID sets the "group" edge to the Group entity by ID. func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne { duo.mutation.SetGroupID(id) @@ -383,7 +400,7 @@ func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUp // Save executes the query and returns the updated Document entity. 
func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) { duo.defaults() - return withHooks[*Document, DocumentMutation](ctx, duo.sqlSave, duo.mutation, duo.hooks) + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -480,10 +497,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err Columns: []string{document.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -496,10 +510,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err Columns: []string{document.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -515,10 +526,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -531,10 +539,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err Columns: []string{document.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -550,10 +555,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err Columns: []string{document.AttachmentsColumn}, 
Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/ent.go b/backend/internal/data/ent/ent.go index 27d53ca..6e52ac8 100644 --- a/backend/internal/data/ent/ent.go +++ b/backend/internal/data/ent/ent.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "reflect" + "sync" "entgo.io/ent" "entgo.io/ent/dialect/sql" @@ -22,6 +23,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -44,45 +46,68 @@ type ( MutateFunc = ent.MutateFunc ) +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + // OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. 
type OrderFunc func(*sql.Selector) -// columnChecker returns a function indicates if the column exists in the given column. -func columnChecker(table string) func(string) error { - checks := map[string]func(string) bool{ - attachment.Table: attachment.ValidColumn, - authroles.Table: authroles.ValidColumn, - authtokens.Table: authtokens.ValidColumn, - document.Table: document.ValidColumn, - group.Table: group.ValidColumn, - groupinvitationtoken.Table: groupinvitationtoken.ValidColumn, - item.Table: item.ValidColumn, - itemfield.Table: itemfield.ValidColumn, - label.Table: label.ValidColumn, - location.Table: location.ValidColumn, - maintenanceentry.Table: maintenanceentry.ValidColumn, - user.Table: user.ValidColumn, - } - check, ok := checks[table] - if !ok { - return func(string) error { - return fmt.Errorf("unknown table %q", table) - } - } - return func(column string) error { - if !check(column) { - return fmt.Errorf("unknown column %q for table %q", column, table) - } - return nil - } +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + attachment.Table: attachment.ValidColumn, + authroles.Table: authroles.ValidColumn, + authtokens.Table: authtokens.ValidColumn, + document.Table: document.ValidColumn, + group.Table: group.ValidColumn, + groupinvitationtoken.Table: groupinvitationtoken.ValidColumn, + item.Table: item.ValidColumn, + itemfield.Table: itemfield.ValidColumn, + label.Table: label.ValidColumn, + location.Table: location.ValidColumn, + maintenanceentry.Table: maintenanceentry.ValidColumn, + notifier.Table: notifier.ValidColumn, + user.Table: user.ValidColumn, + }) + }) + return columnCheck(table, column) } // Asc applies the given fields in ASC order.
-func Asc(fields ...string) OrderFunc { +func Asc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Asc(s.C(f))) @@ -91,11 +116,10 @@ func Asc(fields ...string) OrderFunc { } // Desc applies the given fields in DESC order. -func Desc(fields ...string) OrderFunc { +func Desc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Desc(s.C(f))) @@ -127,8 +151,7 @@ func Count() AggregateFunc { // Max applies the "max" aggregation function on the given field of each group. func Max(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -139,8 +162,7 @@ func Max(field string) AggregateFunc { // Mean applies the "mean" aggregation function on the given field of each group. func Mean(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -151,8 +173,7 @@ func Mean(field string) AggregateFunc { // Min applies the "min" aggregation function on the given field of each group. 
func Min(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -163,8 +184,7 @@ func Min(field string) AggregateFunc { // Sum applies the "sum" aggregation function on the given field of each group. func Sum(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -501,7 +521,7 @@ func withHooks[V Value, M any, PM interface { return exec(ctx) } var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutationT, ok := m.(PM) + mutationT, ok := any(m).(PM) if !ok { return nil, fmt.Errorf("unexpected mutation type %T", m) } diff --git a/backend/internal/data/ent/group.go b/backend/internal/data/ent/group.go index 25e1ce4..69c67de 100644 --- a/backend/internal/data/ent/group.go +++ b/backend/internal/data/ent/group.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -24,10 +25,11 @@ type Group struct { // Name holds the value of the "name" field. Name string `json:"name,omitempty"` // Currency holds the value of the "currency" field. - Currency group.Currency `json:"currency,omitempty"` + Currency string `json:"currency,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the GroupQuery when eager-loading is set. - Edges GroupEdges `json:"edges"` + Edges GroupEdges `json:"edges"` + selectValues sql.SelectValues } // GroupEdges holds the relations/edges for other nodes in the graph. 
@@ -44,9 +46,11 @@ type GroupEdges struct { Documents []*Document `json:"documents,omitempty"` // InvitationTokens holds the value of the invitation_tokens edge. InvitationTokens []*GroupInvitationToken `json:"invitation_tokens,omitempty"` + // Notifiers holds the value of the notifiers edge. + Notifiers []*Notifier `json:"notifiers,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [6]bool + loadedTypes [7]bool } // UsersOrErr returns the Users value or an error if the edge @@ -103,6 +107,15 @@ func (e GroupEdges) InvitationTokensOrErr() ([]*GroupInvitationToken, error) { return nil, &NotLoadedError{edge: "invitation_tokens"} } +// NotifiersOrErr returns the Notifiers value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) NotifiersOrErr() ([]*Notifier, error) { + if e.loadedTypes[6] { + return e.Notifiers, nil + } + return nil, &NotLoadedError{edge: "notifiers"} +} + // scanValues returns the types for scanning values from sql.Rows. func (*Group) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) @@ -115,7 +128,7 @@ func (*Group) scanValues(columns []string) ([]any, error) { case group.FieldID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type Group", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -157,13 +170,21 @@ func (gr *Group) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field currency", values[i]) } else if value.Valid { - gr.Currency = group.Currency(value.String) + gr.Currency = value.String } + default: + gr.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Group. +// This includes values selected through modifiers, order, etc. 
+func (gr *Group) Value(name string) (ent.Value, error) { + return gr.selectValues.Get(name) +} + // QueryUsers queries the "users" edge of the Group entity. func (gr *Group) QueryUsers() *UserQuery { return NewGroupClient(gr.config).QueryUsers(gr) @@ -194,6 +215,11 @@ func (gr *Group) QueryInvitationTokens() *GroupInvitationTokenQuery { return NewGroupClient(gr.config).QueryInvitationTokens(gr) } +// QueryNotifiers queries the "notifiers" edge of the Group entity. +func (gr *Group) QueryNotifiers() *NotifierQuery { + return NewGroupClient(gr.config).QueryNotifiers(gr) +} + // Update returns a builder for updating this Group. // Note that you need to call Group.Unwrap() before calling this method if this Group // was returned from a transaction, and the transaction was committed or rolled back. @@ -227,7 +253,7 @@ func (gr *Group) String() string { builder.WriteString(gr.Name) builder.WriteString(", ") builder.WriteString("currency=") - builder.WriteString(fmt.Sprintf("%v", gr.Currency)) + builder.WriteString(gr.Currency) builder.WriteByte(')') return builder.String() } diff --git a/backend/internal/data/ent/group/group.go b/backend/internal/data/ent/group/group.go index 28cf69d..32cb101 100644 --- a/backend/internal/data/ent/group/group.go +++ b/backend/internal/data/ent/group/group.go @@ -3,9 +3,10 @@ package group import ( - "fmt" "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -34,6 +35,8 @@ const ( EdgeDocuments = "documents" // EdgeInvitationTokens holds the string denoting the invitation_tokens edge name in mutations. EdgeInvitationTokens = "invitation_tokens" + // EdgeNotifiers holds the string denoting the notifiers edge name in mutations. + EdgeNotifiers = "notifiers" // Table holds the table name of the group in the database. Table = "groups" // UsersTable is the table that holds the users relation/edge. 
@@ -78,6 +81,13 @@ const ( InvitationTokensInverseTable = "group_invitation_tokens" // InvitationTokensColumn is the table column denoting the invitation_tokens relation/edge. InvitationTokensColumn = "group_invitation_tokens" + // NotifiersTable is the table that holds the notifiers relation/edge. + NotifiersTable = "notifiers" + // NotifiersInverseTable is the table name for the Notifier entity. + // It exists in this package in order to avoid circular dependency with the "notifier" package. + NotifiersInverseTable = "notifiers" + // NotifiersColumn is the table column denoting the notifiers relation/edge. + NotifiersColumn = "group_id" ) // Columns holds all SQL columns for group fields. @@ -108,42 +118,183 @@ var ( UpdateDefaultUpdatedAt func() time.Time // NameValidator is a validator for the "name" field. It is called by the builders before save. NameValidator func(string) error + // DefaultCurrency holds the default value on creation for the "currency" field. + DefaultCurrency string // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) -// Currency defines the type for the "currency" enum field. -type Currency string +// OrderOption defines the ordering options for the Group queries. +type OrderOption func(*sql.Selector) -// CurrencyUsd is the default value of the Currency enum. -const DefaultCurrency = CurrencyUsd - -// Currency values. -const ( - CurrencyUsd Currency = "usd" - CurrencyEur Currency = "eur" - CurrencyGbp Currency = "gbp" - CurrencyJpy Currency = "jpy" - CurrencyZar Currency = "zar" - CurrencyAud Currency = "aud" - CurrencyNok Currency = "nok" - CurrencySek Currency = "sek" - CurrencyDkk Currency = "dkk" - CurrencyInr Currency = "inr" - CurrencyRmb Currency = "rmb" - CurrencyBgn Currency = "bgn" -) - -func (c Currency) String() string { - return string(c) +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() } -// CurrencyValidator is a validator for the "currency" field enum values. It is called by the builders before save. -func CurrencyValidator(c Currency) error { - switch c { - case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencySek, CurrencyDkk, CurrencyInr, CurrencyRmb, CurrencyBgn: - return nil - default: - return fmt.Errorf("group: invalid enum value for currency field: %q", c) +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCurrency orders the results by the currency field. +func ByCurrency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCurrency, opts...).ToFunc() +} + +// ByUsersCount orders the results by users count. +func ByUsersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...) } } + +// ByUsers orders the results by users terms. +func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByLocationsCount orders the results by locations count. +func ByLocationsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newLocationsStep(), opts...) 
+ } +} + +// ByLocations orders the results by locations terms. +func ByLocations(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newLocationsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByItemsCount orders the results by items count. +func ByItemsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...) + } +} + +// ByItems orders the results by items terms. +func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByLabelsCount orders the results by labels count. +func ByLabelsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newLabelsStep(), opts...) + } +} + +// ByLabels orders the results by labels terms. +func ByLabels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newLabelsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByDocumentsCount orders the results by documents count. +func ByDocumentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDocumentsStep(), opts...) + } +} + +// ByDocuments orders the results by documents terms. +func ByDocuments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDocumentsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByInvitationTokensCount orders the results by invitation_tokens count. +func ByInvitationTokensCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newInvitationTokensStep(), opts...) 
+ } +} + +// ByInvitationTokens orders the results by invitation_tokens terms. +func ByInvitationTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newInvitationTokensStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByNotifiersCount orders the results by notifiers count. +func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...) + } +} + +// ByNotifiers orders the results by notifiers terms. +func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newUsersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn), + ) +} +func newLocationsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(LocationsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn), + ) +} +func newItemsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn), + ) +} +func newLabelsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(LabelsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn), + ) +} +func newDocumentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DocumentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), + ) +} +func newInvitationTokensStep() *sqlgraph.Step { + return sqlgraph.NewStep( + 
sqlgraph.From(Table, FieldID), + sqlgraph.To(InvitationTokensInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn), + ) +} +func newNotifiersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NotifiersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn), + ) +} diff --git a/backend/internal/data/ent/group/where.go b/backend/internal/data/ent/group/where.go index e6d434b..d18faa7 100644 --- a/backend/internal/data/ent/group/where.go +++ b/backend/internal/data/ent/group/where.go @@ -71,6 +71,11 @@ func Name(v string) predicate.Group { return predicate.Group(sql.FieldEQ(FieldName, v)) } +// Currency applies equality check predicate on the "currency" field. It's identical to CurrencyEQ. +func Currency(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCurrency, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Group { return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) @@ -217,25 +222,70 @@ func NameContainsFold(v string) predicate.Group { } // CurrencyEQ applies the EQ predicate on the "currency" field. -func CurrencyEQ(v Currency) predicate.Group { +func CurrencyEQ(v string) predicate.Group { return predicate.Group(sql.FieldEQ(FieldCurrency, v)) } // CurrencyNEQ applies the NEQ predicate on the "currency" field. -func CurrencyNEQ(v Currency) predicate.Group { +func CurrencyNEQ(v string) predicate.Group { return predicate.Group(sql.FieldNEQ(FieldCurrency, v)) } // CurrencyIn applies the In predicate on the "currency" field. -func CurrencyIn(vs ...Currency) predicate.Group { +func CurrencyIn(vs ...string) predicate.Group { return predicate.Group(sql.FieldIn(FieldCurrency, vs...)) } // CurrencyNotIn applies the NotIn predicate on the "currency" field. 
-func CurrencyNotIn(vs ...Currency) predicate.Group { +func CurrencyNotIn(vs ...string) predicate.Group { return predicate.Group(sql.FieldNotIn(FieldCurrency, vs...)) } +// CurrencyGT applies the GT predicate on the "currency" field. +func CurrencyGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldCurrency, v)) +} + +// CurrencyGTE applies the GTE predicate on the "currency" field. +func CurrencyGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldCurrency, v)) +} + +// CurrencyLT applies the LT predicate on the "currency" field. +func CurrencyLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldCurrency, v)) +} + +// CurrencyLTE applies the LTE predicate on the "currency" field. +func CurrencyLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldCurrency, v)) +} + +// CurrencyContains applies the Contains predicate on the "currency" field. +func CurrencyContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldCurrency, v)) +} + +// CurrencyHasPrefix applies the HasPrefix predicate on the "currency" field. +func CurrencyHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldCurrency, v)) +} + +// CurrencyHasSuffix applies the HasSuffix predicate on the "currency" field. +func CurrencyHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldCurrency, v)) +} + +// CurrencyEqualFold applies the EqualFold predicate on the "currency" field. +func CurrencyEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldCurrency, v)) +} + +// CurrencyContainsFold applies the ContainsFold predicate on the "currency" field. +func CurrencyContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldCurrency, v)) +} + // HasUsers applies the HasEdge predicate on the "users" edge. 
func HasUsers() predicate.Group { return predicate.Group(func(s *sql.Selector) { @@ -250,11 +300,7 @@ func HasUsers() predicate.Group { // HasUsersWith applies the HasEdge predicate on the "users" edge with a given conditions (other predicates). func HasUsersWith(preds ...predicate.User) predicate.Group { return predicate.Group(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(UsersInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn), - ) + step := newUsersStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -277,11 +323,7 @@ func HasLocations() predicate.Group { // HasLocationsWith applies the HasEdge predicate on the "locations" edge with a given conditions (other predicates). func HasLocationsWith(preds ...predicate.Location) predicate.Group { return predicate.Group(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(LocationsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn), - ) + step := newLocationsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -304,11 +346,7 @@ func HasItems() predicate.Group { // HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates). func HasItemsWith(preds ...predicate.Item) predicate.Group { return predicate.Group(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn), - ) + step := newItemsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -331,11 +369,7 @@ func HasLabels() predicate.Group { // HasLabelsWith applies the HasEdge predicate on the "labels" edge with a given conditions (other predicates). 
func HasLabelsWith(preds ...predicate.Label) predicate.Group { return predicate.Group(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(LabelsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn), - ) + step := newLabelsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -358,11 +392,7 @@ func HasDocuments() predicate.Group { // HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates). func HasDocumentsWith(preds ...predicate.Document) predicate.Group { return predicate.Group(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DocumentsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn), - ) + step := newDocumentsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -384,12 +414,31 @@ func HasInvitationTokens() predicate.Group { // HasInvitationTokensWith applies the HasEdge predicate on the "invitation_tokens" edge with a given conditions (other predicates). func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newInvitationTokensStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasNotifiers applies the HasEdge predicate on the "notifiers" edge. 
+func HasNotifiers() predicate.Group { return predicate.Group(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(InvitationTokensInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn), + sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn), ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates). +func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newNotifiersStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -400,32 +449,15 @@ func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate. // And groups predicates with the AND operator between them. func And(predicates ...predicate.Group) predicate.Group { - return predicate.Group(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Group(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Group) predicate.Group { - return predicate.Group(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Group(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Group) predicate.Group { - return predicate.Group(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Group(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/group_create.go b/backend/internal/data/ent/group_create.go index 9f0e90a..be56ba0 100644 --- a/backend/internal/data/ent/group_create.go +++ b/backend/internal/data/ent/group_create.go @@ -17,6 +17,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -62,15 +63,15 @@ func (gc *GroupCreate) SetName(s string) *GroupCreate { } // SetCurrency sets the "currency" field. -func (gc *GroupCreate) SetCurrency(gr group.Currency) *GroupCreate { - gc.mutation.SetCurrency(gr) +func (gc *GroupCreate) SetCurrency(s string) *GroupCreate { + gc.mutation.SetCurrency(s) return gc } // SetNillableCurrency sets the "currency" field if the given value is not nil. -func (gc *GroupCreate) SetNillableCurrency(gr *group.Currency) *GroupCreate { - if gr != nil { - gc.SetCurrency(*gr) +func (gc *GroupCreate) SetNillableCurrency(s *string) *GroupCreate { + if s != nil { + gc.SetCurrency(*s) } return gc } @@ -179,6 +180,21 @@ func (gc *GroupCreate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupCre return gc.AddInvitationTokenIDs(ids...) } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (gc *GroupCreate) AddNotifierIDs(ids ...uuid.UUID) *GroupCreate { + gc.mutation.AddNotifierIDs(ids...) + return gc +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (gc *GroupCreate) AddNotifiers(n ...*Notifier) *GroupCreate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return gc.AddNotifierIDs(ids...) 
+} + // Mutation returns the GroupMutation object of the builder. func (gc *GroupCreate) Mutation() *GroupMutation { return gc.mutation @@ -187,7 +203,7 @@ func (gc *GroupCreate) Mutation() *GroupMutation { // Save creates the Group in the database. func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) { gc.defaults() - return withHooks[*Group, GroupMutation](ctx, gc.sqlSave, gc.mutation, gc.hooks) + return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -251,11 +267,6 @@ func (gc *GroupCreate) check() error { if _, ok := gc.mutation.Currency(); !ok { return &ValidationError{Name: "currency", err: errors.New(`ent: missing required field "Group.currency"`)} } - if v, ok := gc.mutation.Currency(); ok { - if err := group.CurrencyValidator(v); err != nil { - return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)} - } - } return nil } @@ -304,7 +315,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _node.Name = value } if value, ok := gc.mutation.Currency(); ok { - _spec.SetField(group.FieldCurrency, field.TypeEnum, value) + _spec.SetField(group.FieldCurrency, field.TypeString, value) _node.Currency = value } if nodes := gc.mutation.UsersIDs(); len(nodes) > 0 { @@ -315,10 +326,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -334,10 +342,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -353,10 +358,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -372,10 +374,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -391,10 +390,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -410,10 +406,23 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := gc.mutation.NotifiersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -427,11 +436,15 @@ func (gc 
*GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { // GroupCreateBulk is the builder for creating many Group entities in bulk. type GroupCreateBulk struct { config + err error builders []*GroupCreate } // Save creates the Group entities in the database. func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { + if gcb.err != nil { + return nil, gcb.err + } specs := make([]*sqlgraph.CreateSpec, len(gcb.builders)) nodes := make([]*Group, len(gcb.builders)) mutators := make([]Mutator, len(gcb.builders)) @@ -448,8 +461,8 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/group_delete.go b/backend/internal/data/ent/group_delete.go index 29e0ffc..b8c3e59 100644 --- a/backend/internal/data/ent/group_delete.go +++ b/backend/internal/data/ent/group_delete.go @@ -27,7 +27,7 @@ func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (gd *GroupDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, GroupMutation](ctx, gd.sqlExec, gd.mutation, gd.hooks) + return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks) } // ExecX is like Exec, but panics if an error occurs. 
diff --git a/backend/internal/data/ent/group_query.go b/backend/internal/data/ent/group_query.go index c9bef5f..f17bd3b 100644 --- a/backend/internal/data/ent/group_query.go +++ b/backend/internal/data/ent/group_query.go @@ -18,6 +18,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -26,7 +27,7 @@ import ( type GroupQuery struct { config ctx *QueryContext - order []OrderFunc + order []group.OrderOption inters []Interceptor predicates []predicate.Group withUsers *UserQuery @@ -35,6 +36,7 @@ type GroupQuery struct { withLabels *LabelQuery withDocuments *DocumentQuery withInvitationTokens *GroupInvitationTokenQuery + withNotifiers *NotifierQuery // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -66,7 +68,7 @@ func (gq *GroupQuery) Unique(unique bool) *GroupQuery { } // Order specifies how the records should be ordered. -func (gq *GroupQuery) Order(o ...OrderFunc) *GroupQuery { +func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery { gq.order = append(gq.order, o...) return gq } @@ -203,6 +205,28 @@ func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery { return query } +// QueryNotifiers chains the current query on the "notifiers" edge. 
+func (gq *GroupQuery) QueryNotifiers() *NotifierQuery { + query := (&NotifierClient{config: gq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := gq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := gq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(notifier.Table, notifier.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn), + ) + fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first Group entity from the query. // Returns a *NotFoundError when no Group was found. func (gq *GroupQuery) First(ctx context.Context) (*Group, error) { @@ -392,7 +416,7 @@ func (gq *GroupQuery) Clone() *GroupQuery { return &GroupQuery{ config: gq.config, ctx: gq.ctx.Clone(), - order: append([]OrderFunc{}, gq.order...), + order: append([]group.OrderOption{}, gq.order...), inters: append([]Interceptor{}, gq.inters...), predicates: append([]predicate.Group{}, gq.predicates...), withUsers: gq.withUsers.Clone(), @@ -401,6 +425,7 @@ func (gq *GroupQuery) Clone() *GroupQuery { withLabels: gq.withLabels.Clone(), withDocuments: gq.withDocuments.Clone(), withInvitationTokens: gq.withInvitationTokens.Clone(), + withNotifiers: gq.withNotifiers.Clone(), // clone intermediate query. sql: gq.sql.Clone(), path: gq.path, @@ -473,6 +498,17 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQue return gq } +// WithNotifiers tells the query-builder to eager-load the nodes that are connected to +// the "notifiers" edge. The optional arguments are used to configure the query builder of the edge. 
+func (gq *GroupQuery) WithNotifiers(opts ...func(*NotifierQuery)) *GroupQuery { + query := (&NotifierClient{config: gq.config}).Query() + for _, opt := range opts { + opt(query) + } + gq.withNotifiers = query + return gq +} + // GroupBy is used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -551,13 +587,14 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, var ( nodes = []*Group{} _spec = gq.querySpec() - loadedTypes = [6]bool{ + loadedTypes = [7]bool{ gq.withUsers != nil, gq.withLocations != nil, gq.withItems != nil, gq.withLabels != nil, gq.withDocuments != nil, gq.withInvitationTokens != nil, + gq.withNotifiers != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { @@ -622,6 +659,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, return nil, err } } + if query := gq.withNotifiers; query != nil { + if err := gq.loadNotifiers(ctx, query, nodes, + func(n *Group) { n.Edges.Notifiers = []*Notifier{} }, + func(n *Group, e *Notifier) { n.Edges.Notifiers = append(n.Edges.Notifiers, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -637,7 +681,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []* } query.withFKs = true query.Where(predicate.User(func(s *sql.Selector) { - s.Where(sql.InValues(group.UsersColumn, fks...)) + s.Where(sql.InValues(s.C(group.UsersColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -650,7 +694,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []* } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_users" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_users" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -668,7 +712,7 @@ func (gq *GroupQuery) loadLocations(ctx 
context.Context, query *LocationQuery, n } query.withFKs = true query.Where(predicate.Location(func(s *sql.Selector) { - s.Where(sql.InValues(group.LocationsColumn, fks...)) + s.Where(sql.InValues(s.C(group.LocationsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -681,7 +725,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_locations" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -699,7 +743,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []* } query.withFKs = true query.Where(predicate.Item(func(s *sql.Selector) { - s.Where(sql.InValues(group.ItemsColumn, fks...)) + s.Where(sql.InValues(s.C(group.ItemsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -712,7 +756,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []* } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_items" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_items" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -730,7 +774,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [ } query.withFKs = true query.Where(predicate.Label(func(s *sql.Selector) { - s.Where(sql.InValues(group.LabelsColumn, fks...)) + s.Where(sql.InValues(s.C(group.LabelsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -743,7 +787,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [ } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_labels" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_labels" returned %v 
for node %v`, *fk, n.ID) } assign(node, n) } @@ -761,7 +805,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n } query.withFKs = true query.Where(predicate.Document(func(s *sql.Selector) { - s.Where(sql.InValues(group.DocumentsColumn, fks...)) + s.Where(sql.InValues(s.C(group.DocumentsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -774,7 +818,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_documents" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -792,7 +836,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi } query.withFKs = true query.Where(predicate.GroupInvitationToken(func(s *sql.Selector) { - s.Where(sql.InValues(group.InvitationTokensColumn, fks...)) + s.Where(sql.InValues(s.C(group.InvitationTokensColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -805,7 +849,37 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, nodes []*Group, init func(*Group), assign func(*Group, *Notifier)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(notifier.FieldGroupID) + } 
+ query.Where(predicate.Notifier(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.NotifiersColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID) } assign(node, n) } diff --git a/backend/internal/data/ent/group_update.go b/backend/internal/data/ent/group_update.go index 1ff9cd6..fdb11a3 100644 --- a/backend/internal/data/ent/group_update.go +++ b/backend/internal/data/ent/group_update.go @@ -18,6 +18,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -47,16 +48,24 @@ func (gu *GroupUpdate) SetName(s string) *GroupUpdate { return gu } +// SetNillableName sets the "name" field if the given value is not nil. +func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate { + if s != nil { + gu.SetName(*s) + } + return gu +} + // SetCurrency sets the "currency" field. -func (gu *GroupUpdate) SetCurrency(gr group.Currency) *GroupUpdate { - gu.mutation.SetCurrency(gr) +func (gu *GroupUpdate) SetCurrency(s string) *GroupUpdate { + gu.mutation.SetCurrency(s) return gu } // SetNillableCurrency sets the "currency" field if the given value is not nil. 
-func (gu *GroupUpdate) SetNillableCurrency(gr *group.Currency) *GroupUpdate { - if gr != nil { - gu.SetCurrency(*gr) +func (gu *GroupUpdate) SetNillableCurrency(s *string) *GroupUpdate { + if s != nil { + gu.SetCurrency(*s) } return gu } @@ -151,6 +160,21 @@ func (gu *GroupUpdate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupUpd return gu.AddInvitationTokenIDs(ids...) } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (gu *GroupUpdate) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.AddNotifierIDs(ids...) + return gu +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (gu *GroupUpdate) AddNotifiers(n ...*Notifier) *GroupUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return gu.AddNotifierIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (gu *GroupUpdate) Mutation() *GroupMutation { return gu.mutation @@ -282,10 +306,31 @@ func (gu *GroupUpdate) RemoveInvitationTokens(g ...*GroupInvitationToken) *Group return gu.RemoveInvitationTokenIDs(ids...) } +// ClearNotifiers clears all "notifiers" edges to the Notifier entity. +func (gu *GroupUpdate) ClearNotifiers() *GroupUpdate { + gu.mutation.ClearNotifiers() + return gu +} + +// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs. +func (gu *GroupUpdate) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdate { + gu.mutation.RemoveNotifierIDs(ids...) + return gu +} + +// RemoveNotifiers removes "notifiers" edges to Notifier entities. +func (gu *GroupUpdate) RemoveNotifiers(n ...*Notifier) *GroupUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return gu.RemoveNotifierIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. 
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) { gu.defaults() - return withHooks[int, GroupMutation](ctx, gu.sqlSave, gu.mutation, gu.hooks) + return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -325,11 +370,6 @@ func (gu *GroupUpdate) check() error { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} } } - if v, ok := gu.mutation.Currency(); ok { - if err := group.CurrencyValidator(v); err != nil { - return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)} - } - } return nil } @@ -352,7 +392,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { _spec.SetField(group.FieldName, field.TypeString, value) } if value, ok := gu.mutation.Currency(); ok { - _spec.SetField(group.FieldCurrency, field.TypeEnum, value) + _spec.SetField(group.FieldCurrency, field.TypeString, value) } if gu.mutation.UsersCleared() { edge := &sqlgraph.EdgeSpec{ @@ -362,10 +402,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -378,10 +415,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -397,10 +431,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -416,10 +447,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -432,10 +460,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -451,10 +476,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -470,10 +492,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -486,10 +505,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for 
_, k := range nodes { @@ -505,10 +521,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -524,10 +537,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -540,10 +550,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -559,10 +566,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -578,10 +582,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -594,10 +595,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.DocumentsColumn}, 
Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -613,10 +611,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -632,10 +627,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -648,10 +640,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -667,10 +656,52 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if gu.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !gu.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := gu.mutation.NotifiersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -710,16 +741,24 @@ func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne { return guo } +// SetNillableName sets the "name" field if the given value is not nil. +func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne { + if s != nil { + guo.SetName(*s) + } + return guo +} + // SetCurrency sets the "currency" field. -func (guo *GroupUpdateOne) SetCurrency(gr group.Currency) *GroupUpdateOne { - guo.mutation.SetCurrency(gr) +func (guo *GroupUpdateOne) SetCurrency(s string) *GroupUpdateOne { + guo.mutation.SetCurrency(s) return guo } // SetNillableCurrency sets the "currency" field if the given value is not nil. 
-func (guo *GroupUpdateOne) SetNillableCurrency(gr *group.Currency) *GroupUpdateOne { - if gr != nil { - guo.SetCurrency(*gr) +func (guo *GroupUpdateOne) SetNillableCurrency(s *string) *GroupUpdateOne { + if s != nil { + guo.SetCurrency(*s) } return guo } @@ -814,6 +853,21 @@ func (guo *GroupUpdateOne) AddInvitationTokens(g ...*GroupInvitationToken) *Grou return guo.AddInvitationTokenIDs(ids...) } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (guo *GroupUpdateOne) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.AddNotifierIDs(ids...) + return guo +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (guo *GroupUpdateOne) AddNotifiers(n ...*Notifier) *GroupUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return guo.AddNotifierIDs(ids...) +} + // Mutation returns the GroupMutation object of the builder. func (guo *GroupUpdateOne) Mutation() *GroupMutation { return guo.mutation @@ -945,6 +999,27 @@ func (guo *GroupUpdateOne) RemoveInvitationTokens(g ...*GroupInvitationToken) *G return guo.RemoveInvitationTokenIDs(ids...) } +// ClearNotifiers clears all "notifiers" edges to the Notifier entity. +func (guo *GroupUpdateOne) ClearNotifiers() *GroupUpdateOne { + guo.mutation.ClearNotifiers() + return guo +} + +// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs. +func (guo *GroupUpdateOne) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne { + guo.mutation.RemoveNotifierIDs(ids...) + return guo +} + +// RemoveNotifiers removes "notifiers" edges to Notifier entities. +func (guo *GroupUpdateOne) RemoveNotifiers(n ...*Notifier) *GroupUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return guo.RemoveNotifierIDs(ids...) +} + // Where appends a list predicates to the GroupUpdate builder. func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne { guo.mutation.Where(ps...) 
@@ -961,7 +1036,7 @@ func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOn // Save executes the query and returns the updated Group entity. func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) { guo.defaults() - return withHooks[*Group, GroupMutation](ctx, guo.sqlSave, guo.mutation, guo.hooks) + return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -1001,11 +1076,6 @@ func (guo *GroupUpdateOne) check() error { return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} } } - if v, ok := guo.mutation.Currency(); ok { - if err := group.CurrencyValidator(v); err != nil { - return &ValidationError{Name: "currency", err: fmt.Errorf(`ent: validator failed for field "Group.currency": %w`, err)} - } - } return nil } @@ -1045,7 +1115,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error _spec.SetField(group.FieldName, field.TypeString, value) } if value, ok := guo.mutation.Currency(); ok { - _spec.SetField(group.FieldCurrency, field.TypeEnum, value) + _spec.SetField(group.FieldCurrency, field.TypeString, value) } if guo.mutation.UsersCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1055,10 +1125,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1071,10 +1138,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } 
for _, k := range nodes { @@ -1090,10 +1154,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.UsersColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: user.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1109,10 +1170,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1125,10 +1183,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1144,10 +1199,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LocationsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1163,10 +1215,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1179,10 +1228,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx 
context.Context) (_node *Group, err error Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1198,10 +1244,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1217,10 +1260,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1233,10 +1273,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1252,10 +1289,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.LabelsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1271,10 +1305,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1287,10 +1318,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1306,10 +1334,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.DocumentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: document.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1325,10 +1350,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1341,10 +1363,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1360,10 +1379,52 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error Columns: []string{group.InvitationTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: groupinvitationtoken.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if guo.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !guo.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := guo.mutation.NotifiersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.NotifiersTable, + Columns: []string{group.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/groupinvitationtoken.go b/backend/internal/data/ent/groupinvitationtoken.go index f8c2bb3..d715cc6 100644 --- a/backend/internal/data/ent/groupinvitationtoken.go +++ b/backend/internal/data/ent/groupinvitationtoken.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -32,6 +33,7 @@ type 
GroupInvitationToken struct { // The values are being populated by the GroupInvitationTokenQuery when eager-loading is set. Edges GroupInvitationTokenEdges `json:"edges"` group_invitation_tokens *uuid.UUID + selectValues sql.SelectValues } // GroupInvitationTokenEdges holds the relations/edges for other nodes in the graph. @@ -72,7 +74,7 @@ func (*GroupInvitationToken) scanValues(columns []string) ([]any, error) { case groupinvitationtoken.ForeignKeys[0]: // group_invitation_tokens values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type GroupInvitationToken", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -129,11 +131,19 @@ func (git *GroupInvitationToken) assignValues(columns []string, values []any) er git.group_invitation_tokens = new(uuid.UUID) *git.group_invitation_tokens = *value.S.(*uuid.UUID) } + default: + git.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the GroupInvitationToken. +// This includes values selected through modifiers, order, etc. +func (git *GroupInvitationToken) Value(name string) (ent.Value, error) { + return git.selectValues.Get(name) +} + // QueryGroup queries the "group" edge of the GroupInvitationToken entity. 
func (git *GroupInvitationToken) QueryGroup() *GroupQuery { return NewGroupInvitationTokenClient(git.config).QueryGroup(git) diff --git a/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go b/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go index 1daea17..748d739 100644 --- a/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go +++ b/backend/internal/data/ent/groupinvitationtoken/groupinvitationtoken.go @@ -5,6 +5,8 @@ package groupinvitationtoken import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -81,3 +83,45 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the GroupInvitationToken queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByUses orders the results by the uses field. +func ByUses(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUses, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. 
+func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} diff --git a/backend/internal/data/ent/groupinvitationtoken/where.go b/backend/internal/data/ent/groupinvitationtoken/where.go index 2d81adc..d462df0 100644 --- a/backend/internal/data/ent/groupinvitationtoken/where.go +++ b/backend/internal/data/ent/groupinvitationtoken/where.go @@ -295,11 +295,7 @@ func HasGroup() predicate.GroupInvitationToken { // HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken { return predicate.GroupInvitationToken(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newGroupStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -310,32 +306,15 @@ func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken { // And groups predicates with the AND operator between them. func And(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken { - return predicate.GroupInvitationToken(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.GroupInvitationToken(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken { - return predicate.GroupInvitationToken(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.GroupInvitationToken(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.GroupInvitationToken) predicate.GroupInvitationToken { - return predicate.GroupInvitationToken(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.GroupInvitationToken(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/groupinvitationtoken_create.go b/backend/internal/data/ent/groupinvitationtoken_create.go index e45fb27..1d5859f 100644 --- a/backend/internal/data/ent/groupinvitationtoken_create.go +++ b/backend/internal/data/ent/groupinvitationtoken_create.go @@ -125,7 +125,7 @@ func (gitc *GroupInvitationTokenCreate) Mutation() *GroupInvitationTokenMutation // Save creates the GroupInvitationToken in the database. func (gitc *GroupInvitationTokenCreate) Save(ctx context.Context) (*GroupInvitationToken, error) { gitc.defaults() - return withHooks[*GroupInvitationToken, GroupInvitationTokenMutation](ctx, gitc.sqlSave, gitc.mutation, gitc.hooks) + return withHooks(ctx, gitc.sqlSave, gitc.mutation, gitc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -254,10 +254,7 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq Columns: []string{groupinvitationtoken.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -272,11 +269,15 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq // GroupInvitationTokenCreateBulk is the builder for creating many GroupInvitationToken entities in bulk. type GroupInvitationTokenCreateBulk struct { config + err error builders []*GroupInvitationTokenCreate } // Save creates the GroupInvitationToken entities in the database. func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*GroupInvitationToken, error) { + if gitcb.err != nil { + return nil, gitcb.err + } specs := make([]*sqlgraph.CreateSpec, len(gitcb.builders)) nodes := make([]*GroupInvitationToken, len(gitcb.builders)) mutators := make([]Mutator, len(gitcb.builders)) @@ -293,8 +294,8 @@ func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*Group return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, gitcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/groupinvitationtoken_delete.go b/backend/internal/data/ent/groupinvitationtoken_delete.go index 1720383..5878fdf 100644 --- a/backend/internal/data/ent/groupinvitationtoken_delete.go +++ b/backend/internal/data/ent/groupinvitationtoken_delete.go @@ -27,7 +27,7 @@ func (gitd *GroupInvitationTokenDelete) Where(ps ...predicate.GroupInvitationTok // Exec executes the deletion query and returns how many vertices were deleted. 
func (gitd *GroupInvitationTokenDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, GroupInvitationTokenMutation](ctx, gitd.sqlExec, gitd.mutation, gitd.hooks) + return withHooks(ctx, gitd.sqlExec, gitd.mutation, gitd.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/groupinvitationtoken_query.go b/backend/internal/data/ent/groupinvitationtoken_query.go index 287b509..89de054 100644 --- a/backend/internal/data/ent/groupinvitationtoken_query.go +++ b/backend/internal/data/ent/groupinvitationtoken_query.go @@ -20,7 +20,7 @@ import ( type GroupInvitationTokenQuery struct { config ctx *QueryContext - order []OrderFunc + order []groupinvitationtoken.OrderOption inters []Interceptor predicates []predicate.GroupInvitationToken withGroup *GroupQuery @@ -56,7 +56,7 @@ func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationToken } // Order specifies how the records should be ordered. -func (gitq *GroupInvitationTokenQuery) Order(o ...OrderFunc) *GroupInvitationTokenQuery { +func (gitq *GroupInvitationTokenQuery) Order(o ...groupinvitationtoken.OrderOption) *GroupInvitationTokenQuery { gitq.order = append(gitq.order, o...) 
return gitq } @@ -272,7 +272,7 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery { return &GroupInvitationTokenQuery{ config: gitq.config, ctx: gitq.ctx.Clone(), - order: append([]OrderFunc{}, gitq.order...), + order: append([]groupinvitationtoken.OrderOption{}, gitq.order...), inters: append([]Interceptor{}, gitq.inters...), predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...), withGroup: gitq.withGroup.Clone(), diff --git a/backend/internal/data/ent/groupinvitationtoken_update.go b/backend/internal/data/ent/groupinvitationtoken_update.go index 7a4caaa..3e0db91 100644 --- a/backend/internal/data/ent/groupinvitationtoken_update.go +++ b/backend/internal/data/ent/groupinvitationtoken_update.go @@ -110,7 +110,7 @@ func (gitu *GroupInvitationTokenUpdate) ClearGroup() *GroupInvitationTokenUpdate // Save executes the query and returns the number of nodes affected by the update operation. func (gitu *GroupInvitationTokenUpdate) Save(ctx context.Context) (int, error) { gitu.defaults() - return withHooks[int, GroupInvitationTokenMutation](ctx, gitu.sqlSave, gitu.mutation, gitu.hooks) + return withHooks(ctx, gitu.sqlSave, gitu.mutation, gitu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -175,10 +175,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err Columns: []string{groupinvitationtoken.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -191,10 +188,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err Columns: []string{groupinvitationtoken.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -315,7 +309,7 @@ func (gituo *GroupInvitationTokenUpdateOne) Select(field string, fields ...strin // Save executes the query and returns the updated GroupInvitationToken entity. func (gituo *GroupInvitationTokenUpdateOne) Save(ctx context.Context) (*GroupInvitationToken, error) { gituo.defaults() - return withHooks[*GroupInvitationToken, GroupInvitationTokenMutation](ctx, gituo.sqlSave, gituo.mutation, gituo.hooks) + return withHooks(ctx, gituo.sqlSave, gituo.mutation, gituo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -397,10 +391,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node Columns: []string{groupinvitationtoken.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -413,10 +404,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node Columns: []string{groupinvitationtoken.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/has_id.go b/backend/internal/data/ent/has_id.go index 875ba0d..0877caa 100644 --- a/backend/internal/data/ent/has_id.go +++ b/backend/internal/data/ent/has_id.go @@ -48,6 +48,10 @@ func (me *MaintenanceEntry) GetID() uuid.UUID { return me.ID } +func (n *Notifier) GetID() uuid.UUID { + return n.ID +} + func (u *User) GetID() uuid.UUID { return u.ID } diff --git a/backend/internal/data/ent/hook/hook.go b/backend/internal/data/ent/hook/hook.go index f4fb2ca..4648b23 100644 --- a/backend/internal/data/ent/hook/hook.go +++ b/backend/internal/data/ent/hook/hook.go @@ -141,6 +141,18 @@ func (f MaintenanceEntryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.V return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MaintenanceEntryMutation", m) } +// The NotifierFunc type is an adapter to allow the use of ordinary +// function as Notifier mutator. +type NotifierFunc func(context.Context, *ent.NotifierMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f NotifierFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.NotifierMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NotifierMutation", m) +} + // The UserFunc type is an adapter to allow the use of ordinary // function as User mutator. type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) diff --git a/backend/internal/data/ent/item.go b/backend/internal/data/ent/item.go index 3ad36f3..7b2be8a 100644 --- a/backend/internal/data/ent/item.go +++ b/backend/internal/data/ent/item.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -71,16 +72,17 @@ type Item struct { group_items *uuid.UUID item_children *uuid.UUID location_items *uuid.UUID + selectValues sql.SelectValues } // ItemEdges holds the relations/edges for other nodes in the graph. type ItemEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` // Parent holds the value of the parent edge. Parent *Item `json:"parent,omitempty"` // Children holds the value of the children edge. Children []*Item `json:"children,omitempty"` - // Group holds the value of the group edge. - Group *Group `json:"group,omitempty"` // Label holds the value of the label edge. Label []*Label `json:"label,omitempty"` // Location holds the value of the location edge. @@ -96,10 +98,23 @@ type ItemEdges struct { loadedTypes [8]bool } +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ItemEdges) GroupOrErr() (*Group, error) { + if e.loadedTypes[0] { + if e.Group == nil { + // Edge was loaded but was not found. 
+ return nil, &NotFoundError{label: group.Label} + } + return e.Group, nil + } + return nil, &NotLoadedError{edge: "group"} +} + // ParentOrErr returns the Parent value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e ItemEdges) ParentOrErr() (*Item, error) { - if e.loadedTypes[0] { + if e.loadedTypes[1] { if e.Parent == nil { // Edge was loaded but was not found. return nil, &NotFoundError{label: item.Label} @@ -112,25 +127,12 @@ func (e ItemEdges) ParentOrErr() (*Item, error) { // ChildrenOrErr returns the Children value or an error if the edge // was not loaded in eager-loading. func (e ItemEdges) ChildrenOrErr() ([]*Item, error) { - if e.loadedTypes[1] { + if e.loadedTypes[2] { return e.Children, nil } return nil, &NotLoadedError{edge: "children"} } -// GroupOrErr returns the Group value or an error if the edge -// was not loaded in eager-loading, or loaded but was not found. -func (e ItemEdges) GroupOrErr() (*Group, error) { - if e.loadedTypes[2] { - if e.Group == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: group.Label} - } - return e.Group, nil - } - return nil, &NotLoadedError{edge: "group"} -} - // LabelOrErr returns the Label value or an error if the edge // was not loaded in eager-loading. func (e ItemEdges) LabelOrErr() ([]*Label, error) { @@ -204,7 +206,7 @@ func (*Item) scanValues(columns []string) ([]any, error) { case item.ForeignKeys[2]: // location_items values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Item", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -383,11 +385,24 @@ func (i *Item) assignValues(columns []string, values []any) error { i.location_items = new(uuid.UUID) *i.location_items = *value.S.(*uuid.UUID) } + default: + i.selectValues.Set(columns[j], values[j]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Item. 
+// This includes values selected through modifiers, order, etc. +func (i *Item) Value(name string) (ent.Value, error) { + return i.selectValues.Get(name) +} + +// QueryGroup queries the "group" edge of the Item entity. +func (i *Item) QueryGroup() *GroupQuery { + return NewItemClient(i.config).QueryGroup(i) +} + // QueryParent queries the "parent" edge of the Item entity. func (i *Item) QueryParent() *ItemQuery { return NewItemClient(i.config).QueryParent(i) @@ -398,11 +413,6 @@ func (i *Item) QueryChildren() *ItemQuery { return NewItemClient(i.config).QueryChildren(i) } -// QueryGroup queries the "group" edge of the Item entity. -func (i *Item) QueryGroup() *GroupQuery { - return NewItemClient(i.config).QueryGroup(i) -} - // QueryLabel queries the "label" edge of the Item entity. func (i *Item) QueryLabel() *LabelQuery { return NewItemClient(i.config).QueryLabel(i) diff --git a/backend/internal/data/ent/item/item.go b/backend/internal/data/ent/item/item.go index 2cb7f6d..bd04679 100644 --- a/backend/internal/data/ent/item/item.go +++ b/backend/internal/data/ent/item/item.go @@ -5,6 +5,8 @@ package item import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -59,12 +61,12 @@ const ( FieldSoldPrice = "sold_price" // FieldSoldNotes holds the string denoting the sold_notes field in the database. FieldSoldNotes = "sold_notes" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" // EdgeParent holds the string denoting the parent edge name in mutations. EdgeParent = "parent" // EdgeChildren holds the string denoting the children edge name in mutations. EdgeChildren = "children" - // EdgeGroup holds the string denoting the group edge name in mutations. - EdgeGroup = "group" // EdgeLabel holds the string denoting the label edge name in mutations. EdgeLabel = "label" // EdgeLocation holds the string denoting the location edge name in mutations. 
@@ -77,6 +79,13 @@ const ( EdgeAttachments = "attachments" // Table holds the table name of the item in the database. Table = "items" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "items" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_items" // ParentTable is the table that holds the parent relation/edge. ParentTable = "items" // ParentColumn is the table column denoting the parent relation/edge. @@ -85,13 +94,6 @@ const ( ChildrenTable = "items" // ChildrenColumn is the table column denoting the children relation/edge. ChildrenColumn = "item_children" - // GroupTable is the table that holds the group relation/edge. - GroupTable = "items" - // GroupInverseTable is the table name for the Group entity. - // It exists in this package in order to avoid circular dependency with the "group" package. - GroupInverseTable = "groups" - // GroupColumn is the table column denoting the group relation/edge. - GroupColumn = "group_items" // LabelTable is the table that holds the label relation/edge. The primary key declared below. LabelTable = "label_items" // LabelInverseTable is the table name for the Label entity. @@ -226,3 +228,273 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Item queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByImportRef orders the results by the import_ref field. +func ByImportRef(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImportRef, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByQuantity orders the results by the quantity field. +func ByQuantity(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldQuantity, opts...).ToFunc() +} + +// ByInsured orders the results by the insured field. +func ByInsured(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInsured, opts...).ToFunc() +} + +// ByArchived orders the results by the archived field. +func ByArchived(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldArchived, opts...).ToFunc() +} + +// ByAssetID orders the results by the asset_id field. +func ByAssetID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAssetID, opts...).ToFunc() +} + +// BySerialNumber orders the results by the serial_number field. 
+func BySerialNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSerialNumber, opts...).ToFunc() +} + +// ByModelNumber orders the results by the model_number field. +func ByModelNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldModelNumber, opts...).ToFunc() +} + +// ByManufacturer orders the results by the manufacturer field. +func ByManufacturer(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldManufacturer, opts...).ToFunc() +} + +// ByLifetimeWarranty orders the results by the lifetime_warranty field. +func ByLifetimeWarranty(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLifetimeWarranty, opts...).ToFunc() +} + +// ByWarrantyExpires orders the results by the warranty_expires field. +func ByWarrantyExpires(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWarrantyExpires, opts...).ToFunc() +} + +// ByWarrantyDetails orders the results by the warranty_details field. +func ByWarrantyDetails(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWarrantyDetails, opts...).ToFunc() +} + +// ByPurchaseTime orders the results by the purchase_time field. +func ByPurchaseTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPurchaseTime, opts...).ToFunc() +} + +// ByPurchaseFrom orders the results by the purchase_from field. +func ByPurchaseFrom(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPurchaseFrom, opts...).ToFunc() +} + +// ByPurchasePrice orders the results by the purchase_price field. +func ByPurchasePrice(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPurchasePrice, opts...).ToFunc() +} + +// BySoldTime orders the results by the sold_time field. +func BySoldTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSoldTime, opts...).ToFunc() +} + +// BySoldTo orders the results by the sold_to field. 
+func BySoldTo(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSoldTo, opts...).ToFunc() +} + +// BySoldPrice orders the results by the sold_price field. +func BySoldPrice(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSoldPrice, opts...).ToFunc() +} + +// BySoldNotes orders the results by the sold_notes field. +func BySoldNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSoldNotes, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByParentField orders the results by parent field. +func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByChildrenCount orders the results by children count. +func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...) + } +} + +// ByChildren orders the results by children terms. +func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByLabelCount orders the results by label count. +func ByLabelCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newLabelStep(), opts...) + } +} + +// ByLabel orders the results by label terms. +func ByLabel(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newLabelStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} + +// ByLocationField orders the results by location field. +func ByLocationField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newLocationStep(), sql.OrderByField(field, opts...)) + } +} + +// ByFieldsCount orders the results by fields count. +func ByFieldsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newFieldsStep(), opts...) + } +} + +// ByFields orders the results by fields terms. +func ByFields(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFieldsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByMaintenanceEntriesCount orders the results by maintenance_entries count. +func ByMaintenanceEntriesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMaintenanceEntriesStep(), opts...) + } +} + +// ByMaintenanceEntries orders the results by maintenance_entries terms. +func ByMaintenanceEntries(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMaintenanceEntriesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAttachmentsCount orders the results by attachments count. +func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...) + } +} + +// ByAttachments orders the results by attachments terms. +func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newParentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn), + ) +} +func newChildrenStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn), + ) +} +func newLabelStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(LabelInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...), + ) +} +func newLocationStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(LocationInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn), + ) +} +func newFieldsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FieldsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn), + ) +} +func newMaintenanceEntriesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MaintenanceEntriesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn), + ) +} +func newAttachmentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) +} diff --git a/backend/internal/data/ent/item/where.go b/backend/internal/data/ent/item/where.go index e57536e..7504e6a 100644 --- a/backend/internal/data/ent/item/where.go +++ b/backend/internal/data/ent/item/where.go @@ 
-1406,6 +1406,29 @@ func SoldNotesContainsFold(v string) predicate.Item { return predicate.Item(sql.FieldContainsFold(FieldSoldNotes, v)) } +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.Item { + return predicate.Item(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.Item { + return predicate.Item(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // HasParent applies the HasEdge predicate on the "parent" edge. func HasParent() predicate.Item { return predicate.Item(func(s *sql.Selector) { @@ -1420,11 +1443,7 @@ func HasParent() predicate.Item { // HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates). func HasParentWith(preds ...predicate.Item) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn), - ) + step := newParentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1447,38 +1466,7 @@ func HasChildren() predicate.Item { // HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates). 
func HasChildrenWith(preds ...predicate.Item) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn), - ) - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// HasGroup applies the HasEdge predicate on the "group" edge. -func HasGroup() predicate.Item { - return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). -func HasGroupWith(preds ...predicate.Group) predicate.Item { - return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newChildrenStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1501,11 +1489,7 @@ func HasLabel() predicate.Item { // HasLabelWith applies the HasEdge predicate on the "label" edge with a given conditions (other predicates). func HasLabelWith(preds ...predicate.Label) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(LabelInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...), - ) + step := newLabelStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1528,11 +1512,7 @@ func HasLocation() predicate.Item { // HasLocationWith applies the HasEdge predicate on the "location" edge with a given conditions (other predicates). 
func HasLocationWith(preds ...predicate.Location) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(LocationInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn), - ) + step := newLocationStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1555,11 +1535,7 @@ func HasFields() predicate.Item { // HasFieldsWith applies the HasEdge predicate on the "fields" edge with a given conditions (other predicates). func HasFieldsWith(preds ...predicate.ItemField) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(FieldsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn), - ) + step := newFieldsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1582,11 +1558,7 @@ func HasMaintenanceEntries() predicate.Item { // HasMaintenanceEntriesWith applies the HasEdge predicate on the "maintenance_entries" edge with a given conditions (other predicates). func HasMaintenanceEntriesWith(preds ...predicate.MaintenanceEntry) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(MaintenanceEntriesInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn), - ) + step := newMaintenanceEntriesStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1609,11 +1581,7 @@ func HasAttachments() predicate.Item { // HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates). 
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item { return predicate.Item(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AttachmentsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), - ) + step := newAttachmentsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1624,32 +1592,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Item) predicate.Item { - return predicate.Item(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Item(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Item) predicate.Item { - return predicate.Item(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Item(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Item) predicate.Item { - return predicate.Item(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Item(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/item_create.go b/backend/internal/data/ent/item_create.go index d1a446e..9eb1cb6 100644 --- a/backend/internal/data/ent/item_create.go +++ b/backend/internal/data/ent/item_create.go @@ -355,6 +355,17 @@ func (ic *ItemCreate) SetNillableID(u *uuid.UUID) *ItemCreate { return ic } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate { + ic.mutation.SetGroupID(id) + return ic +} + +// SetGroup sets the "group" edge to the Group entity. 
+func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate { + return ic.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Item entity by ID. func (ic *ItemCreate) SetParentID(id uuid.UUID) *ItemCreate { ic.mutation.SetParentID(id) @@ -389,17 +400,6 @@ func (ic *ItemCreate) AddChildren(i ...*Item) *ItemCreate { return ic.AddChildIDs(ids...) } -// SetGroupID sets the "group" edge to the Group entity by ID. -func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate { - ic.mutation.SetGroupID(id) - return ic -} - -// SetGroup sets the "group" edge to the Group entity. -func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate { - return ic.SetGroupID(g.ID) -} - // AddLabelIDs adds the "label" edge to the Label entity by IDs. func (ic *ItemCreate) AddLabelIDs(ids ...uuid.UUID) *ItemCreate { ic.mutation.AddLabelIDs(ids...) @@ -487,7 +487,7 @@ func (ic *ItemCreate) Mutation() *ItemMutation { // Save creates the Item in the database. func (ic *ItemCreate) Save(ctx context.Context) (*Item, error) { ic.defaults() - return withHooks[*Item, ItemMutation](ctx, ic.sqlSave, ic.mutation, ic.hooks) + return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -763,6 +763,23 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { _spec.SetField(item.FieldSoldNotes, field.TypeString, value) _node.SoldNotes = value } + if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: item.GroupTable, + Columns: []string{item.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.group_items = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } if nodes := ic.mutation.ParentIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -771,10 +788,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -791,10 +805,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -802,26 +813,6 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: item.GroupTable, - Columns: []string{item.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - 
_node.group_items = &nodes[0] - _spec.Edges = append(_spec.Edges, edge) - } if nodes := ic.mutation.LabelIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2M, @@ -830,10 +821,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -849,10 +837,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.LocationColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -869,10 +854,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -888,10 +870,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -907,10 +886,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -924,11 +900,15 
@@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) { // ItemCreateBulk is the builder for creating many Item entities in bulk. type ItemCreateBulk struct { config + err error builders []*ItemCreate } // Save creates the Item entities in the database. func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) { + if icb.err != nil { + return nil, icb.err + } specs := make([]*sqlgraph.CreateSpec, len(icb.builders)) nodes := make([]*Item, len(icb.builders)) mutators := make([]Mutator, len(icb.builders)) @@ -945,8 +925,8 @@ func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/item_delete.go b/backend/internal/data/ent/item_delete.go index c2d00ff..d634d5d 100644 --- a/backend/internal/data/ent/item_delete.go +++ b/backend/internal/data/ent/item_delete.go @@ -27,7 +27,7 @@ func (id *ItemDelete) Where(ps ...predicate.Item) *ItemDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (id *ItemDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, ItemMutation](ctx, id.sqlExec, id.mutation, id.hooks) + return withHooks(ctx, id.sqlExec, id.mutation, id.hooks) } // ExecX is like Exec, but panics if an error occurs. 
diff --git a/backend/internal/data/ent/item_query.go b/backend/internal/data/ent/item_query.go index c6af553..12fc331 100644 --- a/backend/internal/data/ent/item_query.go +++ b/backend/internal/data/ent/item_query.go @@ -26,12 +26,12 @@ import ( type ItemQuery struct { config ctx *QueryContext - order []OrderFunc + order []item.OrderOption inters []Interceptor predicates []predicate.Item + withGroup *GroupQuery withParent *ItemQuery withChildren *ItemQuery - withGroup *GroupQuery withLabel *LabelQuery withLocation *LocationQuery withFields *ItemFieldQuery @@ -69,11 +69,33 @@ func (iq *ItemQuery) Unique(unique bool) *ItemQuery { } // Order specifies how the records should be ordered. -func (iq *ItemQuery) Order(o ...OrderFunc) *ItemQuery { +func (iq *ItemQuery) Order(o ...item.OrderOption) *ItemQuery { iq.order = append(iq.order, o...) return iq } +// QueryGroup chains the current query on the "group" edge. +func (iq *ItemQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: iq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := iq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := iq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(item.Table, item.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // QueryParent chains the current query on the "parent" edge. func (iq *ItemQuery) QueryParent() *ItemQuery { query := (&ItemClient{config: iq.config}).Query() @@ -118,28 +140,6 @@ func (iq *ItemQuery) QueryChildren() *ItemQuery { return query } -// QueryGroup chains the current query on the "group" edge. 
-func (iq *ItemQuery) QueryGroup() *GroupQuery { - query := (&GroupClient{config: iq.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := iq.prepareQuery(ctx); err != nil { - return nil, err - } - selector := iq.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(item.Table, item.FieldID, selector), - sqlgraph.To(group.Table, group.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn), - ) - fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step) - return fromU, nil - } - return query -} - // QueryLabel chains the current query on the "label" edge. func (iq *ItemQuery) QueryLabel() *LabelQuery { query := (&LabelClient{config: iq.config}).Query() @@ -439,12 +439,12 @@ func (iq *ItemQuery) Clone() *ItemQuery { return &ItemQuery{ config: iq.config, ctx: iq.ctx.Clone(), - order: append([]OrderFunc{}, iq.order...), + order: append([]item.OrderOption{}, iq.order...), inters: append([]Interceptor{}, iq.inters...), predicates: append([]predicate.Item{}, iq.predicates...), + withGroup: iq.withGroup.Clone(), withParent: iq.withParent.Clone(), withChildren: iq.withChildren.Clone(), - withGroup: iq.withGroup.Clone(), withLabel: iq.withLabel.Clone(), withLocation: iq.withLocation.Clone(), withFields: iq.withFields.Clone(), @@ -456,6 +456,17 @@ func (iq *ItemQuery) Clone() *ItemQuery { } } +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery { + query := (&GroupClient{config: iq.config}).Query() + for _, opt := range opts { + opt(query) + } + iq.withGroup = query + return iq +} + // WithParent tells the query-builder to eager-load the nodes that are connected to // the "parent" edge. 
The optional arguments are used to configure the query builder of the edge. func (iq *ItemQuery) WithParent(opts ...func(*ItemQuery)) *ItemQuery { @@ -478,17 +489,6 @@ func (iq *ItemQuery) WithChildren(opts ...func(*ItemQuery)) *ItemQuery { return iq } -// WithGroup tells the query-builder to eager-load the nodes that are connected to -// the "group" edge. The optional arguments are used to configure the query builder of the edge. -func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery { - query := (&GroupClient{config: iq.config}).Query() - for _, opt := range opts { - opt(query) - } - iq.withGroup = query - return iq -} - // WithLabel tells the query-builder to eager-load the nodes that are connected to // the "label" edge. The optional arguments are used to configure the query builder of the edge. func (iq *ItemQuery) WithLabel(opts ...func(*LabelQuery)) *ItemQuery { @@ -624,9 +624,9 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e withFKs = iq.withFKs _spec = iq.querySpec() loadedTypes = [8]bool{ + iq.withGroup != nil, iq.withParent != nil, iq.withChildren != nil, - iq.withGroup != nil, iq.withLabel != nil, iq.withLocation != nil, iq.withFields != nil, @@ -634,7 +634,7 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e iq.withAttachments != nil, } ) - if iq.withParent != nil || iq.withGroup != nil || iq.withLocation != nil { + if iq.withGroup != nil || iq.withParent != nil || iq.withLocation != nil { withFKs = true } if withFKs { @@ -658,6 +658,12 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e if len(nodes) == 0 { return nodes, nil } + if query := iq.withGroup; query != nil { + if err := iq.loadGroup(ctx, query, nodes, nil, + func(n *Item, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } if query := iq.withParent; query != nil { if err := iq.loadParent(ctx, query, nodes, nil, func(n *Item, e *Item) { n.Edges.Parent = e 
}); err != nil { @@ -671,12 +677,6 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e return nil, err } } - if query := iq.withGroup; query != nil { - if err := iq.loadGroup(ctx, query, nodes, nil, - func(n *Item, e *Group) { n.Edges.Group = e }); err != nil { - return nil, err - } - } if query := iq.withLabel; query != nil { if err := iq.loadLabel(ctx, query, nodes, func(n *Item) { n.Edges.Label = []*Label{} }, @@ -714,6 +714,38 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e return nodes, nil } +func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Item) + for i := range nodes { + if nodes[i].group_items == nil { + continue + } + fk := *nodes[i].group_items + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} func (iq *ItemQuery) loadParent(ctx context.Context, query *ItemQuery, nodes []*Item, init func(*Item), assign func(*Item, *Item)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Item) @@ -758,7 +790,7 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [ } query.withFKs = true query.Where(predicate.Item(func(s *sql.Selector) { - s.Where(sql.InValues(item.ChildrenColumn, fks...)) + s.Where(sql.InValues(s.C(item.ChildrenColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -771,44 +803,12 @@ func (iq *ItemQuery) loadChildren(ctx 
context.Context, query *ItemQuery, nodes [ } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "item_children" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "item_children" returned %v for node %v`, *fk, n.ID) } assign(node, n) } return nil } -func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Item) - for i := range nodes { - if nodes[i].group_items == nil { - continue - } - fk := *nodes[i].group_items - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - if len(ids) == 0 { - return nil - } - query.Where(group.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID) - } - for i := range nodes { - assign(nodes[i], n) - } - } - return nil -} func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*Item, init func(*Item), assign func(*Item, *Label)) error { edgeIDs := make([]driver.Value, len(nodes)) byID := make(map[uuid.UUID]*Item) @@ -914,7 +914,7 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node } query.withFKs = true query.Where(predicate.ItemField(func(s *sql.Selector) { - s.Where(sql.InValues(item.FieldsColumn, fks...)) + s.Where(sql.InValues(s.C(item.FieldsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -927,7 +927,7 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "item_fields" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "item_fields" returned %v for 
node %v`, *fk, n.ID) } assign(node, n) } @@ -943,8 +943,11 @@ func (iq *ItemQuery) loadMaintenanceEntries(ctx context.Context, query *Maintena init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(maintenanceentry.FieldItemID) + } query.Where(predicate.MaintenanceEntry(func(s *sql.Selector) { - s.Where(sql.InValues(item.MaintenanceEntriesColumn, fks...)) + s.Where(sql.InValues(s.C(item.MaintenanceEntriesColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -954,7 +957,7 @@ func (iq *ItemQuery) loadMaintenanceEntries(ctx context.Context, query *Maintena fk := n.ItemID node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "item_id" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "item_id" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -972,7 +975,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery } query.withFKs = true query.Where(predicate.Attachment(func(s *sql.Selector) { - s.Where(sql.InValues(item.AttachmentsColumn, fks...)) + s.Where(sql.InValues(s.C(item.AttachmentsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -985,7 +988,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID) } assign(node, n) } diff --git a/backend/internal/data/ent/item_update.go b/backend/internal/data/ent/item_update.go index 88796e4..8cd4722 100644 --- a/backend/internal/data/ent/item_update.go +++ b/backend/internal/data/ent/item_update.go @@ -47,6 +47,14 @@ func (iu *ItemUpdate) SetName(s string) *ItemUpdate { return iu } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (iu *ItemUpdate) SetNillableName(s *string) *ItemUpdate { + if s != nil { + iu.SetName(*s) + } + return iu +} + // SetDescription sets the "description" field. func (iu *ItemUpdate) SetDescription(s string) *ItemUpdate { iu.mutation.SetDescription(s) @@ -67,6 +75,26 @@ func (iu *ItemUpdate) ClearDescription() *ItemUpdate { return iu } +// SetImportRef sets the "import_ref" field. +func (iu *ItemUpdate) SetImportRef(s string) *ItemUpdate { + iu.mutation.SetImportRef(s) + return iu +} + +// SetNillableImportRef sets the "import_ref" field if the given value is not nil. +func (iu *ItemUpdate) SetNillableImportRef(s *string) *ItemUpdate { + if s != nil { + iu.SetImportRef(*s) + } + return iu +} + +// ClearImportRef clears the value of the "import_ref" field. +func (iu *ItemUpdate) ClearImportRef() *ItemUpdate { + iu.mutation.ClearImportRef() + return iu +} + // SetNotes sets the "notes" field. func (iu *ItemUpdate) SetNotes(s string) *ItemUpdate { iu.mutation.SetNotes(s) @@ -413,6 +441,17 @@ func (iu *ItemUpdate) ClearSoldNotes() *ItemUpdate { return iu } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate { + iu.mutation.SetGroupID(id) + return iu +} + +// SetGroup sets the "group" edge to the Group entity. +func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate { + return iu.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Item entity by ID. func (iu *ItemUpdate) SetParentID(id uuid.UUID) *ItemUpdate { iu.mutation.SetParentID(id) @@ -447,17 +486,6 @@ func (iu *ItemUpdate) AddChildren(i ...*Item) *ItemUpdate { return iu.AddChildIDs(ids...) } -// SetGroupID sets the "group" edge to the Group entity by ID. -func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate { - iu.mutation.SetGroupID(id) - return iu -} - -// SetGroup sets the "group" edge to the Group entity. 
-func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate { - return iu.SetGroupID(g.ID) -} - // AddLabelIDs adds the "label" edge to the Label entity by IDs. func (iu *ItemUpdate) AddLabelIDs(ids ...uuid.UUID) *ItemUpdate { iu.mutation.AddLabelIDs(ids...) @@ -542,6 +570,12 @@ func (iu *ItemUpdate) Mutation() *ItemMutation { return iu.mutation } +// ClearGroup clears the "group" edge to the Group entity. +func (iu *ItemUpdate) ClearGroup() *ItemUpdate { + iu.mutation.ClearGroup() + return iu +} + // ClearParent clears the "parent" edge to the Item entity. func (iu *ItemUpdate) ClearParent() *ItemUpdate { iu.mutation.ClearParent() @@ -569,12 +603,6 @@ func (iu *ItemUpdate) RemoveChildren(i ...*Item) *ItemUpdate { return iu.RemoveChildIDs(ids...) } -// ClearGroup clears the "group" edge to the Group entity. -func (iu *ItemUpdate) ClearGroup() *ItemUpdate { - iu.mutation.ClearGroup() - return iu -} - // ClearLabel clears all "label" edges to the Label entity. func (iu *ItemUpdate) ClearLabel() *ItemUpdate { iu.mutation.ClearLabel() @@ -668,7 +696,7 @@ func (iu *ItemUpdate) RemoveAttachments(a ...*Attachment) *ItemUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (iu *ItemUpdate) Save(ctx context.Context) (int, error) { iu.defaults() - return withHooks[int, ItemMutation](ctx, iu.sqlSave, iu.mutation, iu.hooks) + return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -713,6 +741,11 @@ func (iu *ItemUpdate) check() error { return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Item.description": %w`, err)} } } + if v, ok := iu.mutation.ImportRef(); ok { + if err := item.ImportRefValidator(v); err != nil { + return &ValidationError{Name: "import_ref", err: fmt.Errorf(`ent: validator failed for field "Item.import_ref": %w`, err)} + } + } if v, ok := iu.mutation.Notes(); ok { if err := item.NotesValidator(v); err != nil { return &ValidationError{Name: "notes", err: fmt.Errorf(`ent: validator failed for field "Item.notes": %w`, err)} @@ -773,6 +806,9 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { if iu.mutation.DescriptionCleared() { _spec.ClearField(item.FieldDescription, field.TypeString) } + if value, ok := iu.mutation.ImportRef(); ok { + _spec.SetField(item.FieldImportRef, field.TypeString, value) + } if iu.mutation.ImportRefCleared() { _spec.ClearField(item.FieldImportRef, field.TypeString) } @@ -875,6 +911,35 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { if iu.mutation.SoldNotesCleared() { _spec.ClearField(item.FieldSoldNotes, field.TypeString) } + if iu.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: item.GroupTable, + Columns: []string{item.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: item.GroupTable, + Columns: []string{item.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if 
iu.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -883,10 +948,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -899,10 +961,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -918,10 +977,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -934,10 +990,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -953,45 +1006,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if iu.mutation.GroupCleared() { - edge := 
&sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: item.GroupTable, - Columns: []string{item.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: item.GroupTable, - Columns: []string{item.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1007,10 +1022,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1023,10 +1035,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1042,10 +1051,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1061,10 +1067,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.LocationColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1077,10 +1080,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.LocationColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1096,10 +1096,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1112,10 +1109,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1131,10 +1125,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1150,10 +1141,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1166,10 +1154,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1185,10 +1170,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1204,10 +1186,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1220,10 +1199,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -1239,10 +1215,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, 
field.TypeUUID), }, } for _, k := range nodes { @@ -1282,6 +1255,14 @@ func (iuo *ItemUpdateOne) SetName(s string) *ItemUpdateOne { return iuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (iuo *ItemUpdateOne) SetNillableName(s *string) *ItemUpdateOne { + if s != nil { + iuo.SetName(*s) + } + return iuo +} + // SetDescription sets the "description" field. func (iuo *ItemUpdateOne) SetDescription(s string) *ItemUpdateOne { iuo.mutation.SetDescription(s) @@ -1302,6 +1283,26 @@ func (iuo *ItemUpdateOne) ClearDescription() *ItemUpdateOne { return iuo } +// SetImportRef sets the "import_ref" field. +func (iuo *ItemUpdateOne) SetImportRef(s string) *ItemUpdateOne { + iuo.mutation.SetImportRef(s) + return iuo +} + +// SetNillableImportRef sets the "import_ref" field if the given value is not nil. +func (iuo *ItemUpdateOne) SetNillableImportRef(s *string) *ItemUpdateOne { + if s != nil { + iuo.SetImportRef(*s) + } + return iuo +} + +// ClearImportRef clears the value of the "import_ref" field. +func (iuo *ItemUpdateOne) ClearImportRef() *ItemUpdateOne { + iuo.mutation.ClearImportRef() + return iuo +} + // SetNotes sets the "notes" field. func (iuo *ItemUpdateOne) SetNotes(s string) *ItemUpdateOne { iuo.mutation.SetNotes(s) @@ -1648,6 +1649,17 @@ func (iuo *ItemUpdateOne) ClearSoldNotes() *ItemUpdateOne { return iuo } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne { + iuo.mutation.SetGroupID(id) + return iuo +} + +// SetGroup sets the "group" edge to the Group entity. +func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne { + return iuo.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Item entity by ID. func (iuo *ItemUpdateOne) SetParentID(id uuid.UUID) *ItemUpdateOne { iuo.mutation.SetParentID(id) @@ -1682,17 +1694,6 @@ func (iuo *ItemUpdateOne) AddChildren(i ...*Item) *ItemUpdateOne { return iuo.AddChildIDs(ids...) 
} -// SetGroupID sets the "group" edge to the Group entity by ID. -func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne { - iuo.mutation.SetGroupID(id) - return iuo -} - -// SetGroup sets the "group" edge to the Group entity. -func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne { - return iuo.SetGroupID(g.ID) -} - // AddLabelIDs adds the "label" edge to the Label entity by IDs. func (iuo *ItemUpdateOne) AddLabelIDs(ids ...uuid.UUID) *ItemUpdateOne { iuo.mutation.AddLabelIDs(ids...) @@ -1777,6 +1778,12 @@ func (iuo *ItemUpdateOne) Mutation() *ItemMutation { return iuo.mutation } +// ClearGroup clears the "group" edge to the Group entity. +func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne { + iuo.mutation.ClearGroup() + return iuo +} + // ClearParent clears the "parent" edge to the Item entity. func (iuo *ItemUpdateOne) ClearParent() *ItemUpdateOne { iuo.mutation.ClearParent() @@ -1804,12 +1811,6 @@ func (iuo *ItemUpdateOne) RemoveChildren(i ...*Item) *ItemUpdateOne { return iuo.RemoveChildIDs(ids...) } -// ClearGroup clears the "group" edge to the Group entity. -func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne { - iuo.mutation.ClearGroup() - return iuo -} - // ClearLabel clears all "label" edges to the Label entity. func (iuo *ItemUpdateOne) ClearLabel() *ItemUpdateOne { iuo.mutation.ClearLabel() @@ -1916,7 +1917,7 @@ func (iuo *ItemUpdateOne) Select(field string, fields ...string) *ItemUpdateOne // Save executes the query and returns the updated Item entity. func (iuo *ItemUpdateOne) Save(ctx context.Context) (*Item, error) { iuo.defaults() - return withHooks[*Item, ItemMutation](ctx, iuo.sqlSave, iuo.mutation, iuo.hooks) + return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1961,6 +1962,11 @@ func (iuo *ItemUpdateOne) check() error { return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Item.description": %w`, err)} } } + if v, ok := iuo.mutation.ImportRef(); ok { + if err := item.ImportRefValidator(v); err != nil { + return &ValidationError{Name: "import_ref", err: fmt.Errorf(`ent: validator failed for field "Item.import_ref": %w`, err)} + } + } if v, ok := iuo.mutation.Notes(); ok { if err := item.NotesValidator(v); err != nil { return &ValidationError{Name: "notes", err: fmt.Errorf(`ent: validator failed for field "Item.notes": %w`, err)} @@ -2038,6 +2044,9 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) if iuo.mutation.DescriptionCleared() { _spec.ClearField(item.FieldDescription, field.TypeString) } + if value, ok := iuo.mutation.ImportRef(); ok { + _spec.SetField(item.FieldImportRef, field.TypeString, value) + } if iuo.mutation.ImportRefCleared() { _spec.ClearField(item.FieldImportRef, field.TypeString) } @@ -2140,6 +2149,35 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) if iuo.mutation.SoldNotesCleared() { _spec.ClearField(item.FieldSoldNotes, field.TypeString) } + if iuo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: item.GroupTable, + Columns: []string{item.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: item.GroupTable, + Columns: []string{item.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = 
append(_spec.Edges.Add, edge) + } if iuo.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -2148,10 +2186,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2164,10 +2199,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2183,10 +2215,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2199,10 +2228,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2218,45 +2244,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add 
= append(_spec.Edges.Add, edge) - } - if iuo.mutation.GroupCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: item.GroupTable, - Columns: []string{item.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: item.GroupTable, - Columns: []string{item.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2272,10 +2260,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2288,10 +2273,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2307,10 +2289,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: item.LabelPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: label.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2326,10 +2305,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node 
*Item, err error) Columns: []string{item.LocationColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2342,10 +2318,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.LocationColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2361,10 +2334,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2377,10 +2347,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2396,10 +2363,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.FieldsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: itemfield.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2415,10 +2379,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2431,10 +2392,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2450,10 +2408,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.MaintenanceEntriesColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: maintenanceentry.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2469,10 +2424,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2485,10 +2437,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.AttachmentsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -2504,10 +2453,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error) Columns: []string{item.AttachmentsColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: attachment.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/itemfield.go b/backend/internal/data/ent/itemfield.go index cff6751..b2b8b8d 100644 --- a/backend/internal/data/ent/itemfield.go +++ b/backend/internal/data/ent/itemfield.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/item" @@ -38,8 +39,9 @@ type ItemField struct { TimeValue time.Time `json:"time_value,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the ItemFieldQuery when eager-loading is set. - Edges ItemFieldEdges `json:"edges"` - item_fields *uuid.UUID + Edges ItemFieldEdges `json:"edges"` + item_fields *uuid.UUID + selectValues sql.SelectValues } // ItemFieldEdges holds the relations/edges for other nodes in the graph. @@ -82,7 +84,7 @@ func (*ItemField) scanValues(columns []string) ([]any, error) { case itemfield.ForeignKeys[0]: // item_fields values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type ItemField", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -163,11 +165,19 @@ func (_if *ItemField) assignValues(columns []string, values []any) error { _if.item_fields = new(uuid.UUID) *_if.item_fields = *value.S.(*uuid.UUID) } + default: + _if.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the ItemField. +// This includes values selected through modifiers, order, etc. +func (_if *ItemField) Value(name string) (ent.Value, error) { + return _if.selectValues.Get(name) +} + // QueryItem queries the "item" edge of the ItemField entity. 
func (_if *ItemField) QueryItem() *ItemQuery { return NewItemFieldClient(_if.config).QueryItem(_if) diff --git a/backend/internal/data/ent/itemfield/itemfield.go b/backend/internal/data/ent/itemfield/itemfield.go index ccad0fe..dfbf378 100644 --- a/backend/internal/data/ent/itemfield/itemfield.go +++ b/backend/internal/data/ent/itemfield/itemfield.go @@ -6,6 +6,8 @@ import ( "fmt" "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -125,3 +127,70 @@ func TypeValidator(_type Type) error { return fmt.Errorf("itemfield: invalid enum value for type field: %q", _type) } } + +// OrderOption defines the ordering options for the ItemField queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByTextValue orders the results by the text_value field. 
+func ByTextValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTextValue, opts...).ToFunc() +} + +// ByNumberValue orders the results by the number_value field. +func ByNumberValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNumberValue, opts...).ToFunc() +} + +// ByBooleanValue orders the results by the boolean_value field. +func ByBooleanValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBooleanValue, opts...).ToFunc() +} + +// ByTimeValue orders the results by the time_value field. +func ByTimeValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTimeValue, opts...).ToFunc() +} + +// ByItemField orders the results by item field. +func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...)) + } +} +func newItemStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), + ) +} diff --git a/backend/internal/data/ent/itemfield/where.go b/backend/internal/data/ent/itemfield/where.go index 94805ea..8a2d4aa 100644 --- a/backend/internal/data/ent/itemfield/where.go +++ b/backend/internal/data/ent/itemfield/where.go @@ -525,11 +525,7 @@ func HasItem() predicate.ItemField { // HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates). 
func HasItemWith(preds ...predicate.Item) predicate.ItemField { return predicate.ItemField(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), - ) + step := newItemStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -540,32 +536,15 @@ func HasItemWith(preds ...predicate.Item) predicate.ItemField { // And groups predicates with the AND operator between them. func And(predicates ...predicate.ItemField) predicate.ItemField { - return predicate.ItemField(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ItemField(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.ItemField) predicate.ItemField { - return predicate.ItemField(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ItemField(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.ItemField) predicate.ItemField { - return predicate.ItemField(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ItemField(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/itemfield_create.go b/backend/internal/data/ent/itemfield_create.go index c124c0e..65a22fb 100644 --- a/backend/internal/data/ent/itemfield_create.go +++ b/backend/internal/data/ent/itemfield_create.go @@ -173,7 +173,7 @@ func (ifc *ItemFieldCreate) Mutation() *ItemFieldMutation { // Save creates the ItemField in the database. 
func (ifc *ItemFieldCreate) Save(ctx context.Context) (*ItemField, error) { ifc.defaults() - return withHooks[*ItemField, ItemFieldMutation](ctx, ifc.sqlSave, ifc.mutation, ifc.hooks) + return withHooks(ctx, ifc.sqlSave, ifc.mutation, ifc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -341,10 +341,7 @@ func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) { Columns: []string{itemfield.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -359,11 +356,15 @@ func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) { // ItemFieldCreateBulk is the builder for creating many ItemField entities in bulk. type ItemFieldCreateBulk struct { config + err error builders []*ItemFieldCreate } // Save creates the ItemField entities in the database. func (ifcb *ItemFieldCreateBulk) Save(ctx context.Context) ([]*ItemField, error) { + if ifcb.err != nil { + return nil, ifcb.err + } specs := make([]*sqlgraph.CreateSpec, len(ifcb.builders)) nodes := make([]*ItemField, len(ifcb.builders)) mutators := make([]Mutator, len(ifcb.builders)) @@ -380,8 +381,8 @@ func (ifcb *ItemFieldCreateBulk) Save(ctx context.Context) ([]*ItemField, error) return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ifcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/itemfield_delete.go b/backend/internal/data/ent/itemfield_delete.go index 181b736..ba85cbc 100644 --- a/backend/internal/data/ent/itemfield_delete.go +++ b/backend/internal/data/ent/itemfield_delete.go @@ -27,7 +27,7 @@ func (ifd *ItemFieldDelete) Where(ps ...predicate.ItemField) *ItemFieldDelete { // Exec executes the deletion query 
and returns how many vertices were deleted. func (ifd *ItemFieldDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, ItemFieldMutation](ctx, ifd.sqlExec, ifd.mutation, ifd.hooks) + return withHooks(ctx, ifd.sqlExec, ifd.mutation, ifd.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/itemfield_query.go b/backend/internal/data/ent/itemfield_query.go index c00e422..21bffb8 100644 --- a/backend/internal/data/ent/itemfield_query.go +++ b/backend/internal/data/ent/itemfield_query.go @@ -20,7 +20,7 @@ import ( type ItemFieldQuery struct { config ctx *QueryContext - order []OrderFunc + order []itemfield.OrderOption inters []Interceptor predicates []predicate.ItemField withItem *ItemQuery @@ -56,7 +56,7 @@ func (ifq *ItemFieldQuery) Unique(unique bool) *ItemFieldQuery { } // Order specifies how the records should be ordered. -func (ifq *ItemFieldQuery) Order(o ...OrderFunc) *ItemFieldQuery { +func (ifq *ItemFieldQuery) Order(o ...itemfield.OrderOption) *ItemFieldQuery { ifq.order = append(ifq.order, o...) return ifq } @@ -272,7 +272,7 @@ func (ifq *ItemFieldQuery) Clone() *ItemFieldQuery { return &ItemFieldQuery{ config: ifq.config, ctx: ifq.ctx.Clone(), - order: append([]OrderFunc{}, ifq.order...), + order: append([]itemfield.OrderOption{}, ifq.order...), inters: append([]Interceptor{}, ifq.inters...), predicates: append([]predicate.ItemField{}, ifq.predicates...), withItem: ifq.withItem.Clone(), diff --git a/backend/internal/data/ent/itemfield_update.go b/backend/internal/data/ent/itemfield_update.go index b7ff379..3f44dc1 100644 --- a/backend/internal/data/ent/itemfield_update.go +++ b/backend/internal/data/ent/itemfield_update.go @@ -42,6 +42,14 @@ func (ifu *ItemFieldUpdate) SetName(s string) *ItemFieldUpdate { return ifu } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (ifu *ItemFieldUpdate) SetNillableName(s *string) *ItemFieldUpdate { + if s != nil { + ifu.SetName(*s) + } + return ifu +} + // SetDescription sets the "description" field. func (ifu *ItemFieldUpdate) SetDescription(s string) *ItemFieldUpdate { ifu.mutation.SetDescription(s) @@ -68,6 +76,14 @@ func (ifu *ItemFieldUpdate) SetType(i itemfield.Type) *ItemFieldUpdate { return ifu } +// SetNillableType sets the "type" field if the given value is not nil. +func (ifu *ItemFieldUpdate) SetNillableType(i *itemfield.Type) *ItemFieldUpdate { + if i != nil { + ifu.SetType(*i) + } + return ifu +} + // SetTextValue sets the "text_value" field. func (ifu *ItemFieldUpdate) SetTextValue(s string) *ItemFieldUpdate { ifu.mutation.SetTextValue(s) @@ -176,7 +192,7 @@ func (ifu *ItemFieldUpdate) ClearItem() *ItemFieldUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (ifu *ItemFieldUpdate) Save(ctx context.Context) (int, error) { ifu.defaults() - return withHooks[int, ItemFieldMutation](ctx, ifu.sqlSave, ifu.mutation, ifu.hooks) + return withHooks(ctx, ifu.sqlSave, ifu.mutation, ifu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -290,10 +306,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{itemfield.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -306,10 +319,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{itemfield.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -349,6 +359,14 @@ func (ifuo *ItemFieldUpdateOne) SetName(s string) *ItemFieldUpdateOne { return ifuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (ifuo *ItemFieldUpdateOne) SetNillableName(s *string) *ItemFieldUpdateOne { + if s != nil { + ifuo.SetName(*s) + } + return ifuo +} + // SetDescription sets the "description" field. func (ifuo *ItemFieldUpdateOne) SetDescription(s string) *ItemFieldUpdateOne { ifuo.mutation.SetDescription(s) @@ -375,6 +393,14 @@ func (ifuo *ItemFieldUpdateOne) SetType(i itemfield.Type) *ItemFieldUpdateOne { return ifuo } +// SetNillableType sets the "type" field if the given value is not nil. +func (ifuo *ItemFieldUpdateOne) SetNillableType(i *itemfield.Type) *ItemFieldUpdateOne { + if i != nil { + ifuo.SetType(*i) + } + return ifuo +} + // SetTextValue sets the "text_value" field. func (ifuo *ItemFieldUpdateOne) SetTextValue(s string) *ItemFieldUpdateOne { ifuo.mutation.SetTextValue(s) @@ -496,7 +522,7 @@ func (ifuo *ItemFieldUpdateOne) Select(field string, fields ...string) *ItemFiel // Save executes the query and returns the updated ItemField entity. 
func (ifuo *ItemFieldUpdateOne) Save(ctx context.Context) (*ItemField, error) { ifuo.defaults() - return withHooks[*ItemField, ItemFieldMutation](ctx, ifuo.sqlSave, ifuo.mutation, ifuo.hooks) + return withHooks(ctx, ifuo.sqlSave, ifuo.mutation, ifuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -627,10 +653,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField, Columns: []string{itemfield.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -643,10 +666,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField, Columns: []string{itemfield.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/label.go b/backend/internal/data/ent/label.go index 945b597..fdd6f8d 100644 --- a/backend/internal/data/ent/label.go +++ b/backend/internal/data/ent/label.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -32,6 +33,7 @@ type Label struct { // The values are being populated by the LabelQuery when eager-loading is set. Edges LabelEdges `json:"edges"` group_labels *uuid.UUID + selectValues sql.SelectValues } // LabelEdges holds the relations/edges for other nodes in the graph. 
@@ -81,7 +83,7 @@ func (*Label) scanValues(columns []string) ([]any, error) { case label.ForeignKeys[0]: // group_labels values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Label", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -138,11 +140,19 @@ func (l *Label) assignValues(columns []string, values []any) error { l.group_labels = new(uuid.UUID) *l.group_labels = *value.S.(*uuid.UUID) } + default: + l.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Label. +// This includes values selected through modifiers, order, etc. +func (l *Label) Value(name string) (ent.Value, error) { + return l.selectValues.Get(name) +} + // QueryGroup queries the "group" edge of the Label entity. func (l *Label) QueryGroup() *GroupQuery { return NewLabelClient(l.config).QueryGroup(l) diff --git a/backend/internal/data/ent/label/label.go b/backend/internal/data/ent/label/label.go index 82bcdbd..df34c87 100644 --- a/backend/internal/data/ent/label/label.go +++ b/backend/internal/data/ent/label/label.go @@ -5,6 +5,8 @@ package label import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -96,3 +98,71 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Label queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByColor orders the results by the color field. +func ByColor(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldColor, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByItemsCount orders the results by items count. +func ByItemsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...) + } +} + +// ByItems orders the results by items terms. +func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newItemsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, ItemsTable, ItemsPrimaryKey...), + ) +} diff --git a/backend/internal/data/ent/label/where.go b/backend/internal/data/ent/label/where.go index fd321e1..3754ac7 100644 --- a/backend/internal/data/ent/label/where.go +++ b/backend/internal/data/ent/label/where.go @@ -390,11 +390,7 @@ func HasGroup() predicate.Label { // HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). func HasGroupWith(preds ...predicate.Group) predicate.Label { return predicate.Label(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newGroupStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -417,11 +413,7 @@ func HasItems() predicate.Label { // HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates). func HasItemsWith(preds ...predicate.Item) predicate.Label { return predicate.Label(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2M, false, ItemsTable, ItemsPrimaryKey...), - ) + step := newItemsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -432,32 +424,15 @@ func HasItemsWith(preds ...predicate.Item) predicate.Label { // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.Label) predicate.Label { - return predicate.Label(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Label(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Label) predicate.Label { - return predicate.Label(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Label(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Label) predicate.Label { - return predicate.Label(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Label(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/label_create.go b/backend/internal/data/ent/label_create.go index 8df2903..0ad6469 100644 --- a/backend/internal/data/ent/label_create.go +++ b/backend/internal/data/ent/label_create.go @@ -133,7 +133,7 @@ func (lc *LabelCreate) Mutation() *LabelMutation { // Save creates the Label in the database. func (lc *LabelCreate) Save(ctx context.Context) (*Label, error) { lc.defaults() - return withHooks[*Label, LabelMutation](ctx, lc.sqlSave, lc.mutation, lc.hooks) + return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -266,10 +266,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) { Columns: []string{label.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -286,10 +283,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) { Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -303,11 +297,15 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) { // LabelCreateBulk is the builder for creating many Label entities in bulk. type LabelCreateBulk struct { config + err error builders []*LabelCreate } // Save creates the Label entities in the database. func (lcb *LabelCreateBulk) Save(ctx context.Context) ([]*Label, error) { + if lcb.err != nil { + return nil, lcb.err + } specs := make([]*sqlgraph.CreateSpec, len(lcb.builders)) nodes := make([]*Label, len(lcb.builders)) mutators := make([]Mutator, len(lcb.builders)) @@ -324,8 +322,8 @@ func (lcb *LabelCreateBulk) Save(ctx context.Context) ([]*Label, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/label_delete.go b/backend/internal/data/ent/label_delete.go index 2f6e251..f3b514a 100644 --- a/backend/internal/data/ent/label_delete.go +++ b/backend/internal/data/ent/label_delete.go @@ -27,7 +27,7 @@ func (ld *LabelDelete) Where(ps ...predicate.Label) *LabelDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (ld *LabelDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, LabelMutation](ctx, ld.sqlExec, ld.mutation, ld.hooks) + return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/label_query.go b/backend/internal/data/ent/label_query.go index 925641b..e3bb6d1 100644 --- a/backend/internal/data/ent/label_query.go +++ b/backend/internal/data/ent/label_query.go @@ -22,7 +22,7 @@ import ( type LabelQuery struct { config ctx *QueryContext - order []OrderFunc + order []label.OrderOption inters []Interceptor predicates []predicate.Label withGroup *GroupQuery @@ -59,7 +59,7 @@ func (lq *LabelQuery) Unique(unique bool) *LabelQuery { } // Order specifies how the records should be ordered. -func (lq *LabelQuery) Order(o ...OrderFunc) *LabelQuery { +func (lq *LabelQuery) Order(o ...label.OrderOption) *LabelQuery { lq.order = append(lq.order, o...) return lq } @@ -297,7 +297,7 @@ func (lq *LabelQuery) Clone() *LabelQuery { return &LabelQuery{ config: lq.config, ctx: lq.ctx.Clone(), - order: append([]OrderFunc{}, lq.order...), + order: append([]label.OrderOption{}, lq.order...), inters: append([]Interceptor{}, lq.inters...), predicates: append([]predicate.Label{}, lq.predicates...), withGroup: lq.withGroup.Clone(), diff --git a/backend/internal/data/ent/label_update.go b/backend/internal/data/ent/label_update.go index 57464ff..0862d22 100644 --- a/backend/internal/data/ent/label_update.go +++ b/backend/internal/data/ent/label_update.go @@ -43,6 +43,14 @@ func (lu *LabelUpdate) SetName(s string) *LabelUpdate { return lu } +// SetNillableName sets the "name" field if the given value is not nil. +func (lu *LabelUpdate) SetNillableName(s *string) *LabelUpdate { + if s != nil { + lu.SetName(*s) + } + return lu +} + // SetDescription sets the "description" field. 
func (lu *LabelUpdate) SetDescription(s string) *LabelUpdate { lu.mutation.SetDescription(s) @@ -144,7 +152,7 @@ func (lu *LabelUpdate) RemoveItems(i ...*Item) *LabelUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (lu *LabelUpdate) Save(ctx context.Context) (int, error) { lu.defaults() - return withHooks[int, LabelMutation](ctx, lu.sqlSave, lu.mutation, lu.hooks) + return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -238,10 +246,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{label.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -254,10 +259,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{label.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -273,10 +275,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -289,10 +288,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ 
-308,10 +304,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -351,6 +344,14 @@ func (luo *LabelUpdateOne) SetName(s string) *LabelUpdateOne { return luo } +// SetNillableName sets the "name" field if the given value is not nil. +func (luo *LabelUpdateOne) SetNillableName(s *string) *LabelUpdateOne { + if s != nil { + luo.SetName(*s) + } + return luo +} + // SetDescription sets the "description" field. func (luo *LabelUpdateOne) SetDescription(s string) *LabelUpdateOne { luo.mutation.SetDescription(s) @@ -465,7 +466,7 @@ func (luo *LabelUpdateOne) Select(field string, fields ...string) *LabelUpdateOn // Save executes the query and returns the updated Label entity. func (luo *LabelUpdateOne) Save(ctx context.Context) (*Label, error) { luo.defaults() - return withHooks[*Label, LabelMutation](ctx, luo.sqlSave, luo.mutation, luo.hooks) + return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -576,10 +577,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error Columns: []string{label.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -592,10 +590,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error Columns: []string{label.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -611,10 +606,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -627,10 +619,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -646,10 +635,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error Columns: label.ItemsPrimaryKey, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/location.go b/backend/internal/data/ent/location.go index 002cfb2..640f05e 100644 --- 
a/backend/internal/data/ent/location.go +++ b/backend/internal/data/ent/location.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -31,16 +32,17 @@ type Location struct { Edges LocationEdges `json:"edges"` group_locations *uuid.UUID location_children *uuid.UUID + selectValues sql.SelectValues } // LocationEdges holds the relations/edges for other nodes in the graph. type LocationEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` // Parent holds the value of the parent edge. Parent *Location `json:"parent,omitempty"` // Children holds the value of the children edge. Children []*Location `json:"children,omitempty"` - // Group holds the value of the group edge. - Group *Group `json:"group,omitempty"` // Items holds the value of the items edge. Items []*Item `json:"items,omitempty"` // loadedTypes holds the information for reporting if a @@ -48,10 +50,23 @@ type LocationEdges struct { loadedTypes [4]bool } +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e LocationEdges) GroupOrErr() (*Group, error) { + if e.loadedTypes[0] { + if e.Group == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: group.Label} + } + return e.Group, nil + } + return nil, &NotLoadedError{edge: "group"} +} + // ParentOrErr returns the Parent value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e LocationEdges) ParentOrErr() (*Location, error) { - if e.loadedTypes[0] { + if e.loadedTypes[1] { if e.Parent == nil { // Edge was loaded but was not found. 
return nil, &NotFoundError{label: location.Label} @@ -64,25 +79,12 @@ func (e LocationEdges) ParentOrErr() (*Location, error) { // ChildrenOrErr returns the Children value or an error if the edge // was not loaded in eager-loading. func (e LocationEdges) ChildrenOrErr() ([]*Location, error) { - if e.loadedTypes[1] { + if e.loadedTypes[2] { return e.Children, nil } return nil, &NotLoadedError{edge: "children"} } -// GroupOrErr returns the Group value or an error if the edge -// was not loaded in eager-loading, or loaded but was not found. -func (e LocationEdges) GroupOrErr() (*Group, error) { - if e.loadedTypes[2] { - if e.Group == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: group.Label} - } - return e.Group, nil - } - return nil, &NotLoadedError{edge: "group"} -} - // ItemsOrErr returns the Items value or an error if the edge // was not loaded in eager-loading. func (e LocationEdges) ItemsOrErr() ([]*Item, error) { @@ -108,7 +110,7 @@ func (*Location) scanValues(columns []string) ([]any, error) { case location.ForeignKeys[1]: // location_children values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type Location", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -166,11 +168,24 @@ func (l *Location) assignValues(columns []string, values []any) error { l.location_children = new(uuid.UUID) *l.location_children = *value.S.(*uuid.UUID) } + default: + l.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Location. +// This includes values selected through modifiers, order, etc. +func (l *Location) Value(name string) (ent.Value, error) { + return l.selectValues.Get(name) +} + +// QueryGroup queries the "group" edge of the Location entity. 
+func (l *Location) QueryGroup() *GroupQuery { + return NewLocationClient(l.config).QueryGroup(l) +} + // QueryParent queries the "parent" edge of the Location entity. func (l *Location) QueryParent() *LocationQuery { return NewLocationClient(l.config).QueryParent(l) @@ -181,11 +196,6 @@ func (l *Location) QueryChildren() *LocationQuery { return NewLocationClient(l.config).QueryChildren(l) } -// QueryGroup queries the "group" edge of the Location entity. -func (l *Location) QueryGroup() *GroupQuery { - return NewLocationClient(l.config).QueryGroup(l) -} - // QueryItems queries the "items" edge of the Location entity. func (l *Location) QueryItems() *ItemQuery { return NewLocationClient(l.config).QueryItems(l) diff --git a/backend/internal/data/ent/location/location.go b/backend/internal/data/ent/location/location.go index 96cb75c..4a7fc16 100644 --- a/backend/internal/data/ent/location/location.go +++ b/backend/internal/data/ent/location/location.go @@ -5,6 +5,8 @@ package location import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -21,16 +23,23 @@ const ( FieldName = "name" // FieldDescription holds the string denoting the description field in the database. FieldDescription = "description" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" // EdgeParent holds the string denoting the parent edge name in mutations. EdgeParent = "parent" // EdgeChildren holds the string denoting the children edge name in mutations. EdgeChildren = "children" - // EdgeGroup holds the string denoting the group edge name in mutations. - EdgeGroup = "group" // EdgeItems holds the string denoting the items edge name in mutations. EdgeItems = "items" // Table holds the table name of the location in the database. Table = "locations" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "locations" + // GroupInverseTable is the table name for the Group entity. 
+ // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_locations" // ParentTable is the table that holds the parent relation/edge. ParentTable = "locations" // ParentColumn is the table column denoting the parent relation/edge. @@ -39,13 +48,6 @@ const ( ChildrenTable = "locations" // ChildrenColumn is the table column denoting the children relation/edge. ChildrenColumn = "location_children" - // GroupTable is the table that holds the group relation/edge. - GroupTable = "locations" - // GroupInverseTable is the table name for the Group entity. - // It exists in this package in order to avoid circular dependency with the "group" package. - GroupInverseTable = "groups" - // GroupColumn is the table column denoting the group relation/edge. - GroupColumn = "group_locations" // ItemsTable is the table that holds the items relation/edge. ItemsTable = "items" // ItemsInverseTable is the table name for the Item entity. @@ -100,3 +102,101 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the Location queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. 
+func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByParentField orders the results by parent field. +func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...)) + } +} + +// ByChildrenCount orders the results by children count. +func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...) + } +} + +// ByChildren orders the results by children terms. +func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByItemsCount orders the results by items count. +func ByItemsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...) + } +} + +// ByItems orders the results by items terms. +func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newParentStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn), + ) +} +func newChildrenStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn), + ) +} +func newItemsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn), + ) +} diff --git a/backend/internal/data/ent/location/where.go b/backend/internal/data/ent/location/where.go index cd9a20e..a89ef4d 100644 --- a/backend/internal/data/ent/location/where.go +++ b/backend/internal/data/ent/location/where.go @@ -296,6 +296,29 @@ func DescriptionContainsFold(v string) predicate.Location { return predicate.Location(sql.FieldContainsFold(FieldDescription, v)) } +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.Location { + return predicate.Location(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.Location { + return predicate.Location(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // HasParent applies the HasEdge predicate on the "parent" edge. 
func HasParent() predicate.Location { return predicate.Location(func(s *sql.Selector) { @@ -310,11 +333,7 @@ func HasParent() predicate.Location { // HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates). func HasParentWith(preds ...predicate.Location) predicate.Location { return predicate.Location(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn), - ) + step := newParentStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -337,38 +356,7 @@ func HasChildren() predicate.Location { // HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates). func HasChildrenWith(preds ...predicate.Location) predicate.Location { return predicate.Location(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(Table, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn), - ) - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// HasGroup applies the HasEdge predicate on the "group" edge. -func HasGroup() predicate.Location { - return predicate.Location(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). 
-func HasGroupWith(preds ...predicate.Group) predicate.Location { - return predicate.Location(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newChildrenStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -391,11 +379,7 @@ func HasItems() predicate.Location { // HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates). func HasItemsWith(preds ...predicate.Item) predicate.Location { return predicate.Location(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn), - ) + step := newItemsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -406,32 +390,15 @@ func HasItemsWith(preds ...predicate.Item) predicate.Location { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Location) predicate.Location { - return predicate.Location(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Location(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Location) predicate.Location { - return predicate.Location(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Location(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Location) predicate.Location { - return predicate.Location(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Location(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/location_create.go b/backend/internal/data/ent/location_create.go index 2fade30..98f0f7a 100644 --- a/backend/internal/data/ent/location_create.go +++ b/backend/internal/data/ent/location_create.go @@ -85,6 +85,17 @@ func (lc *LocationCreate) SetNillableID(u *uuid.UUID) *LocationCreate { return lc } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate { + lc.mutation.SetGroupID(id) + return lc +} + +// SetGroup sets the "group" edge to the Group entity. +func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate { + return lc.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Location entity by ID. func (lc *LocationCreate) SetParentID(id uuid.UUID) *LocationCreate { lc.mutation.SetParentID(id) @@ -119,17 +130,6 @@ func (lc *LocationCreate) AddChildren(l ...*Location) *LocationCreate { return lc.AddChildIDs(ids...) } -// SetGroupID sets the "group" edge to the Group entity by ID. -func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate { - lc.mutation.SetGroupID(id) - return lc -} - -// SetGroup sets the "group" edge to the Group entity. -func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate { - return lc.SetGroupID(g.ID) -} - // AddItemIDs adds the "items" edge to the Item entity by IDs. func (lc *LocationCreate) AddItemIDs(ids ...uuid.UUID) *LocationCreate { lc.mutation.AddItemIDs(ids...) @@ -153,7 +153,7 @@ func (lc *LocationCreate) Mutation() *LocationMutation { // Save creates the Location in the database. 
func (lc *LocationCreate) Save(ctx context.Context) (*Location, error) { lc.defaults() - return withHooks[*Location, LocationMutation](ctx, lc.sqlSave, lc.mutation, lc.hooks) + return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -269,6 +269,23 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { _spec.SetField(location.FieldDescription, field.TypeString, value) _node.Description = value } + if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: location.GroupTable, + Columns: []string{location.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.group_locations = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } if nodes := lc.mutation.ParentIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -277,10 +294,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { Columns: []string{location.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -297,10 +311,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -308,26 +319,6 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } - if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 { - 
edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: location.GroupTable, - Columns: []string{location.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _node.group_locations = &nodes[0] - _spec.Edges = append(_spec.Edges, edge) - } if nodes := lc.mutation.ItemsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -336,10 +327,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -353,11 +341,15 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) { // LocationCreateBulk is the builder for creating many Location entities in bulk. type LocationCreateBulk struct { config + err error builders []*LocationCreate } // Save creates the Location entities in the database. 
func (lcb *LocationCreateBulk) Save(ctx context.Context) ([]*Location, error) { + if lcb.err != nil { + return nil, lcb.err + } specs := make([]*sqlgraph.CreateSpec, len(lcb.builders)) nodes := make([]*Location, len(lcb.builders)) mutators := make([]Mutator, len(lcb.builders)) @@ -374,8 +366,8 @@ func (lcb *LocationCreateBulk) Save(ctx context.Context) ([]*Location, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/location_delete.go b/backend/internal/data/ent/location_delete.go index 67a2adc..451b7f1 100644 --- a/backend/internal/data/ent/location_delete.go +++ b/backend/internal/data/ent/location_delete.go @@ -27,7 +27,7 @@ func (ld *LocationDelete) Where(ps ...predicate.Location) *LocationDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ld *LocationDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, LocationMutation](ctx, ld.sqlExec, ld.mutation, ld.hooks) + return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/location_query.go b/backend/internal/data/ent/location_query.go index 916215b..4aae965 100644 --- a/backend/internal/data/ent/location_query.go +++ b/backend/internal/data/ent/location_query.go @@ -22,12 +22,12 @@ import ( type LocationQuery struct { config ctx *QueryContext - order []OrderFunc + order []location.OrderOption inters []Interceptor predicates []predicate.Location + withGroup *GroupQuery withParent *LocationQuery withChildren *LocationQuery - withGroup *GroupQuery withItems *ItemQuery withFKs bool // intermediate query (i.e. traversal path). 
@@ -61,11 +61,33 @@ func (lq *LocationQuery) Unique(unique bool) *LocationQuery { } // Order specifies how the records should be ordered. -func (lq *LocationQuery) Order(o ...OrderFunc) *LocationQuery { +func (lq *LocationQuery) Order(o ...location.OrderOption) *LocationQuery { lq.order = append(lq.order, o...) return lq } +// QueryGroup chains the current query on the "group" edge. +func (lq *LocationQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: lq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := lq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := lq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(location.Table, location.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // QueryParent chains the current query on the "parent" edge. func (lq *LocationQuery) QueryParent() *LocationQuery { query := (&LocationClient{config: lq.config}).Query() @@ -110,28 +132,6 @@ func (lq *LocationQuery) QueryChildren() *LocationQuery { return query } -// QueryGroup chains the current query on the "group" edge. 
-func (lq *LocationQuery) QueryGroup() *GroupQuery { - query := (&GroupClient{config: lq.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := lq.prepareQuery(ctx); err != nil { - return nil, err - } - selector := lq.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(location.Table, location.FieldID, selector), - sqlgraph.To(group.Table, group.FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn), - ) - fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step) - return fromU, nil - } - return query -} - // QueryItems chains the current query on the "items" edge. func (lq *LocationQuery) QueryItems() *ItemQuery { query := (&ItemClient{config: lq.config}).Query() @@ -343,12 +343,12 @@ func (lq *LocationQuery) Clone() *LocationQuery { return &LocationQuery{ config: lq.config, ctx: lq.ctx.Clone(), - order: append([]OrderFunc{}, lq.order...), + order: append([]location.OrderOption{}, lq.order...), inters: append([]Interceptor{}, lq.inters...), predicates: append([]predicate.Location{}, lq.predicates...), + withGroup: lq.withGroup.Clone(), withParent: lq.withParent.Clone(), withChildren: lq.withChildren.Clone(), - withGroup: lq.withGroup.Clone(), withItems: lq.withItems.Clone(), // clone intermediate query. sql: lq.sql.Clone(), @@ -356,6 +356,17 @@ func (lq *LocationQuery) Clone() *LocationQuery { } } +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery { + query := (&GroupClient{config: lq.config}).Query() + for _, opt := range opts { + opt(query) + } + lq.withGroup = query + return lq +} + // WithParent tells the query-builder to eager-load the nodes that are connected to // the "parent" edge. 
The optional arguments are used to configure the query builder of the edge. func (lq *LocationQuery) WithParent(opts ...func(*LocationQuery)) *LocationQuery { @@ -378,17 +389,6 @@ func (lq *LocationQuery) WithChildren(opts ...func(*LocationQuery)) *LocationQue return lq } -// WithGroup tells the query-builder to eager-load the nodes that are connected to -// the "group" edge. The optional arguments are used to configure the query builder of the edge. -func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery { - query := (&GroupClient{config: lq.config}).Query() - for _, opt := range opts { - opt(query) - } - lq.withGroup = query - return lq -} - // WithItems tells the query-builder to eager-load the nodes that are connected to // the "items" edge. The optional arguments are used to configure the query builder of the edge. func (lq *LocationQuery) WithItems(opts ...func(*ItemQuery)) *LocationQuery { @@ -480,13 +480,13 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc withFKs = lq.withFKs _spec = lq.querySpec() loadedTypes = [4]bool{ + lq.withGroup != nil, lq.withParent != nil, lq.withChildren != nil, - lq.withGroup != nil, lq.withItems != nil, } ) - if lq.withParent != nil || lq.withGroup != nil { + if lq.withGroup != nil || lq.withParent != nil { withFKs = true } if withFKs { @@ -510,6 +510,12 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc if len(nodes) == 0 { return nodes, nil } + if query := lq.withGroup; query != nil { + if err := lq.loadGroup(ctx, query, nodes, nil, + func(n *Location, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } if query := lq.withParent; query != nil { if err := lq.loadParent(ctx, query, nodes, nil, func(n *Location, e *Location) { n.Edges.Parent = e }); err != nil { @@ -523,12 +529,6 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc return nil, err } } - if query := lq.withGroup; query != 
nil { - if err := lq.loadGroup(ctx, query, nodes, nil, - func(n *Location, e *Group) { n.Edges.Group = e }); err != nil { - return nil, err - } - } if query := lq.withItems; query != nil { if err := lq.loadItems(ctx, query, nodes, func(n *Location) { n.Edges.Items = []*Item{} }, @@ -539,6 +539,38 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc return nodes, nil } +func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Location) + for i := range nodes { + if nodes[i].group_locations == nil { + continue + } + fk := *nodes[i].group_locations + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} func (lq *LocationQuery) loadParent(ctx context.Context, query *LocationQuery, nodes []*Location, init func(*Location), assign func(*Location, *Location)) error { ids := make([]uuid.UUID, 0, len(nodes)) nodeids := make(map[uuid.UUID][]*Location) @@ -583,7 +615,7 @@ func (lq *LocationQuery) loadChildren(ctx context.Context, query *LocationQuery, } query.withFKs = true query.Where(predicate.Location(func(s *sql.Selector) { - s.Where(sql.InValues(location.ChildrenColumn, fks...)) + s.Where(sql.InValues(s.C(location.ChildrenColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -596,44 +628,12 @@ func (lq *LocationQuery) loadChildren(ctx context.Context, query *LocationQuery, } node, ok := nodeids[*fk] if !ok { - return 
fmt.Errorf(`unexpected foreign-key "location_children" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "location_children" returned %v for node %v`, *fk, n.ID) } assign(node, n) } return nil } -func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error { - ids := make([]uuid.UUID, 0, len(nodes)) - nodeids := make(map[uuid.UUID][]*Location) - for i := range nodes { - if nodes[i].group_locations == nil { - continue - } - fk := *nodes[i].group_locations - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - nodeids[fk] = append(nodeids[fk], nodes[i]) - } - if len(ids) == 0 { - return nil - } - query.Where(group.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID) - } - for i := range nodes { - assign(nodes[i], n) - } - } - return nil -} func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*Location, init func(*Location), assign func(*Location, *Item)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[uuid.UUID]*Location) @@ -646,7 +646,7 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes } query.withFKs = true query.Where(predicate.Item(func(s *sql.Selector) { - s.Where(sql.InValues(location.ItemsColumn, fks...)) + s.Where(sql.InValues(s.C(location.ItemsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -659,7 +659,7 @@ func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "location_items" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "location_items" returned %v for node %v`, *fk, n.ID) } 
assign(node, n) } diff --git a/backend/internal/data/ent/location_update.go b/backend/internal/data/ent/location_update.go index b67f7e4..d569b21 100644 --- a/backend/internal/data/ent/location_update.go +++ b/backend/internal/data/ent/location_update.go @@ -43,6 +43,14 @@ func (lu *LocationUpdate) SetName(s string) *LocationUpdate { return lu } +// SetNillableName sets the "name" field if the given value is not nil. +func (lu *LocationUpdate) SetNillableName(s *string) *LocationUpdate { + if s != nil { + lu.SetName(*s) + } + return lu +} + // SetDescription sets the "description" field. func (lu *LocationUpdate) SetDescription(s string) *LocationUpdate { lu.mutation.SetDescription(s) @@ -63,6 +71,17 @@ func (lu *LocationUpdate) ClearDescription() *LocationUpdate { return lu } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate { + lu.mutation.SetGroupID(id) + return lu +} + +// SetGroup sets the "group" edge to the Group entity. +func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate { + return lu.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Location entity by ID. func (lu *LocationUpdate) SetParentID(id uuid.UUID) *LocationUpdate { lu.mutation.SetParentID(id) @@ -97,17 +116,6 @@ func (lu *LocationUpdate) AddChildren(l ...*Location) *LocationUpdate { return lu.AddChildIDs(ids...) } -// SetGroupID sets the "group" edge to the Group entity by ID. -func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate { - lu.mutation.SetGroupID(id) - return lu -} - -// SetGroup sets the "group" edge to the Group entity. -func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate { - return lu.SetGroupID(g.ID) -} - // AddItemIDs adds the "items" edge to the Item entity by IDs. func (lu *LocationUpdate) AddItemIDs(ids ...uuid.UUID) *LocationUpdate { lu.mutation.AddItemIDs(ids...) 
@@ -128,6 +136,12 @@ func (lu *LocationUpdate) Mutation() *LocationMutation { return lu.mutation } +// ClearGroup clears the "group" edge to the Group entity. +func (lu *LocationUpdate) ClearGroup() *LocationUpdate { + lu.mutation.ClearGroup() + return lu +} + // ClearParent clears the "parent" edge to the Location entity. func (lu *LocationUpdate) ClearParent() *LocationUpdate { lu.mutation.ClearParent() @@ -155,12 +169,6 @@ func (lu *LocationUpdate) RemoveChildren(l ...*Location) *LocationUpdate { return lu.RemoveChildIDs(ids...) } -// ClearGroup clears the "group" edge to the Group entity. -func (lu *LocationUpdate) ClearGroup() *LocationUpdate { - lu.mutation.ClearGroup() - return lu -} - // ClearItems clears all "items" edges to the Item entity. func (lu *LocationUpdate) ClearItems() *LocationUpdate { lu.mutation.ClearItems() @@ -185,7 +193,7 @@ func (lu *LocationUpdate) RemoveItems(i ...*Item) *LocationUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (lu *LocationUpdate) Save(ctx context.Context) (int, error) { lu.defaults() - return withHooks[int, LocationMutation](ctx, lu.sqlSave, lu.mutation, lu.hooks) + return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -260,6 +268,35 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { if lu.mutation.DescriptionCleared() { _spec.ClearField(location.FieldDescription, field.TypeString) } + if lu.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: location.GroupTable, + Columns: []string{location.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: location.GroupTable, + Columns: []string{location.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if lu.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -268,10 +305,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -284,10 +318,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -303,10 +334,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ChildrenColumn}, Bidi: 
false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -319,10 +347,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -338,45 +363,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if lu.mutation.GroupCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: location.GroupTable, - Columns: []string{location.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: location.GroupTable, - Columns: []string{location.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -392,10 +379,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ItemsColumn}, Bidi: false, Target: 
&sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -408,10 +392,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -427,10 +408,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -470,6 +448,14 @@ func (luo *LocationUpdateOne) SetName(s string) *LocationUpdateOne { return luo } +// SetNillableName sets the "name" field if the given value is not nil. +func (luo *LocationUpdateOne) SetNillableName(s *string) *LocationUpdateOne { + if s != nil { + luo.SetName(*s) + } + return luo +} + // SetDescription sets the "description" field. func (luo *LocationUpdateOne) SetDescription(s string) *LocationUpdateOne { luo.mutation.SetDescription(s) @@ -490,6 +476,17 @@ func (luo *LocationUpdateOne) ClearDescription() *LocationUpdateOne { return luo } +// SetGroupID sets the "group" edge to the Group entity by ID. +func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne { + luo.mutation.SetGroupID(id) + return luo +} + +// SetGroup sets the "group" edge to the Group entity. +func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne { + return luo.SetGroupID(g.ID) +} + // SetParentID sets the "parent" edge to the Location entity by ID. 
func (luo *LocationUpdateOne) SetParentID(id uuid.UUID) *LocationUpdateOne { luo.mutation.SetParentID(id) @@ -524,17 +521,6 @@ func (luo *LocationUpdateOne) AddChildren(l ...*Location) *LocationUpdateOne { return luo.AddChildIDs(ids...) } -// SetGroupID sets the "group" edge to the Group entity by ID. -func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne { - luo.mutation.SetGroupID(id) - return luo -} - -// SetGroup sets the "group" edge to the Group entity. -func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne { - return luo.SetGroupID(g.ID) -} - // AddItemIDs adds the "items" edge to the Item entity by IDs. func (luo *LocationUpdateOne) AddItemIDs(ids ...uuid.UUID) *LocationUpdateOne { luo.mutation.AddItemIDs(ids...) @@ -555,6 +541,12 @@ func (luo *LocationUpdateOne) Mutation() *LocationMutation { return luo.mutation } +// ClearGroup clears the "group" edge to the Group entity. +func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne { + luo.mutation.ClearGroup() + return luo +} + // ClearParent clears the "parent" edge to the Location entity. func (luo *LocationUpdateOne) ClearParent() *LocationUpdateOne { luo.mutation.ClearParent() @@ -582,12 +574,6 @@ func (luo *LocationUpdateOne) RemoveChildren(l ...*Location) *LocationUpdateOne return luo.RemoveChildIDs(ids...) } -// ClearGroup clears the "group" edge to the Group entity. -func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne { - luo.mutation.ClearGroup() - return luo -} - // ClearItems clears all "items" edges to the Item entity. func (luo *LocationUpdateOne) ClearItems() *LocationUpdateOne { luo.mutation.ClearItems() @@ -625,7 +611,7 @@ func (luo *LocationUpdateOne) Select(field string, fields ...string) *LocationUp // Save executes the query and returns the updated Location entity. 
func (luo *LocationUpdateOne) Save(ctx context.Context) (*Location, error) { luo.defaults() - return withHooks[*Location, LocationMutation](ctx, luo.sqlSave, luo.mutation, luo.hooks) + return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -717,6 +703,35 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err if luo.mutation.DescriptionCleared() { _spec.ClearField(location.FieldDescription, field.TypeString) } + if luo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: location.GroupTable, + Columns: []string{location.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: location.GroupTable, + Columns: []string{location.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if luo.mutation.ParentCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -725,10 +740,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -741,10 +753,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ParentColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: 
&sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -760,10 +769,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -776,10 +782,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -795,45 +798,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ChildrenColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: location.FieldID, - }, - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if luo.mutation.GroupCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: location.GroupTable, - Columns: []string{location.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: true, - Table: location.GroupTable, - Columns: []string{location.GroupColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - 
IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -849,10 +814,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -865,10 +827,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -884,10 +843,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err Columns: []string{location.ItemsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/maintenanceentry.go b/backend/internal/data/ent/maintenanceentry.go index b571bce..af35e0b 100644 --- a/backend/internal/data/ent/maintenanceentry.go +++ b/backend/internal/data/ent/maintenanceentry.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/item" @@ -26,6 +27,8 @@ type MaintenanceEntry struct { ItemID uuid.UUID `json:"item_id,omitempty"` // Date holds the value of the "date" field. Date time.Time `json:"date,omitempty"` + // ScheduledDate holds the value of the "scheduled_date" field. 
+ ScheduledDate time.Time `json:"scheduled_date,omitempty"` // Name holds the value of the "name" field. Name string `json:"name,omitempty"` // Description holds the value of the "description" field. @@ -34,7 +37,8 @@ type MaintenanceEntry struct { Cost float64 `json:"cost,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MaintenanceEntryQuery when eager-loading is set. - Edges MaintenanceEntryEdges `json:"edges"` + Edges MaintenanceEntryEdges `json:"edges"` + selectValues sql.SelectValues } // MaintenanceEntryEdges holds the relations/edges for other nodes in the graph. @@ -68,12 +72,12 @@ func (*MaintenanceEntry) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullFloat64) case maintenanceentry.FieldName, maintenanceentry.FieldDescription: values[i] = new(sql.NullString) - case maintenanceentry.FieldCreatedAt, maintenanceentry.FieldUpdatedAt, maintenanceentry.FieldDate: + case maintenanceentry.FieldCreatedAt, maintenanceentry.FieldUpdatedAt, maintenanceentry.FieldDate, maintenanceentry.FieldScheduledDate: values[i] = new(sql.NullTime) case maintenanceentry.FieldID, maintenanceentry.FieldItemID: values[i] = new(uuid.UUID) default: - return nil, fmt.Errorf("unexpected column %q for type MaintenanceEntry", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -117,6 +121,12 @@ func (me *MaintenanceEntry) assignValues(columns []string, values []any) error { } else if value.Valid { me.Date = value.Time } + case maintenanceentry.FieldScheduledDate: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field scheduled_date", values[i]) + } else if value.Valid { + me.ScheduledDate = value.Time + } case maintenanceentry.FieldName: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field name", values[i]) @@ -135,11 +145,19 @@ func (me *MaintenanceEntry) assignValues(columns []string, 
values []any) error { } else if value.Valid { me.Cost = value.Float64 } + default: + me.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the MaintenanceEntry. +// This includes values selected through modifiers, order, etc. +func (me *MaintenanceEntry) Value(name string) (ent.Value, error) { + return me.selectValues.Get(name) +} + // QueryItem queries the "item" edge of the MaintenanceEntry entity. func (me *MaintenanceEntry) QueryItem() *ItemQuery { return NewMaintenanceEntryClient(me.config).QueryItem(me) @@ -180,6 +198,9 @@ func (me *MaintenanceEntry) String() string { builder.WriteString("date=") builder.WriteString(me.Date.Format(time.ANSIC)) builder.WriteString(", ") + builder.WriteString("scheduled_date=") + builder.WriteString(me.ScheduledDate.Format(time.ANSIC)) + builder.WriteString(", ") builder.WriteString("name=") builder.WriteString(me.Name) builder.WriteString(", ") diff --git a/backend/internal/data/ent/maintenanceentry/maintenanceentry.go b/backend/internal/data/ent/maintenanceentry/maintenanceentry.go index c1dcffc..b4b8142 100644 --- a/backend/internal/data/ent/maintenanceentry/maintenanceentry.go +++ b/backend/internal/data/ent/maintenanceentry/maintenanceentry.go @@ -5,6 +5,8 @@ package maintenanceentry import ( "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -21,6 +23,8 @@ const ( FieldItemID = "item_id" // FieldDate holds the string denoting the date field in the database. FieldDate = "date" + // FieldScheduledDate holds the string denoting the scheduled_date field in the database. + FieldScheduledDate = "scheduled_date" // FieldName holds the string denoting the name field in the database. FieldName = "name" // FieldDescription holds the string denoting the description field in the database. 
@@ -47,6 +51,7 @@ var Columns = []string{ FieldUpdatedAt, FieldItemID, FieldDate, + FieldScheduledDate, FieldName, FieldDescription, FieldCost, @@ -69,8 +74,6 @@ var ( DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time - // DefaultDate holds the default value on creation for the "date" field. - DefaultDate func() time.Time // NameValidator is a validator for the "name" field. It is called by the builders before save. NameValidator func(string) error // DescriptionValidator is a validator for the "description" field. It is called by the builders before save. @@ -80,3 +83,65 @@ var ( // DefaultID holds the default value on creation for the "id" field. DefaultID func() uuid.UUID ) + +// OrderOption defines the ordering options for the MaintenanceEntry queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByItemID orders the results by the item_id field. +func ByItemID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldItemID, opts...).ToFunc() +} + +// ByDate orders the results by the date field. +func ByDate(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDate, opts...).ToFunc() +} + +// ByScheduledDate orders the results by the scheduled_date field. 
+func ByScheduledDate(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScheduledDate, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByCost orders the results by the cost field. +func ByCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCost, opts...).ToFunc() +} + +// ByItemField orders the results by item field. +func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...)) + } +} +func newItemStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ItemInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), + ) +} diff --git a/backend/internal/data/ent/maintenanceentry/where.go b/backend/internal/data/ent/maintenanceentry/where.go index 6a88550..85e736d 100644 --- a/backend/internal/data/ent/maintenanceentry/where.go +++ b/backend/internal/data/ent/maintenanceentry/where.go @@ -76,6 +76,11 @@ func Date(v time.Time) predicate.MaintenanceEntry { return predicate.MaintenanceEntry(sql.FieldEQ(FieldDate, v)) } +// ScheduledDate applies equality check predicate on the "scheduled_date" field. It's identical to ScheduledDateEQ. +func ScheduledDate(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldEQ(FieldScheduledDate, v)) +} + // Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
func Name(v string) predicate.MaintenanceEntry { return predicate.MaintenanceEntry(sql.FieldEQ(FieldName, v)) @@ -231,6 +236,66 @@ func DateLTE(v time.Time) predicate.MaintenanceEntry { return predicate.MaintenanceEntry(sql.FieldLTE(FieldDate, v)) } +// DateIsNil applies the IsNil predicate on the "date" field. +func DateIsNil() predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldIsNull(FieldDate)) +} + +// DateNotNil applies the NotNil predicate on the "date" field. +func DateNotNil() predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldNotNull(FieldDate)) +} + +// ScheduledDateEQ applies the EQ predicate on the "scheduled_date" field. +func ScheduledDateEQ(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldEQ(FieldScheduledDate, v)) +} + +// ScheduledDateNEQ applies the NEQ predicate on the "scheduled_date" field. +func ScheduledDateNEQ(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldNEQ(FieldScheduledDate, v)) +} + +// ScheduledDateIn applies the In predicate on the "scheduled_date" field. +func ScheduledDateIn(vs ...time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldIn(FieldScheduledDate, vs...)) +} + +// ScheduledDateNotIn applies the NotIn predicate on the "scheduled_date" field. +func ScheduledDateNotIn(vs ...time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldNotIn(FieldScheduledDate, vs...)) +} + +// ScheduledDateGT applies the GT predicate on the "scheduled_date" field. +func ScheduledDateGT(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldGT(FieldScheduledDate, v)) +} + +// ScheduledDateGTE applies the GTE predicate on the "scheduled_date" field. 
+func ScheduledDateGTE(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldGTE(FieldScheduledDate, v)) +} + +// ScheduledDateLT applies the LT predicate on the "scheduled_date" field. +func ScheduledDateLT(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldLT(FieldScheduledDate, v)) +} + +// ScheduledDateLTE applies the LTE predicate on the "scheduled_date" field. +func ScheduledDateLTE(v time.Time) predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldLTE(FieldScheduledDate, v)) +} + +// ScheduledDateIsNil applies the IsNil predicate on the "scheduled_date" field. +func ScheduledDateIsNil() predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldIsNull(FieldScheduledDate)) +} + +// ScheduledDateNotNil applies the NotNil predicate on the "scheduled_date" field. +func ScheduledDateNotNil() predicate.MaintenanceEntry { + return predicate.MaintenanceEntry(sql.FieldNotNull(FieldScheduledDate)) +} + // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.MaintenanceEntry { return predicate.MaintenanceEntry(sql.FieldEQ(FieldName, v)) @@ -425,11 +490,7 @@ func HasItem() predicate.MaintenanceEntry { // HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates). func HasItemWith(preds ...predicate.Item) predicate.MaintenanceEntry { return predicate.MaintenanceEntry(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(ItemInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn), - ) + step := newItemStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -440,32 +501,15 @@ func HasItemWith(preds ...predicate.Item) predicate.MaintenanceEntry { // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.MaintenanceEntry) predicate.MaintenanceEntry { - return predicate.MaintenanceEntry(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.MaintenanceEntry(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.MaintenanceEntry) predicate.MaintenanceEntry { - return predicate.MaintenanceEntry(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.MaintenanceEntry(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.MaintenanceEntry) predicate.MaintenanceEntry { - return predicate.MaintenanceEntry(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.MaintenanceEntry(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/maintenanceentry_create.go b/backend/internal/data/ent/maintenanceentry_create.go index ef5602b..ea71a4d 100644 --- a/backend/internal/data/ent/maintenanceentry_create.go +++ b/backend/internal/data/ent/maintenanceentry_create.go @@ -70,6 +70,20 @@ func (mec *MaintenanceEntryCreate) SetNillableDate(t *time.Time) *MaintenanceEnt return mec } +// SetScheduledDate sets the "scheduled_date" field. +func (mec *MaintenanceEntryCreate) SetScheduledDate(t time.Time) *MaintenanceEntryCreate { + mec.mutation.SetScheduledDate(t) + return mec +} + +// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil. +func (mec *MaintenanceEntryCreate) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryCreate { + if t != nil { + mec.SetScheduledDate(*t) + } + return mec +} + // SetName sets the "name" field. 
func (mec *MaintenanceEntryCreate) SetName(s string) *MaintenanceEntryCreate { mec.mutation.SetName(s) @@ -131,7 +145,7 @@ func (mec *MaintenanceEntryCreate) Mutation() *MaintenanceEntryMutation { // Save creates the MaintenanceEntry in the database. func (mec *MaintenanceEntryCreate) Save(ctx context.Context) (*MaintenanceEntry, error) { mec.defaults() - return withHooks[*MaintenanceEntry, MaintenanceEntryMutation](ctx, mec.sqlSave, mec.mutation, mec.hooks) + return withHooks(ctx, mec.sqlSave, mec.mutation, mec.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -166,10 +180,6 @@ func (mec *MaintenanceEntryCreate) defaults() { v := maintenanceentry.DefaultUpdatedAt() mec.mutation.SetUpdatedAt(v) } - if _, ok := mec.mutation.Date(); !ok { - v := maintenanceentry.DefaultDate() - mec.mutation.SetDate(v) - } if _, ok := mec.mutation.Cost(); !ok { v := maintenanceentry.DefaultCost mec.mutation.SetCost(v) @@ -191,9 +201,6 @@ func (mec *MaintenanceEntryCreate) check() error { if _, ok := mec.mutation.ItemID(); !ok { return &ValidationError{Name: "item_id", err: errors.New(`ent: missing required field "MaintenanceEntry.item_id"`)} } - if _, ok := mec.mutation.Date(); !ok { - return &ValidationError{Name: "date", err: errors.New(`ent: missing required field "MaintenanceEntry.date"`)} - } if _, ok := mec.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "MaintenanceEntry.name"`)} } @@ -260,6 +267,10 @@ func (mec *MaintenanceEntryCreate) createSpec() (*MaintenanceEntry, *sqlgraph.Cr _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value) _node.Date = value } + if value, ok := mec.mutation.ScheduledDate(); ok { + _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value) + _node.ScheduledDate = value + } if value, ok := mec.mutation.Name(); ok { _spec.SetField(maintenanceentry.FieldName, field.TypeString, value) _node.Name = value @@ -280,10 +291,7 @@ func (mec 
*MaintenanceEntryCreate) createSpec() (*MaintenanceEntry, *sqlgraph.Cr Columns: []string{maintenanceentry.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -298,11 +306,15 @@ func (mec *MaintenanceEntryCreate) createSpec() (*MaintenanceEntry, *sqlgraph.Cr // MaintenanceEntryCreateBulk is the builder for creating many MaintenanceEntry entities in bulk. type MaintenanceEntryCreateBulk struct { config + err error builders []*MaintenanceEntryCreate } // Save creates the MaintenanceEntry entities in the database. func (mecb *MaintenanceEntryCreateBulk) Save(ctx context.Context) ([]*MaintenanceEntry, error) { + if mecb.err != nil { + return nil, mecb.err + } specs := make([]*sqlgraph.CreateSpec, len(mecb.builders)) nodes := make([]*MaintenanceEntry, len(mecb.builders)) mutators := make([]Mutator, len(mecb.builders)) @@ -319,8 +331,8 @@ func (mecb *MaintenanceEntryCreateBulk) Save(ctx context.Context) ([]*Maintenanc return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, mecb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/maintenanceentry_delete.go b/backend/internal/data/ent/maintenanceentry_delete.go index d65ed8b..0323ae9 100644 --- a/backend/internal/data/ent/maintenanceentry_delete.go +++ b/backend/internal/data/ent/maintenanceentry_delete.go @@ -27,7 +27,7 @@ func (med *MaintenanceEntryDelete) Where(ps ...predicate.MaintenanceEntry) *Main // Exec executes the deletion query and returns how many vertices were deleted. 
func (med *MaintenanceEntryDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, MaintenanceEntryMutation](ctx, med.sqlExec, med.mutation, med.hooks) + return withHooks(ctx, med.sqlExec, med.mutation, med.hooks) } // ExecX is like Exec, but panics if an error occurs. diff --git a/backend/internal/data/ent/maintenanceentry_query.go b/backend/internal/data/ent/maintenanceentry_query.go index 52c8504..8d41f75 100644 --- a/backend/internal/data/ent/maintenanceentry_query.go +++ b/backend/internal/data/ent/maintenanceentry_query.go @@ -20,7 +20,7 @@ import ( type MaintenanceEntryQuery struct { config ctx *QueryContext - order []OrderFunc + order []maintenanceentry.OrderOption inters []Interceptor predicates []predicate.MaintenanceEntry withItem *ItemQuery @@ -55,7 +55,7 @@ func (meq *MaintenanceEntryQuery) Unique(unique bool) *MaintenanceEntryQuery { } // Order specifies how the records should be ordered. -func (meq *MaintenanceEntryQuery) Order(o ...OrderFunc) *MaintenanceEntryQuery { +func (meq *MaintenanceEntryQuery) Order(o ...maintenanceentry.OrderOption) *MaintenanceEntryQuery { meq.order = append(meq.order, o...) 
return meq } @@ -271,7 +271,7 @@ func (meq *MaintenanceEntryQuery) Clone() *MaintenanceEntryQuery { return &MaintenanceEntryQuery{ config: meq.config, ctx: meq.ctx.Clone(), - order: append([]OrderFunc{}, meq.order...), + order: append([]maintenanceentry.OrderOption{}, meq.order...), inters: append([]Interceptor{}, meq.inters...), predicates: append([]predicate.MaintenanceEntry{}, meq.predicates...), withItem: meq.withItem.Clone(), @@ -456,6 +456,9 @@ func (meq *MaintenanceEntryQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if meq.withItem != nil { + _spec.Node.AddColumnOnce(maintenanceentry.FieldItemID) + } } if ps := meq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { diff --git a/backend/internal/data/ent/maintenanceentry_update.go b/backend/internal/data/ent/maintenanceentry_update.go index cade06b..3616d32 100644 --- a/backend/internal/data/ent/maintenanceentry_update.go +++ b/backend/internal/data/ent/maintenanceentry_update.go @@ -42,6 +42,14 @@ func (meu *MaintenanceEntryUpdate) SetItemID(u uuid.UUID) *MaintenanceEntryUpdat return meu } +// SetNillableItemID sets the "item_id" field if the given value is not nil. +func (meu *MaintenanceEntryUpdate) SetNillableItemID(u *uuid.UUID) *MaintenanceEntryUpdate { + if u != nil { + meu.SetItemID(*u) + } + return meu +} + // SetDate sets the "date" field. func (meu *MaintenanceEntryUpdate) SetDate(t time.Time) *MaintenanceEntryUpdate { meu.mutation.SetDate(t) @@ -56,12 +64,46 @@ func (meu *MaintenanceEntryUpdate) SetNillableDate(t *time.Time) *MaintenanceEnt return meu } +// ClearDate clears the value of the "date" field. +func (meu *MaintenanceEntryUpdate) ClearDate() *MaintenanceEntryUpdate { + meu.mutation.ClearDate() + return meu +} + +// SetScheduledDate sets the "scheduled_date" field. 
+func (meu *MaintenanceEntryUpdate) SetScheduledDate(t time.Time) *MaintenanceEntryUpdate { + meu.mutation.SetScheduledDate(t) + return meu +} + +// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil. +func (meu *MaintenanceEntryUpdate) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryUpdate { + if t != nil { + meu.SetScheduledDate(*t) + } + return meu +} + +// ClearScheduledDate clears the value of the "scheduled_date" field. +func (meu *MaintenanceEntryUpdate) ClearScheduledDate() *MaintenanceEntryUpdate { + meu.mutation.ClearScheduledDate() + return meu +} + // SetName sets the "name" field. func (meu *MaintenanceEntryUpdate) SetName(s string) *MaintenanceEntryUpdate { meu.mutation.SetName(s) return meu } +// SetNillableName sets the "name" field if the given value is not nil. +func (meu *MaintenanceEntryUpdate) SetNillableName(s *string) *MaintenanceEntryUpdate { + if s != nil { + meu.SetName(*s) + } + return meu +} + // SetDescription sets the "description" field. func (meu *MaintenanceEntryUpdate) SetDescription(s string) *MaintenanceEntryUpdate { meu.mutation.SetDescription(s) @@ -122,7 +164,7 @@ func (meu *MaintenanceEntryUpdate) ClearItem() *MaintenanceEntryUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (meu *MaintenanceEntryUpdate) Save(ctx context.Context) (int, error) { meu.defaults() - return withHooks[int, MaintenanceEntryMutation](ctx, meu.sqlSave, meu.mutation, meu.hooks) + return withHooks(ctx, meu.sqlSave, meu.mutation, meu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -191,6 +233,15 @@ func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err erro if value, ok := meu.mutation.Date(); ok { _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value) } + if meu.mutation.DateCleared() { + _spec.ClearField(maintenanceentry.FieldDate, field.TypeTime) + } + if value, ok := meu.mutation.ScheduledDate(); ok { + _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value) + } + if meu.mutation.ScheduledDateCleared() { + _spec.ClearField(maintenanceentry.FieldScheduledDate, field.TypeTime) + } if value, ok := meu.mutation.Name(); ok { _spec.SetField(maintenanceentry.FieldName, field.TypeString, value) } @@ -214,10 +265,7 @@ func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{maintenanceentry.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -230,10 +278,7 @@ func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err erro Columns: []string{maintenanceentry.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -273,6 +318,14 @@ func (meuo *MaintenanceEntryUpdateOne) SetItemID(u uuid.UUID) *MaintenanceEntryU return meuo } +// SetNillableItemID sets the "item_id" field if the given value is not nil. +func (meuo *MaintenanceEntryUpdateOne) SetNillableItemID(u *uuid.UUID) *MaintenanceEntryUpdateOne { + if u != nil { + meuo.SetItemID(*u) + } + return meuo +} + // SetDate sets the "date" field. 
func (meuo *MaintenanceEntryUpdateOne) SetDate(t time.Time) *MaintenanceEntryUpdateOne { meuo.mutation.SetDate(t) @@ -287,12 +340,46 @@ func (meuo *MaintenanceEntryUpdateOne) SetNillableDate(t *time.Time) *Maintenanc return meuo } +// ClearDate clears the value of the "date" field. +func (meuo *MaintenanceEntryUpdateOne) ClearDate() *MaintenanceEntryUpdateOne { + meuo.mutation.ClearDate() + return meuo +} + +// SetScheduledDate sets the "scheduled_date" field. +func (meuo *MaintenanceEntryUpdateOne) SetScheduledDate(t time.Time) *MaintenanceEntryUpdateOne { + meuo.mutation.SetScheduledDate(t) + return meuo +} + +// SetNillableScheduledDate sets the "scheduled_date" field if the given value is not nil. +func (meuo *MaintenanceEntryUpdateOne) SetNillableScheduledDate(t *time.Time) *MaintenanceEntryUpdateOne { + if t != nil { + meuo.SetScheduledDate(*t) + } + return meuo +} + +// ClearScheduledDate clears the value of the "scheduled_date" field. +func (meuo *MaintenanceEntryUpdateOne) ClearScheduledDate() *MaintenanceEntryUpdateOne { + meuo.mutation.ClearScheduledDate() + return meuo +} + // SetName sets the "name" field. func (meuo *MaintenanceEntryUpdateOne) SetName(s string) *MaintenanceEntryUpdateOne { meuo.mutation.SetName(s) return meuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (meuo *MaintenanceEntryUpdateOne) SetNillableName(s *string) *MaintenanceEntryUpdateOne { + if s != nil { + meuo.SetName(*s) + } + return meuo +} + // SetDescription sets the "description" field. func (meuo *MaintenanceEntryUpdateOne) SetDescription(s string) *MaintenanceEntryUpdateOne { meuo.mutation.SetDescription(s) @@ -366,7 +453,7 @@ func (meuo *MaintenanceEntryUpdateOne) Select(field string, fields ...string) *M // Save executes the query and returns the updated MaintenanceEntry entity. 
func (meuo *MaintenanceEntryUpdateOne) Save(ctx context.Context) (*MaintenanceEntry, error) { meuo.defaults() - return withHooks[*MaintenanceEntry, MaintenanceEntryMutation](ctx, meuo.sqlSave, meuo.mutation, meuo.hooks) + return withHooks(ctx, meuo.sqlSave, meuo.mutation, meuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -452,6 +539,15 @@ func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *Main if value, ok := meuo.mutation.Date(); ok { _spec.SetField(maintenanceentry.FieldDate, field.TypeTime, value) } + if meuo.mutation.DateCleared() { + _spec.ClearField(maintenanceentry.FieldDate, field.TypeTime) + } + if value, ok := meuo.mutation.ScheduledDate(); ok { + _spec.SetField(maintenanceentry.FieldScheduledDate, field.TypeTime, value) + } + if meuo.mutation.ScheduledDateCleared() { + _spec.ClearField(maintenanceentry.FieldScheduledDate, field.TypeTime) + } if value, ok := meuo.mutation.Name(); ok { _spec.SetField(maintenanceentry.FieldName, field.TypeString, value) } @@ -475,10 +571,7 @@ func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *Main Columns: []string{maintenanceentry.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -491,10 +584,7 @@ func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *Main Columns: []string{maintenanceentry.ItemColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: item.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/ent/migrate/schema.go b/backend/internal/data/ent/migrate/schema.go index a5349e2..2b58838 100644 --- a/backend/internal/data/ent/migrate/schema.go +++ 
b/backend/internal/data/ent/migrate/schema.go @@ -14,6 +14,7 @@ var ( {Name: "created_at", Type: field.TypeTime}, {Name: "updated_at", Type: field.TypeTime}, {Name: "type", Type: field.TypeEnum, Enums: []string{"photo", "manual", "warranty", "attachment", "receipt"}, Default: "attachment"}, + {Name: "primary", Type: field.TypeBool, Default: false}, {Name: "document_attachments", Type: field.TypeUUID}, {Name: "item_attachments", Type: field.TypeUUID}, } @@ -25,13 +26,13 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "attachments_documents_attachments", - Columns: []*schema.Column{AttachmentsColumns[4]}, + Columns: []*schema.Column{AttachmentsColumns[5]}, RefColumns: []*schema.Column{DocumentsColumns[0]}, OnDelete: schema.Cascade, }, { Symbol: "attachments_items_attachments", - Columns: []*schema.Column{AttachmentsColumns[5]}, + Columns: []*schema.Column{AttachmentsColumns[6]}, RefColumns: []*schema.Column{ItemsColumns[0]}, OnDelete: schema.Cascade, }, @@ -116,7 +117,7 @@ var ( {Name: "created_at", Type: field.TypeTime}, {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Size: 255}, - {Name: "currency", Type: field.TypeEnum, Enums: []string{"usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk", "inr", "rmb", "bgn"}, Default: "usd"}, + {Name: "currency", Type: field.TypeString, Default: "usd"}, } // GroupsTable holds the schema information for the "groups" table. 
GroupsTable = &schema.Table{ @@ -323,7 +324,8 @@ var ( {Name: "id", Type: field.TypeUUID}, {Name: "created_at", Type: field.TypeTime}, {Name: "updated_at", Type: field.TypeTime}, - {Name: "date", Type: field.TypeTime}, + {Name: "date", Type: field.TypeTime, Nullable: true}, + {Name: "scheduled_date", Type: field.TypeTime, Nullable: true}, {Name: "name", Type: field.TypeString, Size: 255}, {Name: "description", Type: field.TypeString, Nullable: true, Size: 2500}, {Name: "cost", Type: field.TypeFloat64, Default: 0}, @@ -337,12 +339,65 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "maintenance_entries_items_maintenance_entries", - Columns: []*schema.Column{MaintenanceEntriesColumns[7]}, + Columns: []*schema.Column{MaintenanceEntriesColumns[8]}, RefColumns: []*schema.Column{ItemsColumns[0]}, OnDelete: schema.Cascade, }, }, } + // NotifiersColumns holds the columns for the "notifiers" table. + NotifiersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString, Size: 255}, + {Name: "url", Type: field.TypeString, Size: 2083}, + {Name: "is_active", Type: field.TypeBool, Default: true}, + {Name: "group_id", Type: field.TypeUUID}, + {Name: "user_id", Type: field.TypeUUID}, + } + // NotifiersTable holds the schema information for the "notifiers" table. 
+ NotifiersTable = &schema.Table{ + Name: "notifiers", + Columns: NotifiersColumns, + PrimaryKey: []*schema.Column{NotifiersColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "notifiers_groups_notifiers", + Columns: []*schema.Column{NotifiersColumns[6]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "notifiers_users_notifiers", + Columns: []*schema.Column{NotifiersColumns[7]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "notifier_user_id", + Unique: false, + Columns: []*schema.Column{NotifiersColumns[7]}, + }, + { + Name: "notifier_user_id_is_active", + Unique: false, + Columns: []*schema.Column{NotifiersColumns[7], NotifiersColumns[5]}, + }, + { + Name: "notifier_group_id", + Unique: false, + Columns: []*schema.Column{NotifiersColumns[6]}, + }, + { + Name: "notifier_group_id_is_active", + Unique: false, + Columns: []*schema.Column{NotifiersColumns[6], NotifiersColumns[5]}, + }, + }, + } // UsersColumns holds the columns for the "users" table. 
UsersColumns = []*schema.Column{ {Name: "id", Type: field.TypeUUID}, @@ -352,8 +407,8 @@ var ( {Name: "email", Type: field.TypeString, Unique: true, Size: 255}, {Name: "password", Type: field.TypeString, Size: 255}, {Name: "is_superuser", Type: field.TypeBool, Default: false}, - {Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"}, {Name: "superuser", Type: field.TypeBool, Default: false}, + {Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"}, {Name: "activated_on", Type: field.TypeTime, Nullable: true}, {Name: "group_users", Type: field.TypeUUID}, } @@ -409,6 +464,7 @@ var ( LabelsTable, LocationsTable, MaintenanceEntriesTable, + NotifiersTable, UsersTable, LabelItemsTable, } @@ -429,6 +485,8 @@ func init() { LocationsTable.ForeignKeys[0].RefTable = GroupsTable LocationsTable.ForeignKeys[1].RefTable = LocationsTable MaintenanceEntriesTable.ForeignKeys[0].RefTable = ItemsTable + NotifiersTable.ForeignKeys[0].RefTable = GroupsTable + NotifiersTable.ForeignKeys[1].RefTable = UsersTable UsersTable.ForeignKeys[0].RefTable = GroupsTable LabelItemsTable.ForeignKeys[0].RefTable = LabelsTable LabelItemsTable.ForeignKeys[1].RefTable = ItemsTable diff --git a/backend/internal/data/ent/mutation.go b/backend/internal/data/ent/mutation.go index 164071e..6fa15d3 100644 --- a/backend/internal/data/ent/mutation.go +++ b/backend/internal/data/ent/mutation.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" "github.com/hay-kot/homebox/backend/internal/data/ent/authroles" @@ -21,11 +23,9 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" 
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate" "github.com/hay-kot/homebox/backend/internal/data/ent/user" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" ) const ( @@ -48,6 +48,7 @@ const ( TypeLabel = "Label" TypeLocation = "Location" TypeMaintenanceEntry = "MaintenanceEntry" + TypeNotifier = "Notifier" TypeUser = "User" ) @@ -60,6 +61,7 @@ type AttachmentMutation struct { created_at *time.Time updated_at *time.Time _type *attachment.Type + primary *bool clearedFields map[string]struct{} item *uuid.UUID cleareditem bool @@ -282,6 +284,42 @@ func (m *AttachmentMutation) ResetType() { m._type = nil } +// SetPrimary sets the "primary" field. +func (m *AttachmentMutation) SetPrimary(b bool) { + m.primary = &b +} + +// Primary returns the value of the "primary" field in the mutation. +func (m *AttachmentMutation) Primary() (r bool, exists bool) { + v := m.primary + if v == nil { + return + } + return *v, true +} + +// OldPrimary returns the old "primary" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldPrimary(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrimary is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrimary requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrimary: %w", err) + } + return oldValue.Primary, nil +} + +// ResetPrimary resets all changes to the "primary" field. +func (m *AttachmentMutation) ResetPrimary() { + m.primary = nil +} + // SetItemID sets the "item" edge to the Item entity by id. 
func (m *AttachmentMutation) SetItemID(id uuid.UUID) { m.item = &id @@ -394,7 +432,7 @@ func (m *AttachmentMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *AttachmentMutation) Fields() []string { - fields := make([]string, 0, 3) + fields := make([]string, 0, 4) if m.created_at != nil { fields = append(fields, attachment.FieldCreatedAt) } @@ -404,6 +442,9 @@ func (m *AttachmentMutation) Fields() []string { if m._type != nil { fields = append(fields, attachment.FieldType) } + if m.primary != nil { + fields = append(fields, attachment.FieldPrimary) + } return fields } @@ -418,6 +459,8 @@ func (m *AttachmentMutation) Field(name string) (ent.Value, bool) { return m.UpdatedAt() case attachment.FieldType: return m.GetType() + case attachment.FieldPrimary: + return m.Primary() } return nil, false } @@ -433,6 +476,8 @@ func (m *AttachmentMutation) OldField(ctx context.Context, name string) (ent.Val return m.OldUpdatedAt(ctx) case attachment.FieldType: return m.OldType(ctx) + case attachment.FieldPrimary: + return m.OldPrimary(ctx) } return nil, fmt.Errorf("unknown Attachment field %s", name) } @@ -463,6 +508,13 @@ func (m *AttachmentMutation) SetField(name string, value ent.Value) error { } m.SetType(v) return nil + case attachment.FieldPrimary: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrimary(v) + return nil } return fmt.Errorf("unknown Attachment field %s", name) } @@ -521,6 +573,9 @@ func (m *AttachmentMutation) ResetField(name string) error { case attachment.FieldType: m.ResetType() return nil + case attachment.FieldPrimary: + m.ResetPrimary() + return nil } return fmt.Errorf("unknown Attachment field %s", name) } @@ -2285,7 +2340,7 @@ type GroupMutation struct { created_at *time.Time updated_at *time.Time name *string - currency *group.Currency + currency *string clearedFields map[string]struct{} users 
map[uuid.UUID]struct{} removedusers map[uuid.UUID]struct{} @@ -2305,6 +2360,9 @@ type GroupMutation struct { invitation_tokens map[uuid.UUID]struct{} removedinvitation_tokens map[uuid.UUID]struct{} clearedinvitation_tokens bool + notifiers map[uuid.UUID]struct{} + removednotifiers map[uuid.UUID]struct{} + clearednotifiers bool done bool oldValue func(context.Context) (*Group, error) predicates []predicate.Group @@ -2523,12 +2581,12 @@ func (m *GroupMutation) ResetName() { } // SetCurrency sets the "currency" field. -func (m *GroupMutation) SetCurrency(gr group.Currency) { - m.currency = &gr +func (m *GroupMutation) SetCurrency(s string) { + m.currency = &s } // Currency returns the value of the "currency" field in the mutation. -func (m *GroupMutation) Currency() (r group.Currency, exists bool) { +func (m *GroupMutation) Currency() (r string, exists bool) { v := m.currency if v == nil { return @@ -2539,7 +2597,7 @@ func (m *GroupMutation) Currency() (r group.Currency, exists bool) { // OldCurrency returns the old "currency" field's value of the Group entity. // If the Group object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *GroupMutation) OldCurrency(ctx context.Context) (v group.Currency, err error) { +func (m *GroupMutation) OldCurrency(ctx context.Context) (v string, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCurrency is only allowed on UpdateOne operations") } @@ -2882,6 +2940,60 @@ func (m *GroupMutation) ResetInvitationTokens() { m.removedinvitation_tokens = nil } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by ids. 
+func (m *GroupMutation) AddNotifierIDs(ids ...uuid.UUID) { + if m.notifiers == nil { + m.notifiers = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.notifiers[ids[i]] = struct{}{} + } +} + +// ClearNotifiers clears the "notifiers" edge to the Notifier entity. +func (m *GroupMutation) ClearNotifiers() { + m.clearednotifiers = true +} + +// NotifiersCleared reports if the "notifiers" edge to the Notifier entity was cleared. +func (m *GroupMutation) NotifiersCleared() bool { + return m.clearednotifiers +} + +// RemoveNotifierIDs removes the "notifiers" edge to the Notifier entity by IDs. +func (m *GroupMutation) RemoveNotifierIDs(ids ...uuid.UUID) { + if m.removednotifiers == nil { + m.removednotifiers = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.notifiers, ids[i]) + m.removednotifiers[ids[i]] = struct{}{} + } +} + +// RemovedNotifiers returns the removed IDs of the "notifiers" edge to the Notifier entity. +func (m *GroupMutation) RemovedNotifiersIDs() (ids []uuid.UUID) { + for id := range m.removednotifiers { + ids = append(ids, id) + } + return +} + +// NotifiersIDs returns the "notifiers" edge IDs in the mutation. +func (m *GroupMutation) NotifiersIDs() (ids []uuid.UUID) { + for id := range m.notifiers { + ids = append(ids, id) + } + return +} + +// ResetNotifiers resets all changes to the "notifiers" edge. +func (m *GroupMutation) ResetNotifiers() { + m.notifiers = nil + m.clearednotifiers = false + m.removednotifiers = nil +} + // Where appends a list predicates to the GroupMutation builder. func (m *GroupMutation) Where(ps ...predicate.Group) { m.predicates = append(m.predicates, ps...) 
@@ -2993,7 +3105,7 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { m.SetName(v) return nil case group.FieldCurrency: - v, ok := value.(group.Currency) + v, ok := value.(string) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } @@ -3066,7 +3178,7 @@ func (m *GroupMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *GroupMutation) AddedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.users != nil { edges = append(edges, group.EdgeUsers) } @@ -3085,6 +3197,9 @@ func (m *GroupMutation) AddedEdges() []string { if m.invitation_tokens != nil { edges = append(edges, group.EdgeInvitationTokens) } + if m.notifiers != nil { + edges = append(edges, group.EdgeNotifiers) + } return edges } @@ -3128,13 +3243,19 @@ func (m *GroupMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeNotifiers: + ids := make([]ent.Value, 0, len(m.notifiers)) + for id := range m.notifiers { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
func (m *GroupMutation) RemovedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.removedusers != nil { edges = append(edges, group.EdgeUsers) } @@ -3153,6 +3274,9 @@ func (m *GroupMutation) RemovedEdges() []string { if m.removedinvitation_tokens != nil { edges = append(edges, group.EdgeInvitationTokens) } + if m.removednotifiers != nil { + edges = append(edges, group.EdgeNotifiers) + } return edges } @@ -3196,13 +3320,19 @@ func (m *GroupMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case group.EdgeNotifiers: + ids := make([]ent.Value, 0, len(m.removednotifiers)) + for id := range m.removednotifiers { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *GroupMutation) ClearedEdges() []string { - edges := make([]string, 0, 6) + edges := make([]string, 0, 7) if m.clearedusers { edges = append(edges, group.EdgeUsers) } @@ -3221,6 +3351,9 @@ func (m *GroupMutation) ClearedEdges() []string { if m.clearedinvitation_tokens { edges = append(edges, group.EdgeInvitationTokens) } + if m.clearednotifiers { + edges = append(edges, group.EdgeNotifiers) + } return edges } @@ -3240,6 +3373,8 @@ func (m *GroupMutation) EdgeCleared(name string) bool { return m.cleareddocuments case group.EdgeInvitationTokens: return m.clearedinvitation_tokens + case group.EdgeNotifiers: + return m.clearednotifiers } return false } @@ -3274,6 +3409,9 @@ func (m *GroupMutation) ResetEdge(name string) error { case group.EdgeInvitationTokens: m.ResetInvitationTokens() return nil + case group.EdgeNotifiers: + m.ResetNotifiers() + return nil } return fmt.Errorf("unknown Group edge %s", name) } @@ -3963,13 +4101,13 @@ type ItemMutation struct { addsold_price *float64 sold_notes *string clearedFields map[string]struct{} + group *uuid.UUID + clearedgroup bool parent *uuid.UUID clearedparent bool children map[uuid.UUID]struct{} removedchildren 
map[uuid.UUID]struct{} clearedchildren bool - group *uuid.UUID - clearedgroup bool label map[uuid.UUID]struct{} removedlabel map[uuid.UUID]struct{} clearedlabel bool @@ -5170,6 +5308,45 @@ func (m *ItemMutation) ResetSoldNotes() { delete(m.clearedFields, item.FieldSoldNotes) } +// SetGroupID sets the "group" edge to the Group entity by id. +func (m *ItemMutation) SetGroupID(id uuid.UUID) { + m.group = &id +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *ItemMutation) ClearGroup() { + m.clearedgroup = true +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *ItemMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupID returns the "group" edge ID in the mutation. +func (m *ItemMutation) GroupID() (id uuid.UUID, exists bool) { + if m.group != nil { + return *m.group, true + } + return +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *ItemMutation) GroupIDs() (ids []uuid.UUID) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *ItemMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + // SetParentID sets the "parent" edge to the Item entity by id. func (m *ItemMutation) SetParentID(id uuid.UUID) { m.parent = &id @@ -5263,45 +5440,6 @@ func (m *ItemMutation) ResetChildren() { m.removedchildren = nil } -// SetGroupID sets the "group" edge to the Group entity by id. -func (m *ItemMutation) SetGroupID(id uuid.UUID) { - m.group = &id -} - -// ClearGroup clears the "group" edge to the Group entity. -func (m *ItemMutation) ClearGroup() { - m.clearedgroup = true -} - -// GroupCleared reports if the "group" edge to the Group entity was cleared. 
-func (m *ItemMutation) GroupCleared() bool { - return m.clearedgroup -} - -// GroupID returns the "group" edge ID in the mutation. -func (m *ItemMutation) GroupID() (id uuid.UUID, exists bool) { - if m.group != nil { - return *m.group, true - } - return -} - -// GroupIDs returns the "group" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// GroupID instead. It exists only for internal usage by the builders. -func (m *ItemMutation) GroupIDs() (ids []uuid.UUID) { - if id := m.group; id != nil { - ids = append(ids, *id) - } - return -} - -// ResetGroup resets all changes to the "group" edge. -func (m *ItemMutation) ResetGroup() { - m.group = nil - m.clearedgroup = false -} - // AddLabelIDs adds the "label" edge to the Label entity by ids. func (m *ItemMutation) AddLabelIDs(ids ...uuid.UUID) { if m.label == nil { @@ -6197,15 +6335,15 @@ func (m *ItemMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *ItemMutation) AddedEdges() []string { edges := make([]string, 0, 8) + if m.group != nil { + edges = append(edges, item.EdgeGroup) + } if m.parent != nil { edges = append(edges, item.EdgeParent) } if m.children != nil { edges = append(edges, item.EdgeChildren) } - if m.group != nil { - edges = append(edges, item.EdgeGroup) - } if m.label != nil { edges = append(edges, item.EdgeLabel) } @@ -6228,6 +6366,10 @@ func (m *ItemMutation) AddedEdges() []string { // name in this mutation. 
func (m *ItemMutation) AddedIDs(name string) []ent.Value { switch name { + case item.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } case item.EdgeParent: if id := m.parent; id != nil { return []ent.Value{*id} @@ -6238,10 +6380,6 @@ func (m *ItemMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids - case item.EdgeGroup: - if id := m.group; id != nil { - return []ent.Value{*id} - } case item.EdgeLabel: ids := make([]ent.Value, 0, len(m.label)) for id := range m.label { @@ -6336,15 +6474,15 @@ func (m *ItemMutation) RemovedIDs(name string) []ent.Value { // ClearedEdges returns all edge names that were cleared in this mutation. func (m *ItemMutation) ClearedEdges() []string { edges := make([]string, 0, 8) + if m.clearedgroup { + edges = append(edges, item.EdgeGroup) + } if m.clearedparent { edges = append(edges, item.EdgeParent) } if m.clearedchildren { edges = append(edges, item.EdgeChildren) } - if m.clearedgroup { - edges = append(edges, item.EdgeGroup) - } if m.clearedlabel { edges = append(edges, item.EdgeLabel) } @@ -6367,12 +6505,12 @@ func (m *ItemMutation) ClearedEdges() []string { // was cleared in this mutation. func (m *ItemMutation) EdgeCleared(name string) bool { switch name { + case item.EdgeGroup: + return m.clearedgroup case item.EdgeParent: return m.clearedparent case item.EdgeChildren: return m.clearedchildren - case item.EdgeGroup: - return m.clearedgroup case item.EdgeLabel: return m.clearedlabel case item.EdgeLocation: @@ -6391,12 +6529,12 @@ func (m *ItemMutation) EdgeCleared(name string) bool { // if that edge is not defined in the schema. 
func (m *ItemMutation) ClearEdge(name string) error { switch name { - case item.EdgeParent: - m.ClearParent() - return nil case item.EdgeGroup: m.ClearGroup() return nil + case item.EdgeParent: + m.ClearParent() + return nil case item.EdgeLocation: m.ClearLocation() return nil @@ -6408,15 +6546,15 @@ func (m *ItemMutation) ClearEdge(name string) error { // It returns an error if the edge is not defined in the schema. func (m *ItemMutation) ResetEdge(name string) error { switch name { + case item.EdgeGroup: + m.ResetGroup() + return nil case item.EdgeParent: m.ResetParent() return nil case item.EdgeChildren: m.ResetChildren() return nil - case item.EdgeGroup: - m.ResetGroup() - return nil case item.EdgeLabel: m.ResetLabel() return nil @@ -8116,13 +8254,13 @@ type LocationMutation struct { name *string description *string clearedFields map[string]struct{} + group *uuid.UUID + clearedgroup bool parent *uuid.UUID clearedparent bool children map[uuid.UUID]struct{} removedchildren map[uuid.UUID]struct{} clearedchildren bool - group *uuid.UUID - clearedgroup bool items map[uuid.UUID]struct{} removeditems map[uuid.UUID]struct{} cleareditems bool @@ -8392,6 +8530,45 @@ func (m *LocationMutation) ResetDescription() { delete(m.clearedFields, location.FieldDescription) } +// SetGroupID sets the "group" edge to the Group entity by id. +func (m *LocationMutation) SetGroupID(id uuid.UUID) { + m.group = &id +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *LocationMutation) ClearGroup() { + m.clearedgroup = true +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *LocationMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupID returns the "group" edge ID in the mutation. +func (m *LocationMutation) GroupID() (id uuid.UUID, exists bool) { + if m.group != nil { + return *m.group, true + } + return +} + +// GroupIDs returns the "group" edge IDs in the mutation. 
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *LocationMutation) GroupIDs() (ids []uuid.UUID) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *LocationMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + // SetParentID sets the "parent" edge to the Location entity by id. func (m *LocationMutation) SetParentID(id uuid.UUID) { m.parent = &id @@ -8485,45 +8662,6 @@ func (m *LocationMutation) ResetChildren() { m.removedchildren = nil } -// SetGroupID sets the "group" edge to the Group entity by id. -func (m *LocationMutation) SetGroupID(id uuid.UUID) { - m.group = &id -} - -// ClearGroup clears the "group" edge to the Group entity. -func (m *LocationMutation) ClearGroup() { - m.clearedgroup = true -} - -// GroupCleared reports if the "group" edge to the Group entity was cleared. -func (m *LocationMutation) GroupCleared() bool { - return m.clearedgroup -} - -// GroupID returns the "group" edge ID in the mutation. -func (m *LocationMutation) GroupID() (id uuid.UUID, exists bool) { - if m.group != nil { - return *m.group, true - } - return -} - -// GroupIDs returns the "group" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// GroupID instead. It exists only for internal usage by the builders. -func (m *LocationMutation) GroupIDs() (ids []uuid.UUID) { - if id := m.group; id != nil { - ids = append(ids, *id) - } - return -} - -// ResetGroup resets all changes to the "group" edge. -func (m *LocationMutation) ResetGroup() { - m.group = nil - m.clearedgroup = false -} - // AddItemIDs adds the "items" edge to the Item entity by ids. 
func (m *LocationMutation) AddItemIDs(ids ...uuid.UUID) { if m.items == nil { @@ -8772,15 +8910,15 @@ func (m *LocationMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *LocationMutation) AddedEdges() []string { edges := make([]string, 0, 4) + if m.group != nil { + edges = append(edges, location.EdgeGroup) + } if m.parent != nil { edges = append(edges, location.EdgeParent) } if m.children != nil { edges = append(edges, location.EdgeChildren) } - if m.group != nil { - edges = append(edges, location.EdgeGroup) - } if m.items != nil { edges = append(edges, location.EdgeItems) } @@ -8791,6 +8929,10 @@ func (m *LocationMutation) AddedEdges() []string { // name in this mutation. func (m *LocationMutation) AddedIDs(name string) []ent.Value { switch name { + case location.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } case location.EdgeParent: if id := m.parent; id != nil { return []ent.Value{*id} @@ -8801,10 +8943,6 @@ func (m *LocationMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids - case location.EdgeGroup: - if id := m.group; id != nil { - return []ent.Value{*id} - } case location.EdgeItems: ids := make([]ent.Value, 0, len(m.items)) for id := range m.items { @@ -8850,15 +8988,15 @@ func (m *LocationMutation) RemovedIDs(name string) []ent.Value { // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *LocationMutation) ClearedEdges() []string { edges := make([]string, 0, 4) + if m.clearedgroup { + edges = append(edges, location.EdgeGroup) + } if m.clearedparent { edges = append(edges, location.EdgeParent) } if m.clearedchildren { edges = append(edges, location.EdgeChildren) } - if m.clearedgroup { - edges = append(edges, location.EdgeGroup) - } if m.cleareditems { edges = append(edges, location.EdgeItems) } @@ -8869,12 +9007,12 @@ func (m *LocationMutation) ClearedEdges() []string { // was cleared in this mutation. func (m *LocationMutation) EdgeCleared(name string) bool { switch name { + case location.EdgeGroup: + return m.clearedgroup case location.EdgeParent: return m.clearedparent case location.EdgeChildren: return m.clearedchildren - case location.EdgeGroup: - return m.clearedgroup case location.EdgeItems: return m.cleareditems } @@ -8885,12 +9023,12 @@ func (m *LocationMutation) EdgeCleared(name string) bool { // if that edge is not defined in the schema. func (m *LocationMutation) ClearEdge(name string) error { switch name { - case location.EdgeParent: - m.ClearParent() - return nil case location.EdgeGroup: m.ClearGroup() return nil + case location.EdgeParent: + m.ClearParent() + return nil } return fmt.Errorf("unknown Location unique edge %s", name) } @@ -8899,15 +9037,15 @@ func (m *LocationMutation) ClearEdge(name string) error { // It returns an error if the edge is not defined in the schema. func (m *LocationMutation) ResetEdge(name string) error { switch name { + case location.EdgeGroup: + m.ResetGroup() + return nil case location.EdgeParent: m.ResetParent() return nil case location.EdgeChildren: m.ResetChildren() return nil - case location.EdgeGroup: - m.ResetGroup() - return nil case location.EdgeItems: m.ResetItems() return nil @@ -8918,22 +9056,23 @@ func (m *LocationMutation) ResetEdge(name string) error { // MaintenanceEntryMutation represents an operation that mutates the MaintenanceEntry nodes in the graph. 
type MaintenanceEntryMutation struct { config - op Op - typ string - id *uuid.UUID - created_at *time.Time - updated_at *time.Time - date *time.Time - name *string - description *string - cost *float64 - addcost *float64 - clearedFields map[string]struct{} - item *uuid.UUID - cleareditem bool - done bool - oldValue func(context.Context) (*MaintenanceEntry, error) - predicates []predicate.MaintenanceEntry + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + date *time.Time + scheduled_date *time.Time + name *string + description *string + cost *float64 + addcost *float64 + clearedFields map[string]struct{} + item *uuid.UUID + cleareditem bool + done bool + oldValue func(context.Context) (*MaintenanceEntry, error) + predicates []predicate.MaintenanceEntry } var _ ent.Mutation = (*MaintenanceEntryMutation)(nil) @@ -9179,9 +9318,71 @@ func (m *MaintenanceEntryMutation) OldDate(ctx context.Context) (v time.Time, er return oldValue.Date, nil } +// ClearDate clears the value of the "date" field. +func (m *MaintenanceEntryMutation) ClearDate() { + m.date = nil + m.clearedFields[maintenanceentry.FieldDate] = struct{}{} +} + +// DateCleared returns if the "date" field was cleared in this mutation. +func (m *MaintenanceEntryMutation) DateCleared() bool { + _, ok := m.clearedFields[maintenanceentry.FieldDate] + return ok +} + // ResetDate resets all changes to the "date" field. func (m *MaintenanceEntryMutation) ResetDate() { m.date = nil + delete(m.clearedFields, maintenanceentry.FieldDate) +} + +// SetScheduledDate sets the "scheduled_date" field. +func (m *MaintenanceEntryMutation) SetScheduledDate(t time.Time) { + m.scheduled_date = &t +} + +// ScheduledDate returns the value of the "scheduled_date" field in the mutation. 
+func (m *MaintenanceEntryMutation) ScheduledDate() (r time.Time, exists bool) { + v := m.scheduled_date + if v == nil { + return + } + return *v, true +} + +// OldScheduledDate returns the old "scheduled_date" field's value of the MaintenanceEntry entity. +// If the MaintenanceEntry object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MaintenanceEntryMutation) OldScheduledDate(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScheduledDate is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScheduledDate requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScheduledDate: %w", err) + } + return oldValue.ScheduledDate, nil +} + +// ClearScheduledDate clears the value of the "scheduled_date" field. +func (m *MaintenanceEntryMutation) ClearScheduledDate() { + m.scheduled_date = nil + m.clearedFields[maintenanceentry.FieldScheduledDate] = struct{}{} +} + +// ScheduledDateCleared returns if the "scheduled_date" field was cleared in this mutation. +func (m *MaintenanceEntryMutation) ScheduledDateCleared() bool { + _, ok := m.clearedFields[maintenanceentry.FieldScheduledDate] + return ok +} + +// ResetScheduledDate resets all changes to the "scheduled_date" field. +func (m *MaintenanceEntryMutation) ResetScheduledDate() { + m.scheduled_date = nil + delete(m.clearedFields, maintenanceentry.FieldScheduledDate) } // SetName sets the "name" field. @@ -9328,6 +9529,7 @@ func (m *MaintenanceEntryMutation) ResetCost() { // ClearItem clears the "item" edge to the Item entity. 
func (m *MaintenanceEntryMutation) ClearItem() { m.cleareditem = true + m.clearedFields[maintenanceentry.FieldItemID] = struct{}{} } // ItemCleared reports if the "item" edge to the Item entity was cleared. @@ -9385,7 +9587,7 @@ func (m *MaintenanceEntryMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *MaintenanceEntryMutation) Fields() []string { - fields := make([]string, 0, 7) + fields := make([]string, 0, 8) if m.created_at != nil { fields = append(fields, maintenanceentry.FieldCreatedAt) } @@ -9398,6 +9600,9 @@ func (m *MaintenanceEntryMutation) Fields() []string { if m.date != nil { fields = append(fields, maintenanceentry.FieldDate) } + if m.scheduled_date != nil { + fields = append(fields, maintenanceentry.FieldScheduledDate) + } if m.name != nil { fields = append(fields, maintenanceentry.FieldName) } @@ -9423,6 +9628,8 @@ func (m *MaintenanceEntryMutation) Field(name string) (ent.Value, bool) { return m.ItemID() case maintenanceentry.FieldDate: return m.Date() + case maintenanceentry.FieldScheduledDate: + return m.ScheduledDate() case maintenanceentry.FieldName: return m.Name() case maintenanceentry.FieldDescription: @@ -9446,6 +9653,8 @@ func (m *MaintenanceEntryMutation) OldField(ctx context.Context, name string) (e return m.OldItemID(ctx) case maintenanceentry.FieldDate: return m.OldDate(ctx) + case maintenanceentry.FieldScheduledDate: + return m.OldScheduledDate(ctx) case maintenanceentry.FieldName: return m.OldName(ctx) case maintenanceentry.FieldDescription: @@ -9489,6 +9698,13 @@ func (m *MaintenanceEntryMutation) SetField(name string, value ent.Value) error } m.SetDate(v) return nil + case maintenanceentry.FieldScheduledDate: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScheduledDate(v) + return nil case maintenanceentry.FieldName: v, ok := value.(string) if !ok { @@ -9555,6 +9771,12 @@ func (m 
*MaintenanceEntryMutation) AddField(name string, value ent.Value) error // mutation. func (m *MaintenanceEntryMutation) ClearedFields() []string { var fields []string + if m.FieldCleared(maintenanceentry.FieldDate) { + fields = append(fields, maintenanceentry.FieldDate) + } + if m.FieldCleared(maintenanceentry.FieldScheduledDate) { + fields = append(fields, maintenanceentry.FieldScheduledDate) + } if m.FieldCleared(maintenanceentry.FieldDescription) { fields = append(fields, maintenanceentry.FieldDescription) } @@ -9572,6 +9794,12 @@ func (m *MaintenanceEntryMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MaintenanceEntryMutation) ClearField(name string) error { switch name { + case maintenanceentry.FieldDate: + m.ClearDate() + return nil + case maintenanceentry.FieldScheduledDate: + m.ClearScheduledDate() + return nil case maintenanceentry.FieldDescription: m.ClearDescription() return nil @@ -9595,6 +9823,9 @@ func (m *MaintenanceEntryMutation) ResetField(name string) error { case maintenanceentry.FieldDate: m.ResetDate() return nil + case maintenanceentry.FieldScheduledDate: + m.ResetScheduledDate() + return nil case maintenanceentry.FieldName: m.ResetName() return nil @@ -9682,6 +9913,762 @@ func (m *MaintenanceEntryMutation) ResetEdge(name string) error { return fmt.Errorf("unknown MaintenanceEntry edge %s", name) } +// NotifierMutation represents an operation that mutates the Notifier nodes in the graph. 
+type NotifierMutation struct { + config + op Op + typ string + id *uuid.UUID + created_at *time.Time + updated_at *time.Time + name *string + url *string + is_active *bool + clearedFields map[string]struct{} + group *uuid.UUID + clearedgroup bool + user *uuid.UUID + cleareduser bool + done bool + oldValue func(context.Context) (*Notifier, error) + predicates []predicate.Notifier +} + +var _ ent.Mutation = (*NotifierMutation)(nil) + +// notifierOption allows management of the mutation configuration using functional options. +type notifierOption func(*NotifierMutation) + +// newNotifierMutation creates new mutation for the Notifier entity. +func newNotifierMutation(c config, op Op, opts ...notifierOption) *NotifierMutation { + m := &NotifierMutation{ + config: c, + op: op, + typ: TypeNotifier, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withNotifierID sets the ID field of the mutation. +func withNotifierID(id uuid.UUID) notifierOption { + return func(m *NotifierMutation) { + var ( + err error + once sync.Once + value *Notifier + ) + m.oldValue = func(ctx context.Context) (*Notifier, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Notifier.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withNotifier sets the old Notifier of the mutation. +func withNotifier(node *Notifier) notifierOption { + return func(m *NotifierMutation) { + m.oldValue = func(context.Context) (*Notifier, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m NotifierMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m NotifierMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Notifier entities. +func (m *NotifierMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *NotifierMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *NotifierMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Notifier.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *NotifierMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *NotifierMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NotifierMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *NotifierMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *NotifierMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *NotifierMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NotifierMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *NotifierMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetGroupID sets the "group_id" field. +func (m *NotifierMutation) SetGroupID(u uuid.UUID) { + m.group = &u +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *NotifierMutation) GroupID() (r uuid.UUID, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NotifierMutation) OldGroupID(ctx context.Context) (v uuid.UUID, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *NotifierMutation) ResetGroupID() { + m.group = nil +} + +// SetUserID sets the "user_id" field. 
+func (m *NotifierMutation) SetUserID(u uuid.UUID) { + m.user = &u +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *NotifierMutation) UserID() (r uuid.UUID, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NotifierMutation) OldUserID(ctx context.Context) (v uuid.UUID, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *NotifierMutation) ResetUserID() { + m.user = nil +} + +// SetName sets the "name" field. +func (m *NotifierMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *NotifierMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *NotifierMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *NotifierMutation) ResetName() { + m.name = nil +} + +// SetURL sets the "url" field. +func (m *NotifierMutation) SetURL(s string) { + m.url = &s +} + +// URL returns the value of the "url" field in the mutation. +func (m *NotifierMutation) URL() (r string, exists bool) { + v := m.url + if v == nil { + return + } + return *v, true +} + +// OldURL returns the old "url" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NotifierMutation) OldURL(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldURL is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldURL requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldURL: %w", err) + } + return oldValue.URL, nil +} + +// ResetURL resets all changes to the "url" field. +func (m *NotifierMutation) ResetURL() { + m.url = nil +} + +// SetIsActive sets the "is_active" field. +func (m *NotifierMutation) SetIsActive(b bool) { + m.is_active = &b +} + +// IsActive returns the value of the "is_active" field in the mutation. 
+func (m *NotifierMutation) IsActive() (r bool, exists bool) { + v := m.is_active + if v == nil { + return + } + return *v, true +} + +// OldIsActive returns the old "is_active" field's value of the Notifier entity. +// If the Notifier object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *NotifierMutation) OldIsActive(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsActive is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsActive requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsActive: %w", err) + } + return oldValue.IsActive, nil +} + +// ResetIsActive resets all changes to the "is_active" field. +func (m *NotifierMutation) ResetIsActive() { + m.is_active = nil +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *NotifierMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[notifier.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *NotifierMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *NotifierMutation) GroupIDs() (ids []uuid.UUID) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *NotifierMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// ClearUser clears the "user" edge to the User entity. 
+func (m *NotifierMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[notifier.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *NotifierMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *NotifierMutation) UserIDs() (ids []uuid.UUID) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *NotifierMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the NotifierMutation builder. +func (m *NotifierMutation) Where(ps ...predicate.Notifier) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the NotifierMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *NotifierMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Notifier, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *NotifierMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *NotifierMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Notifier). +func (m *NotifierMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *NotifierMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.created_at != nil { + fields = append(fields, notifier.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, notifier.FieldUpdatedAt) + } + if m.group != nil { + fields = append(fields, notifier.FieldGroupID) + } + if m.user != nil { + fields = append(fields, notifier.FieldUserID) + } + if m.name != nil { + fields = append(fields, notifier.FieldName) + } + if m.url != nil { + fields = append(fields, notifier.FieldURL) + } + if m.is_active != nil { + fields = append(fields, notifier.FieldIsActive) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *NotifierMutation) Field(name string) (ent.Value, bool) { + switch name { + case notifier.FieldCreatedAt: + return m.CreatedAt() + case notifier.FieldUpdatedAt: + return m.UpdatedAt() + case notifier.FieldGroupID: + return m.GroupID() + case notifier.FieldUserID: + return m.UserID() + case notifier.FieldName: + return m.Name() + case notifier.FieldURL: + return m.URL() + case notifier.FieldIsActive: + return m.IsActive() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *NotifierMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case notifier.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case notifier.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case notifier.FieldGroupID: + return m.OldGroupID(ctx) + case notifier.FieldUserID: + return m.OldUserID(ctx) + case notifier.FieldName: + return m.OldName(ctx) + case notifier.FieldURL: + return m.OldURL(ctx) + case notifier.FieldIsActive: + return m.OldIsActive(ctx) + } + return nil, fmt.Errorf("unknown Notifier field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NotifierMutation) SetField(name string, value ent.Value) error { + switch name { + case notifier.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case notifier.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case notifier.FieldGroupID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case notifier.FieldUserID: + v, ok := value.(uuid.UUID) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case notifier.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case notifier.FieldURL: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetURL(v) + return nil + case notifier.FieldIsActive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetIsActive(v) + return nil + } + return fmt.Errorf("unknown Notifier field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *NotifierMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *NotifierMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NotifierMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Notifier numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *NotifierMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *NotifierMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *NotifierMutation) ClearField(name string) error { + return fmt.Errorf("unknown Notifier nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *NotifierMutation) ResetField(name string) error { + switch name { + case notifier.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case notifier.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case notifier.FieldGroupID: + m.ResetGroupID() + return nil + case notifier.FieldUserID: + m.ResetUserID() + return nil + case notifier.FieldName: + m.ResetName() + return nil + case notifier.FieldURL: + m.ResetURL() + return nil + case notifier.FieldIsActive: + m.ResetIsActive() + return nil + } + return fmt.Errorf("unknown Notifier field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *NotifierMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.group != nil { + edges = append(edges, notifier.EdgeGroup) + } + if m.user != nil { + edges = append(edges, notifier.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *NotifierMutation) AddedIDs(name string) []ent.Value { + switch name { + case notifier.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + case notifier.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *NotifierMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *NotifierMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *NotifierMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedgroup { + edges = append(edges, notifier.EdgeGroup) + } + if m.cleareduser { + edges = append(edges, notifier.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *NotifierMutation) EdgeCleared(name string) bool { + switch name { + case notifier.EdgeGroup: + return m.clearedgroup + case notifier.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *NotifierMutation) ClearEdge(name string) error { + switch name { + case notifier.EdgeGroup: + m.ClearGroup() + return nil + case notifier.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown Notifier unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *NotifierMutation) ResetEdge(name string) error { + switch name { + case notifier.EdgeGroup: + m.ResetGroup() + return nil + case notifier.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown Notifier edge %s", name) +} + // UserMutation represents an operation that mutates the User nodes in the graph. 
type UserMutation struct { config @@ -9694,8 +10681,8 @@ type UserMutation struct { email *string password *string is_superuser *bool - role *user.Role superuser *bool + role *user.Role activated_on *time.Time clearedFields map[string]struct{} group *uuid.UUID @@ -9703,6 +10690,9 @@ type UserMutation struct { auth_tokens map[uuid.UUID]struct{} removedauth_tokens map[uuid.UUID]struct{} clearedauth_tokens bool + notifiers map[uuid.UUID]struct{} + removednotifiers map[uuid.UUID]struct{} + clearednotifiers bool done bool oldValue func(context.Context) (*User, error) predicates []predicate.User @@ -10028,42 +11018,6 @@ func (m *UserMutation) ResetIsSuperuser() { m.is_superuser = nil } -// SetRole sets the "role" field. -func (m *UserMutation) SetRole(u user.Role) { - m.role = &u -} - -// Role returns the value of the "role" field in the mutation. -func (m *UserMutation) Role() (r user.Role, exists bool) { - v := m.role - if v == nil { - return - } - return *v, true -} - -// OldRole returns the old "role" field's value of the User entity. -// If the User object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *UserMutation) OldRole(ctx context.Context) (v user.Role, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRole is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRole requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRole: %w", err) - } - return oldValue.Role, nil -} - -// ResetRole resets all changes to the "role" field. -func (m *UserMutation) ResetRole() { - m.role = nil -} - // SetSuperuser sets the "superuser" field. 
func (m *UserMutation) SetSuperuser(b bool) { m.superuser = &b @@ -10100,6 +11054,42 @@ func (m *UserMutation) ResetSuperuser() { m.superuser = nil } +// SetRole sets the "role" field. +func (m *UserMutation) SetRole(u user.Role) { + m.role = &u +} + +// Role returns the value of the "role" field in the mutation. +func (m *UserMutation) Role() (r user.Role, exists bool) { + v := m.role + if v == nil { + return + } + return *v, true +} + +// OldRole returns the old "role" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldRole(ctx context.Context) (v user.Role, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRole is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRole requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRole: %w", err) + } + return oldValue.Role, nil +} + +// ResetRole resets all changes to the "role" field. +func (m *UserMutation) ResetRole() { + m.role = nil +} + // SetActivatedOn sets the "activated_on" field. func (m *UserMutation) SetActivatedOn(t time.Time) { m.activated_on = &t @@ -10242,6 +11232,60 @@ func (m *UserMutation) ResetAuthTokens() { m.removedauth_tokens = nil } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by ids. +func (m *UserMutation) AddNotifierIDs(ids ...uuid.UUID) { + if m.notifiers == nil { + m.notifiers = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.notifiers[ids[i]] = struct{}{} + } +} + +// ClearNotifiers clears the "notifiers" edge to the Notifier entity. 
+func (m *UserMutation) ClearNotifiers() { + m.clearednotifiers = true +} + +// NotifiersCleared reports if the "notifiers" edge to the Notifier entity was cleared. +func (m *UserMutation) NotifiersCleared() bool { + return m.clearednotifiers +} + +// RemoveNotifierIDs removes the "notifiers" edge to the Notifier entity by IDs. +func (m *UserMutation) RemoveNotifierIDs(ids ...uuid.UUID) { + if m.removednotifiers == nil { + m.removednotifiers = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.notifiers, ids[i]) + m.removednotifiers[ids[i]] = struct{}{} + } +} + +// RemovedNotifiers returns the removed IDs of the "notifiers" edge to the Notifier entity. +func (m *UserMutation) RemovedNotifiersIDs() (ids []uuid.UUID) { + for id := range m.removednotifiers { + ids = append(ids, id) + } + return +} + +// NotifiersIDs returns the "notifiers" edge IDs in the mutation. +func (m *UserMutation) NotifiersIDs() (ids []uuid.UUID) { + for id := range m.notifiers { + ids = append(ids, id) + } + return +} + +// ResetNotifiers resets all changes to the "notifiers" edge. +func (m *UserMutation) ResetNotifiers() { + m.notifiers = nil + m.clearednotifiers = false + m.removednotifiers = nil +} + // Where appends a list predicates to the UserMutation builder. func (m *UserMutation) Where(ps ...predicate.User) { m.predicates = append(m.predicates, ps...) 
@@ -10295,12 +11339,12 @@ func (m *UserMutation) Fields() []string { if m.is_superuser != nil { fields = append(fields, user.FieldIsSuperuser) } - if m.role != nil { - fields = append(fields, user.FieldRole) - } if m.superuser != nil { fields = append(fields, user.FieldSuperuser) } + if m.role != nil { + fields = append(fields, user.FieldRole) + } if m.activated_on != nil { fields = append(fields, user.FieldActivatedOn) } @@ -10324,10 +11368,10 @@ func (m *UserMutation) Field(name string) (ent.Value, bool) { return m.Password() case user.FieldIsSuperuser: return m.IsSuperuser() - case user.FieldRole: - return m.Role() case user.FieldSuperuser: return m.Superuser() + case user.FieldRole: + return m.Role() case user.FieldActivatedOn: return m.ActivatedOn() } @@ -10351,10 +11395,10 @@ func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, er return m.OldPassword(ctx) case user.FieldIsSuperuser: return m.OldIsSuperuser(ctx) - case user.FieldRole: - return m.OldRole(ctx) case user.FieldSuperuser: return m.OldSuperuser(ctx) + case user.FieldRole: + return m.OldRole(ctx) case user.FieldActivatedOn: return m.OldActivatedOn(ctx) } @@ -10408,13 +11452,6 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetIsSuperuser(v) return nil - case user.FieldRole: - v, ok := value.(user.Role) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRole(v) - return nil case user.FieldSuperuser: v, ok := value.(bool) if !ok { @@ -10422,6 +11459,13 @@ func (m *UserMutation) SetField(name string, value ent.Value) error { } m.SetSuperuser(v) return nil + case user.FieldRole: + v, ok := value.(user.Role) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRole(v) + return nil case user.FieldActivatedOn: v, ok := value.(time.Time) if !ok { @@ -10505,12 +11549,12 @@ func (m *UserMutation) ResetField(name string) error { case user.FieldIsSuperuser: m.ResetIsSuperuser() 
return nil - case user.FieldRole: - m.ResetRole() - return nil case user.FieldSuperuser: m.ResetSuperuser() return nil + case user.FieldRole: + m.ResetRole() + return nil case user.FieldActivatedOn: m.ResetActivatedOn() return nil @@ -10520,13 +11564,16 @@ func (m *UserMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *UserMutation) AddedEdges() []string { - edges := make([]string, 0, 2) + edges := make([]string, 0, 3) if m.group != nil { edges = append(edges, user.EdgeGroup) } if m.auth_tokens != nil { edges = append(edges, user.EdgeAuthTokens) } + if m.notifiers != nil { + edges = append(edges, user.EdgeNotifiers) + } return edges } @@ -10544,16 +11591,25 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeNotifiers: + ids := make([]ent.Value, 0, len(m.notifiers)) + for id := range m.notifiers { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. func (m *UserMutation) RemovedEdges() []string { - edges := make([]string, 0, 2) + edges := make([]string, 0, 3) if m.removedauth_tokens != nil { edges = append(edges, user.EdgeAuthTokens) } + if m.removednotifiers != nil { + edges = append(edges, user.EdgeNotifiers) + } return edges } @@ -10567,19 +11623,28 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgeNotifiers: + ids := make([]ent.Value, 0, len(m.removednotifiers)) + for id := range m.removednotifiers { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. 
func (m *UserMutation) ClearedEdges() []string { - edges := make([]string, 0, 2) + edges := make([]string, 0, 3) if m.clearedgroup { edges = append(edges, user.EdgeGroup) } if m.clearedauth_tokens { edges = append(edges, user.EdgeAuthTokens) } + if m.clearednotifiers { + edges = append(edges, user.EdgeNotifiers) + } return edges } @@ -10591,6 +11656,8 @@ func (m *UserMutation) EdgeCleared(name string) bool { return m.clearedgroup case user.EdgeAuthTokens: return m.clearedauth_tokens + case user.EdgeNotifiers: + return m.clearednotifiers } return false } @@ -10616,6 +11683,9 @@ func (m *UserMutation) ResetEdge(name string) error { case user.EdgeAuthTokens: m.ResetAuthTokens() return nil + case user.EdgeNotifiers: + m.ResetNotifiers() + return nil } return fmt.Errorf("unknown User edge %s", name) } diff --git a/backend/internal/data/ent/notifier.go b/backend/internal/data/ent/notifier.go new file mode 100644 index 0000000..05a267b --- /dev/null +++ b/backend/internal/data/ent/notifier.go @@ -0,0 +1,226 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" + "github.com/hay-kot/homebox/backend/internal/data/ent/user" +) + +// Notifier is the model entity for the Notifier schema. +type Notifier struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID uuid.UUID `json:"group_id,omitempty"` + // UserID holds the value of the "user_id" field. 
+ UserID uuid.UUID `json:"user_id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // URL holds the value of the "url" field. + URL string `json:"-"` + // IsActive holds the value of the "is_active" field. + IsActive bool `json:"is_active,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the NotifierQuery when eager-loading is set. + Edges NotifierEdges `json:"edges"` + selectValues sql.SelectValues +} + +// NotifierEdges holds the relations/edges for other nodes in the graph. +type NotifierEdges struct { + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e NotifierEdges) GroupOrErr() (*Group, error) { + if e.loadedTypes[0] { + if e.Group == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: group.Label} + } + return e.Group, nil + } + return nil, &NotLoadedError{edge: "group"} +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e NotifierEdges) UserOrErr() (*User, error) { + if e.loadedTypes[1] { + if e.User == nil { + // Edge was loaded but was not found. + return nil, &NotFoundError{label: user.Label} + } + return e.User, nil + } + return nil, &NotLoadedError{edge: "user"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Notifier) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case notifier.FieldIsActive: + values[i] = new(sql.NullBool) + case notifier.FieldName, notifier.FieldURL: + values[i] = new(sql.NullString) + case notifier.FieldCreatedAt, notifier.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case notifier.FieldID, notifier.FieldGroupID, notifier.FieldUserID: + values[i] = new(uuid.UUID) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Notifier fields. +func (n *Notifier) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case notifier.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + n.ID = *value + } + case notifier.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + n.CreatedAt = value.Time + } + case notifier.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + n.UpdatedAt = value.Time + } + case notifier.FieldGroupID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value != nil { + n.GroupID = *value + } + case notifier.FieldUserID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value != nil { + n.UserID = *value + } + case notifier.FieldName: + if value, ok := 
values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + n.Name = value.String + } + case notifier.FieldURL: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field url", values[i]) + } else if value.Valid { + n.URL = value.String + } + case notifier.FieldIsActive: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field is_active", values[i]) + } else if value.Valid { + n.IsActive = value.Bool + } + default: + n.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Notifier. +// This includes values selected through modifiers, order, etc. +func (n *Notifier) Value(name string) (ent.Value, error) { + return n.selectValues.Get(name) +} + +// QueryGroup queries the "group" edge of the Notifier entity. +func (n *Notifier) QueryGroup() *GroupQuery { + return NewNotifierClient(n.config).QueryGroup(n) +} + +// QueryUser queries the "user" edge of the Notifier entity. +func (n *Notifier) QueryUser() *UserQuery { + return NewNotifierClient(n.config).QueryUser(n) +} + +// Update returns a builder for updating this Notifier. +// Note that you need to call Notifier.Unwrap() before calling this method if this Notifier +// was returned from a transaction, and the transaction was committed or rolled back. +func (n *Notifier) Update() *NotifierUpdateOne { + return NewNotifierClient(n.config).UpdateOne(n) +} + +// Unwrap unwraps the Notifier entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (n *Notifier) Unwrap() *Notifier { + _tx, ok := n.config.driver.(*txDriver) + if !ok { + panic("ent: Notifier is not a transactional entity") + } + n.config.driver = _tx.drv + return n +} + +// String implements the fmt.Stringer. +func (n *Notifier) String() string { + var builder strings.Builder + builder.WriteString("Notifier(") + builder.WriteString(fmt.Sprintf("id=%v, ", n.ID)) + builder.WriteString("created_at=") + builder.WriteString(n.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(n.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", n.GroupID)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", n.UserID)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(n.Name) + builder.WriteString(", ") + builder.WriteString("url=") + builder.WriteString(", ") + builder.WriteString("is_active=") + builder.WriteString(fmt.Sprintf("%v", n.IsActive)) + builder.WriteByte(')') + return builder.String() +} + +// Notifiers is a parsable slice of Notifier. +type Notifiers []*Notifier diff --git a/backend/internal/data/ent/notifier/notifier.go b/backend/internal/data/ent/notifier/notifier.go new file mode 100644 index 0000000..d24b6bc --- /dev/null +++ b/backend/internal/data/ent/notifier/notifier.go @@ -0,0 +1,162 @@ +// Code generated by ent, DO NOT EDIT. + +package notifier + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" +) + +const ( + // Label holds the string label denoting the notifier type in the database. + Label = "notifier" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. 
+ FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldURL holds the string denoting the url field in the database. + FieldURL = "url" + // FieldIsActive holds the string denoting the is_active field in the database. + FieldIsActive = "is_active" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // Table holds the table name of the notifier in the database. + Table = "notifiers" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "notifiers" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" + // UserTable is the table that holds the user relation/edge. + UserTable = "notifiers" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" +) + +// Columns holds all SQL columns for notifier fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldGroupID, + FieldUserID, + FieldName, + FieldURL, + FieldIsActive, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // URLValidator is a validator for the "url" field. It is called by the builders before save. + URLValidator func(string) error + // DefaultIsActive holds the default value on creation for the "is_active" field. + DefaultIsActive bool + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the Notifier queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. 
+func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByURL orders the results by the url field. +func ByURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURL, opts...).ToFunc() +} + +// ByIsActive orders the results by the is_active field. +func ByIsActive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsActive, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} diff --git a/backend/internal/data/ent/notifier/where.go b/backend/internal/data/ent/notifier/where.go new file mode 100644 index 0000000..fa9b3bc --- /dev/null +++ b/backend/internal/data/ent/notifier/where.go @@ -0,0 +1,413 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package notifier + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
+func UpdatedAt(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldGroupID, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldUserID, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldName, v)) +} + +// URL applies equality check predicate on the "url" field. It's identical to URLEQ. +func URL(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldURL, v)) +} + +// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ. +func IsActive(v bool) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldIsActive, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
+func UpdatedAtLT(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Notifier { + return predicate.Notifier(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...uuid.UUID) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldUserID, vs...)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. 
+func NameNEQ(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. 
+func NameContainsFold(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldContainsFold(FieldName, v)) +} + +// URLEQ applies the EQ predicate on the "url" field. +func URLEQ(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldURL, v)) +} + +// URLNEQ applies the NEQ predicate on the "url" field. +func URLNEQ(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldURL, v)) +} + +// URLIn applies the In predicate on the "url" field. +func URLIn(vs ...string) predicate.Notifier { + return predicate.Notifier(sql.FieldIn(FieldURL, vs...)) +} + +// URLNotIn applies the NotIn predicate on the "url" field. +func URLNotIn(vs ...string) predicate.Notifier { + return predicate.Notifier(sql.FieldNotIn(FieldURL, vs...)) +} + +// URLGT applies the GT predicate on the "url" field. +func URLGT(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldGT(FieldURL, v)) +} + +// URLGTE applies the GTE predicate on the "url" field. +func URLGTE(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldGTE(FieldURL, v)) +} + +// URLLT applies the LT predicate on the "url" field. +func URLLT(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldLT(FieldURL, v)) +} + +// URLLTE applies the LTE predicate on the "url" field. +func URLLTE(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldLTE(FieldURL, v)) +} + +// URLContains applies the Contains predicate on the "url" field. +func URLContains(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldContains(FieldURL, v)) +} + +// URLHasPrefix applies the HasPrefix predicate on the "url" field. +func URLHasPrefix(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldHasPrefix(FieldURL, v)) +} + +// URLHasSuffix applies the HasSuffix predicate on the "url" field. 
+func URLHasSuffix(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldHasSuffix(FieldURL, v)) +} + +// URLEqualFold applies the EqualFold predicate on the "url" field. +func URLEqualFold(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldEqualFold(FieldURL, v)) +} + +// URLContainsFold applies the ContainsFold predicate on the "url" field. +func URLContainsFold(v string) predicate.Notifier { + return predicate.Notifier(sql.FieldContainsFold(FieldURL, v)) +} + +// IsActiveEQ applies the EQ predicate on the "is_active" field. +func IsActiveEQ(v bool) predicate.Notifier { + return predicate.Notifier(sql.FieldEQ(FieldIsActive, v)) +} + +// IsActiveNEQ applies the NEQ predicate on the "is_active" field. +func IsActiveNEQ(v bool) predicate.Notifier { + return predicate.Notifier(sql.FieldNEQ(FieldIsActive, v)) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.Notifier { + return predicate.Notifier(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.Notifier { + return predicate.Notifier(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.Notifier { + return predicate.Notifier(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). 
+func HasUserWith(preds ...predicate.User) predicate.Notifier { + return predicate.Notifier(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Notifier) predicate.Notifier { + return predicate.Notifier(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Notifier) predicate.Notifier { + return predicate.Notifier(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Notifier) predicate.Notifier { + return predicate.Notifier(sql.NotPredicates(p)) +} diff --git a/backend/internal/data/ent/notifier_create.go b/backend/internal/data/ent/notifier_create.go new file mode 100644 index 0000000..42265e2 --- /dev/null +++ b/backend/internal/data/ent/notifier_create.go @@ -0,0 +1,382 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" + "github.com/hay-kot/homebox/backend/internal/data/ent/user" +) + +// NotifierCreate is the builder for creating a Notifier entity. +type NotifierCreate struct { + config + mutation *NotifierMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (nc *NotifierCreate) SetCreatedAt(t time.Time) *NotifierCreate { + nc.mutation.SetCreatedAt(t) + return nc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
+func (nc *NotifierCreate) SetNillableCreatedAt(t *time.Time) *NotifierCreate { + if t != nil { + nc.SetCreatedAt(*t) + } + return nc +} + +// SetUpdatedAt sets the "updated_at" field. +func (nc *NotifierCreate) SetUpdatedAt(t time.Time) *NotifierCreate { + nc.mutation.SetUpdatedAt(t) + return nc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (nc *NotifierCreate) SetNillableUpdatedAt(t *time.Time) *NotifierCreate { + if t != nil { + nc.SetUpdatedAt(*t) + } + return nc +} + +// SetGroupID sets the "group_id" field. +func (nc *NotifierCreate) SetGroupID(u uuid.UUID) *NotifierCreate { + nc.mutation.SetGroupID(u) + return nc +} + +// SetUserID sets the "user_id" field. +func (nc *NotifierCreate) SetUserID(u uuid.UUID) *NotifierCreate { + nc.mutation.SetUserID(u) + return nc +} + +// SetName sets the "name" field. +func (nc *NotifierCreate) SetName(s string) *NotifierCreate { + nc.mutation.SetName(s) + return nc +} + +// SetURL sets the "url" field. +func (nc *NotifierCreate) SetURL(s string) *NotifierCreate { + nc.mutation.SetURL(s) + return nc +} + +// SetIsActive sets the "is_active" field. +func (nc *NotifierCreate) SetIsActive(b bool) *NotifierCreate { + nc.mutation.SetIsActive(b) + return nc +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (nc *NotifierCreate) SetNillableIsActive(b *bool) *NotifierCreate { + if b != nil { + nc.SetIsActive(*b) + } + return nc +} + +// SetID sets the "id" field. +func (nc *NotifierCreate) SetID(u uuid.UUID) *NotifierCreate { + nc.mutation.SetID(u) + return nc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (nc *NotifierCreate) SetNillableID(u *uuid.UUID) *NotifierCreate { + if u != nil { + nc.SetID(*u) + } + return nc +} + +// SetGroup sets the "group" edge to the Group entity. 
+func (nc *NotifierCreate) SetGroup(g *Group) *NotifierCreate { + return nc.SetGroupID(g.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (nc *NotifierCreate) SetUser(u *User) *NotifierCreate { + return nc.SetUserID(u.ID) +} + +// Mutation returns the NotifierMutation object of the builder. +func (nc *NotifierCreate) Mutation() *NotifierMutation { + return nc.mutation +} + +// Save creates the Notifier in the database. +func (nc *NotifierCreate) Save(ctx context.Context) (*Notifier, error) { + nc.defaults() + return withHooks(ctx, nc.sqlSave, nc.mutation, nc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (nc *NotifierCreate) SaveX(ctx context.Context) *Notifier { + v, err := nc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (nc *NotifierCreate) Exec(ctx context.Context) error { + _, err := nc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nc *NotifierCreate) ExecX(ctx context.Context) { + if err := nc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nc *NotifierCreate) defaults() { + if _, ok := nc.mutation.CreatedAt(); !ok { + v := notifier.DefaultCreatedAt() + nc.mutation.SetCreatedAt(v) + } + if _, ok := nc.mutation.UpdatedAt(); !ok { + v := notifier.DefaultUpdatedAt() + nc.mutation.SetUpdatedAt(v) + } + if _, ok := nc.mutation.IsActive(); !ok { + v := notifier.DefaultIsActive + nc.mutation.SetIsActive(v) + } + if _, ok := nc.mutation.ID(); !ok { + v := notifier.DefaultID() + nc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nc *NotifierCreate) check() error { + if _, ok := nc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Notifier.created_at"`)} + } + if _, ok := nc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Notifier.updated_at"`)} + } + if _, ok := nc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "Notifier.group_id"`)} + } + if _, ok := nc.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "Notifier.user_id"`)} + } + if _, ok := nc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Notifier.name"`)} + } + if v, ok := nc.mutation.Name(); ok { + if err := notifier.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)} + } + } + if _, ok := nc.mutation.URL(); !ok { + return &ValidationError{Name: "url", err: errors.New(`ent: missing required field "Notifier.url"`)} + } + if v, ok := nc.mutation.URL(); ok { + if err := notifier.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)} + } + } + if _, ok := nc.mutation.IsActive(); !ok { + return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "Notifier.is_active"`)} + } + if _, ok := nc.mutation.GroupID(); !ok { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Notifier.group"`)} + } + if _, ok := nc.mutation.UserID(); !ok { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "Notifier.user"`)} + } + return nil +} + +func (nc *NotifierCreate) sqlSave(ctx context.Context) (*Notifier, error) { + if err := nc.check(); err != 
nil { + return nil, err + } + _node, _spec := nc.createSpec() + if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + nc.mutation.id = &_node.ID + nc.mutation.done = true + return _node, nil +} + +func (nc *NotifierCreate) createSpec() (*Notifier, *sqlgraph.CreateSpec) { + var ( + _node = &Notifier{config: nc.config} + _spec = sqlgraph.NewCreateSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID)) + ) + if id, ok := nc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := nc.mutation.CreatedAt(); ok { + _spec.SetField(notifier.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := nc.mutation.UpdatedAt(); ok { + _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := nc.mutation.Name(); ok { + _spec.SetField(notifier.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := nc.mutation.URL(); ok { + _spec.SetField(notifier.FieldURL, field.TypeString, value) + _node.URL = value + } + if value, ok := nc.mutation.IsActive(); ok { + _spec.SetField(notifier.FieldIsActive, field.TypeBool, value) + _node.IsActive = value + } + if nodes := nc.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.GroupTable, + Columns: []string{notifier.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := 
nc.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.UserTable, + Columns: []string{notifier.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// NotifierCreateBulk is the builder for creating many Notifier entities in bulk. +type NotifierCreateBulk struct { + config + err error + builders []*NotifierCreate +} + +// Save creates the Notifier entities in the database. +func (ncb *NotifierCreateBulk) Save(ctx context.Context) ([]*Notifier, error) { + if ncb.err != nil { + return nil, ncb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ncb.builders)) + nodes := make([]*Notifier, len(ncb.builders)) + mutators := make([]Mutator, len(ncb.builders)) + for i := range ncb.builders { + func(i int, root context.Context) { + builder := ncb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*NotifierMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ncb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ncb *NotifierCreateBulk) SaveX(ctx context.Context) []*Notifier { + v, err := ncb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ncb *NotifierCreateBulk) Exec(ctx context.Context) error { + _, err := ncb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ncb *NotifierCreateBulk) ExecX(ctx context.Context) { + if err := ncb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/internal/data/ent/notifier_delete.go b/backend/internal/data/ent/notifier_delete.go new file mode 100644 index 0000000..586b093 --- /dev/null +++ b/backend/internal/data/ent/notifier_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" + "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" +) + +// NotifierDelete is the builder for deleting a Notifier entity. +type NotifierDelete struct { + config + hooks []Hook + mutation *NotifierMutation +} + +// Where appends a list predicates to the NotifierDelete builder. +func (nd *NotifierDelete) Where(ps ...predicate.Notifier) *NotifierDelete { + nd.mutation.Where(ps...) 
+ return nd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (nd *NotifierDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, nd.sqlExec, nd.mutation, nd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (nd *NotifierDelete) ExecX(ctx context.Context) int { + n, err := nd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (nd *NotifierDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID)) + if ps := nd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + nd.mutation.done = true + return affected, err +} + +// NotifierDeleteOne is the builder for deleting a single Notifier entity. +type NotifierDeleteOne struct { + nd *NotifierDelete +} + +// Where appends a list predicates to the NotifierDelete builder. +func (ndo *NotifierDeleteOne) Where(ps ...predicate.Notifier) *NotifierDeleteOne { + ndo.nd.mutation.Where(ps...) + return ndo +} + +// Exec executes the deletion query. +func (ndo *NotifierDeleteOne) Exec(ctx context.Context) error { + n, err := ndo.nd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{notifier.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ndo *NotifierDeleteOne) ExecX(ctx context.Context) { + if err := ndo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/internal/data/ent/notifier_query.go b/backend/internal/data/ent/notifier_query.go new file mode 100644 index 0000000..c88b4ef --- /dev/null +++ b/backend/internal/data/ent/notifier_query.go @@ -0,0 +1,681 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" + "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" + "github.com/hay-kot/homebox/backend/internal/data/ent/user" +) + +// NotifierQuery is the builder for querying Notifier entities. +type NotifierQuery struct { + config + ctx *QueryContext + order []notifier.OrderOption + inters []Interceptor + predicates []predicate.Notifier + withGroup *GroupQuery + withUser *UserQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the NotifierQuery builder. +func (nq *NotifierQuery) Where(ps ...predicate.Notifier) *NotifierQuery { + nq.predicates = append(nq.predicates, ps...) + return nq +} + +// Limit the number of records to be returned by this query. +func (nq *NotifierQuery) Limit(limit int) *NotifierQuery { + nq.ctx.Limit = &limit + return nq +} + +// Offset to start from. +func (nq *NotifierQuery) Offset(offset int) *NotifierQuery { + nq.ctx.Offset = &offset + return nq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
+func (nq *NotifierQuery) Unique(unique bool) *NotifierQuery { + nq.ctx.Unique = &unique + return nq +} + +// Order specifies how the records should be ordered. +func (nq *NotifierQuery) Order(o ...notifier.OrderOption) *NotifierQuery { + nq.order = append(nq.order, o...) + return nq +} + +// QueryGroup chains the current query on the "group" edge. +func (nq *NotifierQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(notifier.Table, notifier.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUser chains the current query on the "user" edge. +func (nq *NotifierQuery) QueryUser() *UserQuery { + query := (&UserClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(notifier.Table, notifier.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Notifier entity from the query. +// Returns a *NotFoundError when no Notifier was found. 
+func (nq *NotifierQuery) First(ctx context.Context) (*Notifier, error) { + nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{notifier.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (nq *NotifierQuery) FirstX(ctx context.Context) *Notifier { + node, err := nq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Notifier ID from the query. +// Returns a *NotFoundError when no Notifier ID was found. +func (nq *NotifierQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{notifier.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (nq *NotifierQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := nq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Notifier entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Notifier entity is found. +// Returns a *NotFoundError when no Notifier entities are found. +func (nq *NotifierQuery) Only(ctx context.Context) (*Notifier, error) { + nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{notifier.Label} + default: + return nil, &NotSingularError{notifier.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (nq *NotifierQuery) OnlyX(ctx context.Context) *Notifier { + node, err := nq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Notifier ID in the query. +// Returns a *NotSingularError when more than one Notifier ID is found. +// Returns a *NotFoundError when no entities are found. +func (nq *NotifierQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{notifier.Label} + default: + err = &NotSingularError{notifier.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (nq *NotifierQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := nq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Notifiers. +func (nq *NotifierQuery) All(ctx context.Context) ([]*Notifier, error) { + ctx = setContextOp(ctx, nq.ctx, "All") + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Notifier, *NotifierQuery]() + return withInterceptors[[]*Notifier](ctx, nq, qr, nq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (nq *NotifierQuery) AllX(ctx context.Context) []*Notifier { + nodes, err := nq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Notifier IDs. +func (nq *NotifierQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if nq.ctx.Unique == nil && nq.path != nil { + nq.Unique(true) + } + ctx = setContextOp(ctx, nq.ctx, "IDs") + if err = nq.Select(notifier.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. 
+func (nq *NotifierQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := nq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (nq *NotifierQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, nq.ctx, "Count") + if err := nq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, nq, querierCount[*NotifierQuery](), nq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (nq *NotifierQuery) CountX(ctx context.Context) int { + count, err := nq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (nq *NotifierQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, nq.ctx, "Exist") + switch _, err := nq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (nq *NotifierQuery) ExistX(ctx context.Context) bool { + exist, err := nq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the NotifierQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (nq *NotifierQuery) Clone() *NotifierQuery { + if nq == nil { + return nil + } + return &NotifierQuery{ + config: nq.config, + ctx: nq.ctx.Clone(), + order: append([]notifier.OrderOption{}, nq.order...), + inters: append([]Interceptor{}, nq.inters...), + predicates: append([]predicate.Notifier{}, nq.predicates...), + withGroup: nq.withGroup.Clone(), + withUser: nq.withUser.Clone(), + // clone intermediate query. 
+ sql: nq.sql.Clone(), + path: nq.path, + } +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NotifierQuery) WithGroup(opts ...func(*GroupQuery)) *NotifierQuery { + query := (&GroupClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withGroup = query + return nq +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NotifierQuery) WithUser(opts ...func(*UserQuery)) *NotifierQuery { + query := (&UserClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withUser = query + return nq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Notifier.Query(). +// GroupBy(notifier.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (nq *NotifierQuery) GroupBy(field string, fields ...string) *NotifierGroupBy { + nq.ctx.Fields = append([]string{field}, fields...) + grbuild := &NotifierGroupBy{build: nq} + grbuild.flds = &nq.ctx.Fields + grbuild.label = notifier.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Notifier.Query(). +// Select(notifier.FieldCreatedAt). +// Scan(ctx, &v) +func (nq *NotifierQuery) Select(fields ...string) *NotifierSelect { + nq.ctx.Fields = append(nq.ctx.Fields, fields...) 
+ sbuild := &NotifierSelect{NotifierQuery: nq} + sbuild.label = notifier.Label + sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a NotifierSelect configured with the given aggregations. +func (nq *NotifierQuery) Aggregate(fns ...AggregateFunc) *NotifierSelect { + return nq.Select().Aggregate(fns...) +} + +func (nq *NotifierQuery) prepareQuery(ctx context.Context) error { + for _, inter := range nq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, nq); err != nil { + return err + } + } + } + for _, f := range nq.ctx.Fields { + if !notifier.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if nq.path != nil { + prev, err := nq.path(ctx) + if err != nil { + return err + } + nq.sql = prev + } + return nil +} + +func (nq *NotifierQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Notifier, error) { + var ( + nodes = []*Notifier{} + _spec = nq.querySpec() + loadedTypes = [2]bool{ + nq.withGroup != nil, + nq.withUser != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Notifier).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Notifier{config: nq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := nq.withGroup; query != nil { + if err := nq.loadGroup(ctx, query, nodes, nil, + func(n *Notifier, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := nq.withUser; query != nil { + if err := nq.loadUser(ctx, query, nodes, nil, + 
func(n *Notifier, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (nq *NotifierQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *Group)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Notifier) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (nq *NotifierQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *User)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Notifier) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (nq *NotifierQuery) sqlCount(ctx context.Context) (int, error) { + _spec := nq.querySpec() + _spec.Node.Columns = nq.ctx.Fields + if len(nq.ctx.Fields) > 0 { + _spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, nq.driver, _spec) +} + +func (nq *NotifierQuery) querySpec() *sqlgraph.QuerySpec { + _spec := 
sqlgraph.NewQuerySpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID)) + _spec.From = nq.sql + if unique := nq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if nq.path != nil { + _spec.Unique = true + } + if fields := nq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID) + for i := range fields { + if fields[i] != notifier.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if nq.withGroup != nil { + _spec.Node.AddColumnOnce(notifier.FieldGroupID) + } + if nq.withUser != nil { + _spec.Node.AddColumnOnce(notifier.FieldUserID) + } + } + if ps := nq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := nq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := nq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := nq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (nq *NotifierQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(nq.driver.Dialect()) + t1 := builder.Table(notifier.Table) + columns := nq.ctx.Fields + if len(columns) == 0 { + columns = notifier.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if nq.sql != nil { + selector = nq.sql + selector.Select(selector.Columns(columns...)...) + } + if nq.ctx.Unique != nil && *nq.ctx.Unique { + selector.Distinct() + } + for _, p := range nq.predicates { + p(selector) + } + for _, p := range nq.order { + p(selector) + } + if offset := nq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := nq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// NotifierGroupBy is the group-by builder for Notifier entities. +type NotifierGroupBy struct { + selector + build *NotifierQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ngb *NotifierGroupBy) Aggregate(fns ...AggregateFunc) *NotifierGroupBy { + ngb.fns = append(ngb.fns, fns...) + return ngb +} + +// Scan applies the selector query and scans the result into the given value. +func (ngb *NotifierGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy") + if err := ngb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NotifierQuery, *NotifierGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v) +} + +func (ngb *NotifierGroupBy) sqlScan(ctx context.Context, root *NotifierQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ngb.fns)) + for _, fn := range ngb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns)) + for _, f := range *ngb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ngb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// NotifierSelect is the builder for selecting fields of Notifier entities. +type NotifierSelect struct { + *NotifierQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
+func (ns *NotifierSelect) Aggregate(fns ...AggregateFunc) *NotifierSelect { + ns.fns = append(ns.fns, fns...) + return ns +} + +// Scan applies the selector query and scans the result into the given value. +func (ns *NotifierSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ns.ctx, "Select") + if err := ns.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NotifierQuery, *NotifierSelect](ctx, ns.NotifierQuery, ns, ns.inters, v) +} + +func (ns *NotifierSelect) sqlScan(ctx context.Context, root *NotifierQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ns.fns)) + for _, fn := range ns.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ns.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ns.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/internal/data/ent/notifier_update.go b/backend/internal/data/ent/notifier_update.go new file mode 100644 index 0000000..ea28f32 --- /dev/null +++ b/backend/internal/data/ent/notifier_update.go @@ -0,0 +1,581 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" + "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" + "github.com/hay-kot/homebox/backend/internal/data/ent/user" +) + +// NotifierUpdate is the builder for updating Notifier entities. 
+type NotifierUpdate struct { + config + hooks []Hook + mutation *NotifierMutation +} + +// Where appends a list predicates to the NotifierUpdate builder. +func (nu *NotifierUpdate) Where(ps ...predicate.Notifier) *NotifierUpdate { + nu.mutation.Where(ps...) + return nu +} + +// SetUpdatedAt sets the "updated_at" field. +func (nu *NotifierUpdate) SetUpdatedAt(t time.Time) *NotifierUpdate { + nu.mutation.SetUpdatedAt(t) + return nu +} + +// SetGroupID sets the "group_id" field. +func (nu *NotifierUpdate) SetGroupID(u uuid.UUID) *NotifierUpdate { + nu.mutation.SetGroupID(u) + return nu +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (nu *NotifierUpdate) SetNillableGroupID(u *uuid.UUID) *NotifierUpdate { + if u != nil { + nu.SetGroupID(*u) + } + return nu +} + +// SetUserID sets the "user_id" field. +func (nu *NotifierUpdate) SetUserID(u uuid.UUID) *NotifierUpdate { + nu.mutation.SetUserID(u) + return nu +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (nu *NotifierUpdate) SetNillableUserID(u *uuid.UUID) *NotifierUpdate { + if u != nil { + nu.SetUserID(*u) + } + return nu +} + +// SetName sets the "name" field. +func (nu *NotifierUpdate) SetName(s string) *NotifierUpdate { + nu.mutation.SetName(s) + return nu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (nu *NotifierUpdate) SetNillableName(s *string) *NotifierUpdate { + if s != nil { + nu.SetName(*s) + } + return nu +} + +// SetURL sets the "url" field. +func (nu *NotifierUpdate) SetURL(s string) *NotifierUpdate { + nu.mutation.SetURL(s) + return nu +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (nu *NotifierUpdate) SetNillableURL(s *string) *NotifierUpdate { + if s != nil { + nu.SetURL(*s) + } + return nu +} + +// SetIsActive sets the "is_active" field. 
+func (nu *NotifierUpdate) SetIsActive(b bool) *NotifierUpdate { + nu.mutation.SetIsActive(b) + return nu +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (nu *NotifierUpdate) SetNillableIsActive(b *bool) *NotifierUpdate { + if b != nil { + nu.SetIsActive(*b) + } + return nu +} + +// SetGroup sets the "group" edge to the Group entity. +func (nu *NotifierUpdate) SetGroup(g *Group) *NotifierUpdate { + return nu.SetGroupID(g.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (nu *NotifierUpdate) SetUser(u *User) *NotifierUpdate { + return nu.SetUserID(u.ID) +} + +// Mutation returns the NotifierMutation object of the builder. +func (nu *NotifierUpdate) Mutation() *NotifierMutation { + return nu.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (nu *NotifierUpdate) ClearGroup() *NotifierUpdate { + nu.mutation.ClearGroup() + return nu +} + +// ClearUser clears the "user" edge to the User entity. +func (nu *NotifierUpdate) ClearUser() *NotifierUpdate { + nu.mutation.ClearUser() + return nu +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (nu *NotifierUpdate) Save(ctx context.Context) (int, error) { + nu.defaults() + return withHooks(ctx, nu.sqlSave, nu.mutation, nu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (nu *NotifierUpdate) SaveX(ctx context.Context) int { + affected, err := nu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (nu *NotifierUpdate) Exec(ctx context.Context) error { + _, err := nu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nu *NotifierUpdate) ExecX(ctx context.Context) { + if err := nu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (nu *NotifierUpdate) defaults() { + if _, ok := nu.mutation.UpdatedAt(); !ok { + v := notifier.UpdateDefaultUpdatedAt() + nu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (nu *NotifierUpdate) check() error { + if v, ok := nu.mutation.Name(); ok { + if err := notifier.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)} + } + } + if v, ok := nu.mutation.URL(); ok { + if err := notifier.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)} + } + } + if _, ok := nu.mutation.GroupID(); nu.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Notifier.group"`) + } + if _, ok := nu.mutation.UserID(); nu.mutation.UserCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Notifier.user"`) + } + return nil +} + +func (nu *NotifierUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := nu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID)) + if ps := nu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := nu.mutation.UpdatedAt(); ok { + _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := nu.mutation.Name(); ok { + _spec.SetField(notifier.FieldName, field.TypeString, value) + } + if value, ok := nu.mutation.URL(); ok { + _spec.SetField(notifier.FieldURL, field.TypeString, value) + } + if value, ok := nu.mutation.IsActive(); ok { + _spec.SetField(notifier.FieldIsActive, field.TypeBool, value) + } + if nu.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: 
notifier.GroupTable, + Columns: []string{notifier.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.GroupTable, + Columns: []string{notifier.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nu.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.UserTable, + Columns: []string{notifier.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.UserTable, + Columns: []string{notifier.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, nu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{notifier.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + nu.mutation.done = true + return n, nil +} + +// NotifierUpdateOne is the builder for updating a single Notifier entity. 
+type NotifierUpdateOne struct { + config + fields []string + hooks []Hook + mutation *NotifierMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (nuo *NotifierUpdateOne) SetUpdatedAt(t time.Time) *NotifierUpdateOne { + nuo.mutation.SetUpdatedAt(t) + return nuo +} + +// SetGroupID sets the "group_id" field. +func (nuo *NotifierUpdateOne) SetGroupID(u uuid.UUID) *NotifierUpdateOne { + nuo.mutation.SetGroupID(u) + return nuo +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (nuo *NotifierUpdateOne) SetNillableGroupID(u *uuid.UUID) *NotifierUpdateOne { + if u != nil { + nuo.SetGroupID(*u) + } + return nuo +} + +// SetUserID sets the "user_id" field. +func (nuo *NotifierUpdateOne) SetUserID(u uuid.UUID) *NotifierUpdateOne { + nuo.mutation.SetUserID(u) + return nuo +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (nuo *NotifierUpdateOne) SetNillableUserID(u *uuid.UUID) *NotifierUpdateOne { + if u != nil { + nuo.SetUserID(*u) + } + return nuo +} + +// SetName sets the "name" field. +func (nuo *NotifierUpdateOne) SetName(s string) *NotifierUpdateOne { + nuo.mutation.SetName(s) + return nuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (nuo *NotifierUpdateOne) SetNillableName(s *string) *NotifierUpdateOne { + if s != nil { + nuo.SetName(*s) + } + return nuo +} + +// SetURL sets the "url" field. +func (nuo *NotifierUpdateOne) SetURL(s string) *NotifierUpdateOne { + nuo.mutation.SetURL(s) + return nuo +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (nuo *NotifierUpdateOne) SetNillableURL(s *string) *NotifierUpdateOne { + if s != nil { + nuo.SetURL(*s) + } + return nuo +} + +// SetIsActive sets the "is_active" field. 
+func (nuo *NotifierUpdateOne) SetIsActive(b bool) *NotifierUpdateOne { + nuo.mutation.SetIsActive(b) + return nuo +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (nuo *NotifierUpdateOne) SetNillableIsActive(b *bool) *NotifierUpdateOne { + if b != nil { + nuo.SetIsActive(*b) + } + return nuo +} + +// SetGroup sets the "group" edge to the Group entity. +func (nuo *NotifierUpdateOne) SetGroup(g *Group) *NotifierUpdateOne { + return nuo.SetGroupID(g.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (nuo *NotifierUpdateOne) SetUser(u *User) *NotifierUpdateOne { + return nuo.SetUserID(u.ID) +} + +// Mutation returns the NotifierMutation object of the builder. +func (nuo *NotifierUpdateOne) Mutation() *NotifierMutation { + return nuo.mutation +} + +// ClearGroup clears the "group" edge to the Group entity. +func (nuo *NotifierUpdateOne) ClearGroup() *NotifierUpdateOne { + nuo.mutation.ClearGroup() + return nuo +} + +// ClearUser clears the "user" edge to the User entity. +func (nuo *NotifierUpdateOne) ClearUser() *NotifierUpdateOne { + nuo.mutation.ClearUser() + return nuo +} + +// Where appends a list predicates to the NotifierUpdate builder. +func (nuo *NotifierUpdateOne) Where(ps ...predicate.Notifier) *NotifierUpdateOne { + nuo.mutation.Where(ps...) + return nuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (nuo *NotifierUpdateOne) Select(field string, fields ...string) *NotifierUpdateOne { + nuo.fields = append([]string{field}, fields...) + return nuo +} + +// Save executes the query and returns the updated Notifier entity. +func (nuo *NotifierUpdateOne) Save(ctx context.Context) (*Notifier, error) { + nuo.defaults() + return withHooks(ctx, nuo.sqlSave, nuo.mutation, nuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (nuo *NotifierUpdateOne) SaveX(ctx context.Context) *Notifier { + node, err := nuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (nuo *NotifierUpdateOne) Exec(ctx context.Context) error { + _, err := nuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nuo *NotifierUpdateOne) ExecX(ctx context.Context) { + if err := nuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nuo *NotifierUpdateOne) defaults() { + if _, ok := nuo.mutation.UpdatedAt(); !ok { + v := notifier.UpdateDefaultUpdatedAt() + nuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (nuo *NotifierUpdateOne) check() error { + if v, ok := nuo.mutation.Name(); ok { + if err := notifier.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)} + } + } + if v, ok := nuo.mutation.URL(); ok { + if err := notifier.URLValidator(v); err != nil { + return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)} + } + } + if _, ok := nuo.mutation.GroupID(); nuo.mutation.GroupCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Notifier.group"`) + } + if _, ok := nuo.mutation.UserID(); nuo.mutation.UserCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Notifier.user"`) + } + return nil +} + +func (nuo *NotifierUpdateOne) sqlSave(ctx context.Context) (_node *Notifier, err error) { + if err := nuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID)) + id, ok := nuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Notifier.id" 
for update`)} + } + _spec.Node.ID.Value = id + if fields := nuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID) + for _, f := range fields { + if !notifier.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != notifier.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := nuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := nuo.mutation.UpdatedAt(); ok { + _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := nuo.mutation.Name(); ok { + _spec.SetField(notifier.FieldName, field.TypeString, value) + } + if value, ok := nuo.mutation.URL(); ok { + _spec.SetField(notifier.FieldURL, field.TypeString, value) + } + if value, ok := nuo.mutation.IsActive(); ok { + _spec.SetField(notifier.FieldIsActive, field.TypeBool, value) + } + if nuo.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.GroupTable, + Columns: []string{notifier.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.GroupTable, + Columns: []string{notifier.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nuo.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: 
notifier.UserTable, + Columns: []string{notifier.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: notifier.UserTable, + Columns: []string{notifier.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Notifier{config: nuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, nuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{notifier.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + nuo.mutation.done = true + return _node, nil +} diff --git a/backend/internal/data/ent/predicate/predicate.go b/backend/internal/data/ent/predicate/predicate.go index b1fbe67..bd36616 100644 --- a/backend/internal/data/ent/predicate/predicate.go +++ b/backend/internal/data/ent/predicate/predicate.go @@ -39,5 +39,8 @@ type Location func(*sql.Selector) // MaintenanceEntry is the predicate function for maintenanceentry builders. type MaintenanceEntry func(*sql.Selector) +// Notifier is the predicate function for notifier builders. +type Notifier func(*sql.Selector) + // User is the predicate function for user builders. 
type User func(*sql.Selector) diff --git a/backend/internal/data/ent/runtime.go b/backend/internal/data/ent/runtime.go index 4ce9d2c..c3aff00 100644 --- a/backend/internal/data/ent/runtime.go +++ b/backend/internal/data/ent/runtime.go @@ -16,6 +16,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/location" "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/schema" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -39,6 +40,10 @@ func init() { attachment.DefaultUpdatedAt = attachmentDescUpdatedAt.Default.(func() time.Time) // attachment.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. attachment.UpdateDefaultUpdatedAt = attachmentDescUpdatedAt.UpdateDefault.(func() time.Time) + // attachmentDescPrimary is the schema descriptor for primary field. + attachmentDescPrimary := attachmentFields[1].Descriptor() + // attachment.DefaultPrimary holds the default value on creation for the primary field. + attachment.DefaultPrimary = attachmentDescPrimary.Default.(bool) // attachmentDescID is the schema descriptor for id field. attachmentDescID := attachmentMixinFields0[0].Descriptor() // attachment.DefaultID holds the default value on creation for the id field. @@ -156,6 +161,10 @@ func init() { return nil } }() + // groupDescCurrency is the schema descriptor for currency field. + groupDescCurrency := groupFields[1].Descriptor() + // group.DefaultCurrency holds the default value on creation for the currency field. + group.DefaultCurrency = groupDescCurrency.Default.(string) // groupDescID is the schema descriptor for id field. groupDescID := groupMixinFields0[0].Descriptor() // group.DefaultID holds the default value on creation for the id field. 
@@ -446,12 +455,8 @@ func init() { maintenanceentry.DefaultUpdatedAt = maintenanceentryDescUpdatedAt.Default.(func() time.Time) // maintenanceentry.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. maintenanceentry.UpdateDefaultUpdatedAt = maintenanceentryDescUpdatedAt.UpdateDefault.(func() time.Time) - // maintenanceentryDescDate is the schema descriptor for date field. - maintenanceentryDescDate := maintenanceentryFields[1].Descriptor() - // maintenanceentry.DefaultDate holds the default value on creation for the date field. - maintenanceentry.DefaultDate = maintenanceentryDescDate.Default.(func() time.Time) // maintenanceentryDescName is the schema descriptor for name field. - maintenanceentryDescName := maintenanceentryFields[2].Descriptor() + maintenanceentryDescName := maintenanceentryFields[3].Descriptor() // maintenanceentry.NameValidator is a validator for the "name" field. It is called by the builders before save. maintenanceentry.NameValidator = func() func(string) error { validators := maintenanceentryDescName.Validators @@ -469,17 +474,76 @@ func init() { } }() // maintenanceentryDescDescription is the schema descriptor for description field. - maintenanceentryDescDescription := maintenanceentryFields[3].Descriptor() + maintenanceentryDescDescription := maintenanceentryFields[4].Descriptor() // maintenanceentry.DescriptionValidator is a validator for the "description" field. It is called by the builders before save. maintenanceentry.DescriptionValidator = maintenanceentryDescDescription.Validators[0].(func(string) error) // maintenanceentryDescCost is the schema descriptor for cost field. - maintenanceentryDescCost := maintenanceentryFields[4].Descriptor() + maintenanceentryDescCost := maintenanceentryFields[5].Descriptor() // maintenanceentry.DefaultCost holds the default value on creation for the cost field. 
maintenanceentry.DefaultCost = maintenanceentryDescCost.Default.(float64) // maintenanceentryDescID is the schema descriptor for id field. maintenanceentryDescID := maintenanceentryMixinFields0[0].Descriptor() // maintenanceentry.DefaultID holds the default value on creation for the id field. maintenanceentry.DefaultID = maintenanceentryDescID.Default.(func() uuid.UUID) + notifierMixin := schema.Notifier{}.Mixin() + notifierMixinFields0 := notifierMixin[0].Fields() + _ = notifierMixinFields0 + notifierFields := schema.Notifier{}.Fields() + _ = notifierFields + // notifierDescCreatedAt is the schema descriptor for created_at field. + notifierDescCreatedAt := notifierMixinFields0[1].Descriptor() + // notifier.DefaultCreatedAt holds the default value on creation for the created_at field. + notifier.DefaultCreatedAt = notifierDescCreatedAt.Default.(func() time.Time) + // notifierDescUpdatedAt is the schema descriptor for updated_at field. + notifierDescUpdatedAt := notifierMixinFields0[2].Descriptor() + // notifier.DefaultUpdatedAt holds the default value on creation for the updated_at field. + notifier.DefaultUpdatedAt = notifierDescUpdatedAt.Default.(func() time.Time) + // notifier.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + notifier.UpdateDefaultUpdatedAt = notifierDescUpdatedAt.UpdateDefault.(func() time.Time) + // notifierDescName is the schema descriptor for name field. + notifierDescName := notifierFields[0].Descriptor() + // notifier.NameValidator is a validator for the "name" field. It is called by the builders before save. 
+ notifier.NameValidator = func() func(string) error { + validators := notifierDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // notifierDescURL is the schema descriptor for url field. + notifierDescURL := notifierFields[1].Descriptor() + // notifier.URLValidator is a validator for the "url" field. It is called by the builders before save. + notifier.URLValidator = func() func(string) error { + validators := notifierDescURL.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(url string) error { + for _, fn := range fns { + if err := fn(url); err != nil { + return err + } + } + return nil + } + }() + // notifierDescIsActive is the schema descriptor for is_active field. + notifierDescIsActive := notifierFields[2].Descriptor() + // notifier.DefaultIsActive holds the default value on creation for the is_active field. + notifier.DefaultIsActive = notifierDescIsActive.Default.(bool) + // notifierDescID is the schema descriptor for id field. + notifierDescID := notifierMixinFields0[0].Descriptor() + // notifier.DefaultID holds the default value on creation for the id field. + notifier.DefaultID = notifierDescID.Default.(func() uuid.UUID) userMixin := schema.User{}.Mixin() userMixinFields0 := userMixin[0].Fields() _ = userMixinFields0 @@ -554,7 +618,7 @@ func init() { // user.DefaultIsSuperuser holds the default value on creation for the is_superuser field. user.DefaultIsSuperuser = userDescIsSuperuser.Default.(bool) // userDescSuperuser is the schema descriptor for superuser field. 
- userDescSuperuser := userFields[5].Descriptor() + userDescSuperuser := userFields[4].Descriptor() // user.DefaultSuperuser holds the default value on creation for the superuser field. user.DefaultSuperuser = userDescSuperuser.Default.(bool) // userDescID is the schema descriptor for id field. diff --git a/backend/internal/data/ent/runtime/runtime.go b/backend/internal/data/ent/runtime/runtime.go index f6cb4c1..b5773b1 100644 --- a/backend/internal/data/ent/runtime/runtime.go +++ b/backend/internal/data/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/hay-kot/homebox/backend/internal/data/ent/runtime.go const ( - Version = "v0.11.8" // Version of ent codegen. - Sum = "h1:M/M0QL1CYCUSdqGRXUrXhFYSDRJPsOOrr+RLEej/gyQ=" // Sum of ent codegen. + Version = "v0.12.5" // Version of ent codegen. + Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. ) diff --git a/backend/internal/data/ent/schema/attachment.go b/backend/internal/data/ent/schema/attachment.go index 7f4673a..589b684 100644 --- a/backend/internal/data/ent/schema/attachment.go +++ b/backend/internal/data/ent/schema/attachment.go @@ -24,6 +24,8 @@ func (Attachment) Fields() []ent.Field { field.Enum("type"). Values("photo", "manual", "warranty", "attachment", "receipt"). Default("attachment"), + field.Bool("primary"). + Default(false), } } diff --git a/backend/internal/data/ent/schema/document.go b/backend/internal/data/ent/schema/document.go index a2c26e2..d814f60 100644 --- a/backend/internal/data/ent/schema/document.go +++ b/backend/internal/data/ent/schema/document.go @@ -16,6 +16,7 @@ type Document struct { func (Document) Mixin() []ent.Mixin { return []ent.Mixin{ mixins.BaseMixin{}, + GroupMixin{ref: "documents"}, } } @@ -34,10 +35,6 @@ func (Document) Fields() []ent.Field { // Edges of the Document. func (Document) Edges() []ent.Edge { return []ent.Edge{ - edge.From("group", Group.Type). - Ref("documents"). 
- Required(). - Unique(), edge.To("attachments", Attachment.Type). Annotations(entsql.Annotation{ OnDelete: entsql.Cascade, diff --git a/backend/internal/data/ent/schema/group.go b/backend/internal/data/ent/schema/group.go index 2c0a5fe..352ac0b 100644 --- a/backend/internal/data/ent/schema/group.go +++ b/backend/internal/data/ent/schema/group.go @@ -5,6 +5,8 @@ import ( "entgo.io/ent/dialect/entsql" "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins" ) @@ -25,38 +27,59 @@ func (Group) Fields() []ent.Field { field.String("name"). MaxLen(255). NotEmpty(), - field.Enum("currency"). - Default("usd"). - Values("usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk", "inr", "rmb", "bgn"), + field.String("currency"). + Default("usd"), } } // Edges of the Home. func (Group) Edges() []ent.Edge { + owned := func(name string, t any) ent.Edge { + return edge.To(name, t). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }) + } + return []ent.Edge{ - edge.To("users", User.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("locations", Location.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("items", Item.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("labels", Label.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("documents", Document.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("invitation_tokens", GroupInvitationToken.Type). 
- Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), + owned("users", User.Type), + owned("locations", Location.Type), + owned("items", Item.Type), + owned("labels", Label.Type), + owned("documents", Document.Type), + owned("invitation_tokens", GroupInvitationToken.Type), + owned("notifiers", Notifier.Type), + // $scaffold_edge } } + +// GroupMixin when embedded in an ent.Schema, adds a reference to +// the Group entity. +type GroupMixin struct { + ref string + field string + mixin.Schema +} + +func (g GroupMixin) Fields() []ent.Field { + if g.field != "" { + return []ent.Field{ + field.UUID(g.field, uuid.UUID{}), + } + } + + return nil +} + +func (g GroupMixin) Edges() []ent.Edge { + edge := edge.From("group", Group.Type). + Ref(g.ref). + Unique(). + Required() + + if g.field != "" { + edge = edge.Field(g.field) + } + + return []ent.Edge{edge} +} diff --git a/backend/internal/data/ent/schema/item.go b/backend/internal/data/ent/schema/item.go index 5180f27..344829f 100644 --- a/backend/internal/data/ent/schema/item.go +++ b/backend/internal/data/ent/schema/item.go @@ -18,6 +18,7 @@ func (Item) Mixin() []ent.Mixin { return []ent.Mixin{ mixins.BaseMixin{}, mixins.DetailsMixin{}, + GroupMixin{ref: "items"}, } } @@ -38,8 +39,7 @@ func (Item) Fields() []ent.Field { return []ent.Field{ field.String("import_ref"). Optional(). - MaxLen(100). - Immutable(), + MaxLen(100), field.String("notes"). MaxLen(1000). Optional(), @@ -99,30 +99,24 @@ func (Item) Fields() []ent.Field { // Edges of the Item. func (Item) Edges() []ent.Edge { + owned := func(s string, t any) ent.Edge { + return edge.To(s, t). + Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }) + } + return []ent.Edge{ edge.To("children", Item.Type). From("parent"). Unique(), - edge.From("group", Group.Type). - Ref("items"). - Required(). - Unique(), edge.From("label", Label.Type). Ref("items"), edge.From("location", Location.Type). Ref("items"). Unique(), - edge.To("fields", ItemField.Type). 
- Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("maintenance_entries", MaintenanceEntry.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), - edge.To("attachments", Attachment.Type). - Annotations(entsql.Annotation{ - OnDelete: entsql.Cascade, - }), + owned("fields", ItemField.Type), + owned("maintenance_entries", MaintenanceEntry.Type), + owned("attachments", Attachment.Type), } } diff --git a/backend/internal/data/ent/schema/label.go b/backend/internal/data/ent/schema/label.go index 72d6078..c54c713 100644 --- a/backend/internal/data/ent/schema/label.go +++ b/backend/internal/data/ent/schema/label.go @@ -16,6 +16,7 @@ func (Label) Mixin() []ent.Mixin { return []ent.Mixin{ mixins.BaseMixin{}, mixins.DetailsMixin{}, + GroupMixin{ref: "labels"}, } } @@ -31,10 +32,6 @@ func (Label) Fields() []ent.Field { // Edges of the Label. func (Label) Edges() []ent.Edge { return []ent.Edge{ - edge.From("group", Group.Type). - Ref("labels"). - Required(). - Unique(), edge.To("items", Item.Type), } } diff --git a/backend/internal/data/ent/schema/location.go b/backend/internal/data/ent/schema/location.go index b3142b4..b52cb7a 100644 --- a/backend/internal/data/ent/schema/location.go +++ b/backend/internal/data/ent/schema/location.go @@ -16,6 +16,7 @@ func (Location) Mixin() []ent.Mixin { return []ent.Mixin{ mixins.BaseMixin{}, mixins.DetailsMixin{}, + GroupMixin{ref: "locations"}, } } @@ -30,10 +31,6 @@ func (Location) Edges() []ent.Edge { edge.To("children", Location.Type). From("parent"). Unique(), - edge.From("group", Group.Type). - Ref("locations"). - Unique(). - Required(), edge.To("items", Item.Type). 
Annotations(entsql.Annotation{ OnDelete: entsql.Cascade, diff --git a/backend/internal/data/ent/schema/maintenance_entry.go b/backend/internal/data/ent/schema/maintenance_entry.go index 7fd9643..1c623cf 100644 --- a/backend/internal/data/ent/schema/maintenance_entry.go +++ b/backend/internal/data/ent/schema/maintenance_entry.go @@ -1,8 +1,6 @@ package schema import ( - "time" - "entgo.io/ent" "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" @@ -24,7 +22,9 @@ func (MaintenanceEntry) Fields() []ent.Field { return []ent.Field{ field.UUID("item_id", uuid.UUID{}), field.Time("date"). - Default(time.Now), + Optional(), + field.Time("scheduled_date"). + Optional(), field.String("name"). MaxLen(255). NotEmpty(), diff --git a/backend/internal/data/ent/schema/notifier.go b/backend/internal/data/ent/schema/notifier.go new file mode 100755 index 0000000..c3561d0 --- /dev/null +++ b/backend/internal/data/ent/schema/notifier.go @@ -0,0 +1,51 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + + "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins" +) + +type Notifier struct { + ent.Schema +} + +func (Notifier) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.BaseMixin{}, + GroupMixin{ + ref: "notifiers", + field: "group_id", + }, + UserMixin{ + ref: "notifiers", + field: "user_id", + }, + } +} + +// Fields of the Notifier. +func (Notifier) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + MaxLen(255). + NotEmpty(), + field.String("url"). + Sensitive(). + MaxLen(2083). // supposed max length of URL + NotEmpty(), + field.Bool("is_active"). 
+ Default(true), + } +} + +func (Notifier) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("user_id"), + index.Fields("user_id", "is_active"), + index.Fields("group_id"), + index.Fields("group_id", "is_active"), + } +} diff --git a/backend/internal/data/ent/schema/templates/has_id.tmpl b/backend/internal/data/ent/schema/templates/has_id.tmpl index cc6e30a..d9134e9 100644 --- a/backend/internal/data/ent/schema/templates/has_id.tmpl +++ b/backend/internal/data/ent/schema/templates/has_id.tmpl @@ -20,4 +20,4 @@ import "github.com/google/uuid" } {{ end }} -{{ end }} \ No newline at end of file +{{ end }} diff --git a/backend/internal/data/ent/schema/user.go b/backend/internal/data/ent/schema/user.go index b3342a8..10b0a8a 100644 --- a/backend/internal/data/ent/schema/user.go +++ b/backend/internal/data/ent/schema/user.go @@ -5,6 +5,8 @@ import ( "entgo.io/ent/dialect/entsql" "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins" ) @@ -16,6 +18,7 @@ type User struct { func (User) Mixin() []ent.Mixin { return []ent.Mixin{ mixins.BaseMixin{}, + GroupMixin{ref: "users"}, } } @@ -35,11 +38,11 @@ func (User) Fields() []ent.Field { Sensitive(), field.Bool("is_superuser"). Default(false), + field.Bool("superuser"). + Default(false), field.Enum("role"). Default("user"). Values("user", "owner"), - field.Bool("superuser"). - Default(false), field.Time("activated_on"). Optional(), } @@ -48,13 +51,44 @@ func (User) Fields() []ent.Field { // Edges of the User. func (User) Edges() []ent.Edge { return []ent.Edge{ - edge.From("group", Group.Type). - Ref("users"). - Required(). - Unique(), edge.To("auth_tokens", AuthTokens.Type). Annotations(entsql.Annotation{ OnDelete: entsql.Cascade, }), + edge.To("notifiers", Notifier.Type). 
+ Annotations(entsql.Annotation{ + OnDelete: entsql.Cascade, + }), } } + +// UserMixin when embedded in an ent.Schema, adds a reference to +// the Group entity. +type UserMixin struct { + ref string + field string + mixin.Schema +} + +func (g UserMixin) Fields() []ent.Field { + if g.field != "" { + return []ent.Field{ + field.UUID(g.field, uuid.UUID{}), + } + } + + return nil +} + +func (g UserMixin) Edges() []ent.Edge { + edge := edge.From("user", User.Type). + Ref(g.ref). + Unique(). + Required() + + if g.field != "" { + edge = edge.Field(g.field) + } + + return []ent.Edge{edge} +} diff --git a/backend/internal/data/ent/tx.go b/backend/internal/data/ent/tx.go index 0703ce5..f51f2ac 100644 --- a/backend/internal/data/ent/tx.go +++ b/backend/internal/data/ent/tx.go @@ -34,6 +34,8 @@ type Tx struct { Location *LocationClient // MaintenanceEntry is the client for interacting with the MaintenanceEntry builders. MaintenanceEntry *MaintenanceEntryClient + // Notifier is the client for interacting with the Notifier builders. + Notifier *NotifierClient // User is the client for interacting with the User builders. User *UserClient @@ -178,6 +180,7 @@ func (tx *Tx) init() { tx.Label = NewLabelClient(tx.config) tx.Location = NewLocationClient(tx.config) tx.MaintenanceEntry = NewMaintenanceEntryClient(tx.config) + tx.Notifier = NewNotifierClient(tx.config) tx.User = NewUserClient(tx.config) } diff --git a/backend/internal/data/ent/user.go b/backend/internal/data/ent/user.go index 97a9279..3331de7 100644 --- a/backend/internal/data/ent/user.go +++ b/backend/internal/data/ent/user.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/group" @@ -30,16 +31,17 @@ type User struct { Password string `json:"-"` // IsSuperuser holds the value of the "is_superuser" field. IsSuperuser bool `json:"is_superuser,omitempty"` - // Role holds the value of the "role" field. 
- Role user.Role `json:"role,omitempty"` // Superuser holds the value of the "superuser" field. Superuser bool `json:"superuser,omitempty"` + // Role holds the value of the "role" field. + Role user.Role `json:"role,omitempty"` // ActivatedOn holds the value of the "activated_on" field. ActivatedOn time.Time `json:"activated_on,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the UserQuery when eager-loading is set. - Edges UserEdges `json:"edges"` - group_users *uuid.UUID + Edges UserEdges `json:"edges"` + group_users *uuid.UUID + selectValues sql.SelectValues } // UserEdges holds the relations/edges for other nodes in the graph. @@ -48,9 +50,11 @@ type UserEdges struct { Group *Group `json:"group,omitempty"` // AuthTokens holds the value of the auth_tokens edge. AuthTokens []*AuthTokens `json:"auth_tokens,omitempty"` + // Notifiers holds the value of the notifiers edge. + Notifiers []*Notifier `json:"notifiers,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [2]bool + loadedTypes [3]bool } // GroupOrErr returns the Group value or an error if the edge @@ -75,6 +79,15 @@ func (e UserEdges) AuthTokensOrErr() ([]*AuthTokens, error) { return nil, &NotLoadedError{edge: "auth_tokens"} } +// NotifiersOrErr returns the Notifiers value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) NotifiersOrErr() ([]*Notifier, error) { + if e.loadedTypes[2] { + return e.Notifiers, nil + } + return nil, &NotLoadedError{edge: "notifiers"} +} + // scanValues returns the types for scanning values from sql.Rows. 
func (*User) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) @@ -91,7 +104,7 @@ func (*User) scanValues(columns []string) ([]any, error) { case user.ForeignKeys[0]: // group_users values[i] = &sql.NullScanner{S: new(uuid.UUID)} default: - return nil, fmt.Errorf("unexpected column %q for type User", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -147,18 +160,18 @@ func (u *User) assignValues(columns []string, values []any) error { } else if value.Valid { u.IsSuperuser = value.Bool } - case user.FieldRole: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field role", values[i]) - } else if value.Valid { - u.Role = user.Role(value.String) - } case user.FieldSuperuser: if value, ok := values[i].(*sql.NullBool); !ok { return fmt.Errorf("unexpected type %T for field superuser", values[i]) } else if value.Valid { u.Superuser = value.Bool } + case user.FieldRole: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field role", values[i]) + } else if value.Valid { + u.Role = user.Role(value.String) + } case user.FieldActivatedOn: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field activated_on", values[i]) @@ -172,11 +185,19 @@ func (u *User) assignValues(columns []string, values []any) error { u.group_users = new(uuid.UUID) *u.group_users = *value.S.(*uuid.UUID) } + default: + u.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (u *User) Value(name string) (ent.Value, error) { + return u.selectValues.Get(name) +} + // QueryGroup queries the "group" edge of the User entity. 
func (u *User) QueryGroup() *GroupQuery { return NewUserClient(u.config).QueryGroup(u) @@ -187,6 +208,11 @@ func (u *User) QueryAuthTokens() *AuthTokensQuery { return NewUserClient(u.config).QueryAuthTokens(u) } +// QueryNotifiers queries the "notifiers" edge of the User entity. +func (u *User) QueryNotifiers() *NotifierQuery { + return NewUserClient(u.config).QueryNotifiers(u) +} + // Update returns a builder for updating this User. // Note that you need to call User.Unwrap() before calling this method if this User // was returned from a transaction, and the transaction was committed or rolled back. @@ -227,12 +253,12 @@ func (u *User) String() string { builder.WriteString("is_superuser=") builder.WriteString(fmt.Sprintf("%v", u.IsSuperuser)) builder.WriteString(", ") - builder.WriteString("role=") - builder.WriteString(fmt.Sprintf("%v", u.Role)) - builder.WriteString(", ") builder.WriteString("superuser=") builder.WriteString(fmt.Sprintf("%v", u.Superuser)) builder.WriteString(", ") + builder.WriteString("role=") + builder.WriteString(fmt.Sprintf("%v", u.Role)) + builder.WriteString(", ") builder.WriteString("activated_on=") builder.WriteString(u.ActivatedOn.Format(time.ANSIC)) builder.WriteByte(')') diff --git a/backend/internal/data/ent/user/user.go b/backend/internal/data/ent/user/user.go index c8b61c2..33b657b 100644 --- a/backend/internal/data/ent/user/user.go +++ b/backend/internal/data/ent/user/user.go @@ -6,6 +6,8 @@ import ( "fmt" "time" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/google/uuid" ) @@ -26,16 +28,18 @@ const ( FieldPassword = "password" // FieldIsSuperuser holds the string denoting the is_superuser field in the database. FieldIsSuperuser = "is_superuser" - // FieldRole holds the string denoting the role field in the database. - FieldRole = "role" // FieldSuperuser holds the string denoting the superuser field in the database. 
FieldSuperuser = "superuser" + // FieldRole holds the string denoting the role field in the database. + FieldRole = "role" // FieldActivatedOn holds the string denoting the activated_on field in the database. FieldActivatedOn = "activated_on" // EdgeGroup holds the string denoting the group edge name in mutations. EdgeGroup = "group" // EdgeAuthTokens holds the string denoting the auth_tokens edge name in mutations. EdgeAuthTokens = "auth_tokens" + // EdgeNotifiers holds the string denoting the notifiers edge name in mutations. + EdgeNotifiers = "notifiers" // Table holds the table name of the user in the database. Table = "users" // GroupTable is the table that holds the group relation/edge. @@ -52,6 +56,13 @@ const ( AuthTokensInverseTable = "auth_tokens" // AuthTokensColumn is the table column denoting the auth_tokens relation/edge. AuthTokensColumn = "user_auth_tokens" + // NotifiersTable is the table that holds the notifiers relation/edge. + NotifiersTable = "notifiers" + // NotifiersInverseTable is the table name for the Notifier entity. + // It exists in this package in order to avoid circular dependency with the "notifier" package. + NotifiersInverseTable = "notifiers" + // NotifiersColumn is the table column denoting the notifiers relation/edge. + NotifiersColumn = "user_id" ) // Columns holds all SQL columns for user fields. @@ -63,8 +74,8 @@ var Columns = []string{ FieldEmail, FieldPassword, FieldIsSuperuser, - FieldRole, FieldSuperuser, + FieldRole, FieldActivatedOn, } @@ -135,3 +146,112 @@ func RoleValidator(r Role) error { return fmt.Errorf("user: invalid enum value for role field: %q", r) } } + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByIsSuperuser orders the results by the is_superuser field. +func ByIsSuperuser(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsSuperuser, opts...).ToFunc() +} + +// BySuperuser orders the results by the superuser field. +func BySuperuser(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSuperuser, opts...).ToFunc() +} + +// ByRole orders the results by the role field. +func ByRole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRole, opts...).ToFunc() +} + +// ByActivatedOn orders the results by the activated_on field. +func ByActivatedOn(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldActivatedOn, opts...).ToFunc() +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAuthTokensCount orders the results by auth_tokens count. 
+func ByAuthTokensCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAuthTokensStep(), opts...) + } +} + +// ByAuthTokens orders the results by auth_tokens terms. +func ByAuthTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthTokensStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByNotifiersCount orders the results by notifiers count. +func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...) + } +} + +// ByNotifiers orders the results by notifiers terms. +func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newAuthTokensStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthTokensInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AuthTokensTable, AuthTokensColumn), + ) +} +func newNotifiersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(NotifiersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn), + ) +} diff --git a/backend/internal/data/ent/user/where.go b/backend/internal/data/ent/user/where.go index 78335a7..8686e73 100644 --- a/backend/internal/data/ent/user/where.go +++ b/backend/internal/data/ent/user/where.go @@ -381,6 +381,16 @@ func IsSuperuserNEQ(v bool) predicate.User { return predicate.User(sql.FieldNEQ(FieldIsSuperuser, v)) } +// SuperuserEQ applies the EQ 
predicate on the "superuser" field. +func SuperuserEQ(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldSuperuser, v)) +} + +// SuperuserNEQ applies the NEQ predicate on the "superuser" field. +func SuperuserNEQ(v bool) predicate.User { + return predicate.User(sql.FieldNEQ(FieldSuperuser, v)) +} + // RoleEQ applies the EQ predicate on the "role" field. func RoleEQ(v Role) predicate.User { return predicate.User(sql.FieldEQ(FieldRole, v)) @@ -401,16 +411,6 @@ func RoleNotIn(vs ...Role) predicate.User { return predicate.User(sql.FieldNotIn(FieldRole, vs...)) } -// SuperuserEQ applies the EQ predicate on the "superuser" field. -func SuperuserEQ(v bool) predicate.User { - return predicate.User(sql.FieldEQ(FieldSuperuser, v)) -} - -// SuperuserNEQ applies the NEQ predicate on the "superuser" field. -func SuperuserNEQ(v bool) predicate.User { - return predicate.User(sql.FieldNEQ(FieldSuperuser, v)) -} - // ActivatedOnEQ applies the EQ predicate on the "activated_on" field. func ActivatedOnEQ(v time.Time) predicate.User { return predicate.User(sql.FieldEQ(FieldActivatedOn, v)) @@ -475,11 +475,7 @@ func HasGroup() predicate.User { // HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). func HasGroupWith(preds ...predicate.Group) predicate.User { return predicate.User(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(GroupInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), - ) + step := newGroupStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -501,12 +497,31 @@ func HasAuthTokens() predicate.User { // HasAuthTokensWith applies the HasEdge predicate on the "auth_tokens" edge with a given conditions (other predicates). 
func HasAuthTokensWith(preds ...predicate.AuthTokens) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAuthTokensStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasNotifiers applies the HasEdge predicate on the "notifiers" edge. +func HasNotifiers() predicate.User { return predicate.User(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AuthTokensInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AuthTokensTable, AuthTokensColumn), + sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn), ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates). +func HasNotifiersWith(preds ...predicate.Notifier) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newNotifiersStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -517,32 +532,15 @@ func HasAuthTokensWith(preds ...predicate.AuthTokens) predicate.User { // And groups predicates with the AND operator between them. func And(predicates ...predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.User(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.User(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.User) predicate.User { - return predicate.User(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.User(sql.NotPredicates(p)) } diff --git a/backend/internal/data/ent/user_create.go b/backend/internal/data/ent/user_create.go index 3dc703d..2cfe2d1 100644 --- a/backend/internal/data/ent/user_create.go +++ b/backend/internal/data/ent/user_create.go @@ -13,6 +13,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens" "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -83,20 +84,6 @@ func (uc *UserCreate) SetNillableIsSuperuser(b *bool) *UserCreate { return uc } -// SetRole sets the "role" field. -func (uc *UserCreate) SetRole(u user.Role) *UserCreate { - uc.mutation.SetRole(u) - return uc -} - -// SetNillableRole sets the "role" field if the given value is not nil. -func (uc *UserCreate) SetNillableRole(u *user.Role) *UserCreate { - if u != nil { - uc.SetRole(*u) - } - return uc -} - // SetSuperuser sets the "superuser" field. func (uc *UserCreate) SetSuperuser(b bool) *UserCreate { uc.mutation.SetSuperuser(b) @@ -111,6 +98,20 @@ func (uc *UserCreate) SetNillableSuperuser(b *bool) *UserCreate { return uc } +// SetRole sets the "role" field. +func (uc *UserCreate) SetRole(u user.Role) *UserCreate { + uc.mutation.SetRole(u) + return uc +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (uc *UserCreate) SetNillableRole(u *user.Role) *UserCreate { + if u != nil { + uc.SetRole(*u) + } + return uc +} + // SetActivatedOn sets the "activated_on" field. func (uc *UserCreate) SetActivatedOn(t time.Time) *UserCreate { uc.mutation.SetActivatedOn(t) @@ -165,6 +166,21 @@ func (uc *UserCreate) AddAuthTokens(a ...*AuthTokens) *UserCreate { return uc.AddAuthTokenIDs(ids...) 
} +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (uc *UserCreate) AddNotifierIDs(ids ...uuid.UUID) *UserCreate { + uc.mutation.AddNotifierIDs(ids...) + return uc +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (uc *UserCreate) AddNotifiers(n ...*Notifier) *UserCreate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uc.AddNotifierIDs(ids...) +} + // Mutation returns the UserMutation object of the builder. func (uc *UserCreate) Mutation() *UserMutation { return uc.mutation @@ -173,7 +189,7 @@ func (uc *UserCreate) Mutation() *UserMutation { // Save creates the User in the database. func (uc *UserCreate) Save(ctx context.Context) (*User, error) { uc.defaults() - return withHooks[*User, UserMutation](ctx, uc.sqlSave, uc.mutation, uc.hooks) + return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -212,14 +228,14 @@ func (uc *UserCreate) defaults() { v := user.DefaultIsSuperuser uc.mutation.SetIsSuperuser(v) } - if _, ok := uc.mutation.Role(); !ok { - v := user.DefaultRole - uc.mutation.SetRole(v) - } if _, ok := uc.mutation.Superuser(); !ok { v := user.DefaultSuperuser uc.mutation.SetSuperuser(v) } + if _, ok := uc.mutation.Role(); !ok { + v := user.DefaultRole + uc.mutation.SetRole(v) + } if _, ok := uc.mutation.ID(); !ok { v := user.DefaultID() uc.mutation.SetID(v) @@ -261,6 +277,9 @@ func (uc *UserCreate) check() error { if _, ok := uc.mutation.IsSuperuser(); !ok { return &ValidationError{Name: "is_superuser", err: errors.New(`ent: missing required field "User.is_superuser"`)} } + if _, ok := uc.mutation.Superuser(); !ok { + return &ValidationError{Name: "superuser", err: errors.New(`ent: missing required field "User.superuser"`)} + } if _, ok := uc.mutation.Role(); !ok { return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "User.role"`)} } @@ -269,9 +288,6 @@ func (uc 
*UserCreate) check() error { return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} } } - if _, ok := uc.mutation.Superuser(); !ok { - return &ValidationError{Name: "superuser", err: errors.New(`ent: missing required field "User.superuser"`)} - } if _, ok := uc.mutation.GroupID(); !ok { return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "User.group"`)} } @@ -334,14 +350,14 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value) _node.IsSuperuser = value } - if value, ok := uc.mutation.Role(); ok { - _spec.SetField(user.FieldRole, field.TypeEnum, value) - _node.Role = value - } if value, ok := uc.mutation.Superuser(); ok { _spec.SetField(user.FieldSuperuser, field.TypeBool, value) _node.Superuser = value } + if value, ok := uc.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeEnum, value) + _node.Role = value + } if value, ok := uc.mutation.ActivatedOn(); ok { _spec.SetField(user.FieldActivatedOn, field.TypeTime, value) _node.ActivatedOn = value @@ -354,10 +370,7 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { Columns: []string{user.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -374,10 +387,23 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.NotifiersIDs(); 
len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -391,11 +417,15 @@ func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { // UserCreateBulk is the builder for creating many User entities in bulk. type UserCreateBulk struct { config + err error builders []*UserCreate } // Save creates the User entities in the database. func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) nodes := make([]*User, len(ucb.builders)) mutators := make([]Mutator, len(ucb.builders)) @@ -412,8 +442,8 @@ func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) } else { diff --git a/backend/internal/data/ent/user_delete.go b/backend/internal/data/ent/user_delete.go index 4e38aab..08fd3ef 100644 --- a/backend/internal/data/ent/user_delete.go +++ b/backend/internal/data/ent/user_delete.go @@ -27,7 +27,7 @@ func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ud *UserDelete) Exec(ctx context.Context) (int, error) { - return withHooks[int, UserMutation](ctx, ud.sqlExec, ud.mutation, ud.hooks) + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) } // ExecX is like Exec, but panics if an error occurs. 
diff --git a/backend/internal/data/ent/user_query.go b/backend/internal/data/ent/user_query.go index a722f2e..7205e9b 100644 --- a/backend/internal/data/ent/user_query.go +++ b/backend/internal/data/ent/user_query.go @@ -14,6 +14,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens" "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -22,11 +23,12 @@ import ( type UserQuery struct { config ctx *QueryContext - order []OrderFunc + order []user.OrderOption inters []Interceptor predicates []predicate.User withGroup *GroupQuery withAuthTokens *AuthTokensQuery + withNotifiers *NotifierQuery withFKs bool // intermediate query (i.e. traversal path). sql *sql.Selector @@ -59,7 +61,7 @@ func (uq *UserQuery) Unique(unique bool) *UserQuery { } // Order specifies how the records should be ordered. -func (uq *UserQuery) Order(o ...OrderFunc) *UserQuery { +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { uq.order = append(uq.order, o...) return uq } @@ -108,6 +110,28 @@ func (uq *UserQuery) QueryAuthTokens() *AuthTokensQuery { return query } +// QueryNotifiers chains the current query on the "notifiers" edge. 
+func (uq *UserQuery) QueryNotifiers() *NotifierQuery { + query := (&NotifierClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(notifier.Table, notifier.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.NotifiersTable, user.NotifiersColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first User entity from the query. // Returns a *NotFoundError when no User was found. func (uq *UserQuery) First(ctx context.Context) (*User, error) { @@ -297,11 +321,12 @@ func (uq *UserQuery) Clone() *UserQuery { return &UserQuery{ config: uq.config, ctx: uq.ctx.Clone(), - order: append([]OrderFunc{}, uq.order...), + order: append([]user.OrderOption{}, uq.order...), inters: append([]Interceptor{}, uq.inters...), predicates: append([]predicate.User{}, uq.predicates...), withGroup: uq.withGroup.Clone(), withAuthTokens: uq.withAuthTokens.Clone(), + withNotifiers: uq.withNotifiers.Clone(), // clone intermediate query. sql: uq.sql.Clone(), path: uq.path, @@ -330,6 +355,17 @@ func (uq *UserQuery) WithAuthTokens(opts ...func(*AuthTokensQuery)) *UserQuery { return uq } +// WithNotifiers tells the query-builder to eager-load the nodes that are connected to +// the "notifiers" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithNotifiers(opts ...func(*NotifierQuery)) *UserQuery { + query := (&NotifierClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withNotifiers = query + return uq +} + // GroupBy is used to group vertices by one or more fields/columns. 
// It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -409,9 +445,10 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e nodes = []*User{} withFKs = uq.withFKs _spec = uq.querySpec() - loadedTypes = [2]bool{ + loadedTypes = [3]bool{ uq.withGroup != nil, uq.withAuthTokens != nil, + uq.withNotifiers != nil, } ) if uq.withGroup != nil { @@ -451,6 +488,13 @@ func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e return nil, err } } + if query := uq.withNotifiers; query != nil { + if err := uq.loadNotifiers(ctx, query, nodes, + func(n *User) { n.Edges.Notifiers = []*Notifier{} }, + func(n *User, e *Notifier) { n.Edges.Notifiers = append(n.Edges.Notifiers, e) }); err != nil { + return nil, err + } + } return nodes, nil } @@ -498,7 +542,7 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery, } query.withFKs = true query.Where(predicate.AuthTokens(func(s *sql.Selector) { - s.Where(sql.InValues(user.AuthTokensColumn, fks...)) + s.Where(sql.InValues(s.C(user.AuthTokensColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -511,7 +555,37 @@ func (uq *UserQuery) loadAuthTokens(ctx context.Context, query *AuthTokensQuery, } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "user_auth_tokens" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "user_auth_tokens" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (uq *UserQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, nodes []*User, init func(*User), assign func(*User, *Notifier)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + 
query.ctx.AppendFieldOnce(notifier.FieldUserID) + } + query.Where(predicate.Notifier(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.NotifiersColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) } assign(node, n) } diff --git a/backend/internal/data/ent/user_update.go b/backend/internal/data/ent/user_update.go index 4bb0296..0e4c01a 100644 --- a/backend/internal/data/ent/user_update.go +++ b/backend/internal/data/ent/user_update.go @@ -14,6 +14,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent/authtokens" "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" "github.com/hay-kot/homebox/backend/internal/data/ent/user" ) @@ -43,18 +44,42 @@ func (uu *UserUpdate) SetName(s string) *UserUpdate { return uu } +// SetNillableName sets the "name" field if the given value is not nil. +func (uu *UserUpdate) SetNillableName(s *string) *UserUpdate { + if s != nil { + uu.SetName(*s) + } + return uu +} + // SetEmail sets the "email" field. func (uu *UserUpdate) SetEmail(s string) *UserUpdate { uu.mutation.SetEmail(s) return uu } +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uu *UserUpdate) SetNillableEmail(s *string) *UserUpdate { + if s != nil { + uu.SetEmail(*s) + } + return uu +} + // SetPassword sets the "password" field. func (uu *UserUpdate) SetPassword(s string) *UserUpdate { uu.mutation.SetPassword(s) return uu } +// SetNillablePassword sets the "password" field if the given value is not nil. 
+func (uu *UserUpdate) SetNillablePassword(s *string) *UserUpdate { + if s != nil { + uu.SetPassword(*s) + } + return uu +} + // SetIsSuperuser sets the "is_superuser" field. func (uu *UserUpdate) SetIsSuperuser(b bool) *UserUpdate { uu.mutation.SetIsSuperuser(b) @@ -69,20 +94,6 @@ func (uu *UserUpdate) SetNillableIsSuperuser(b *bool) *UserUpdate { return uu } -// SetRole sets the "role" field. -func (uu *UserUpdate) SetRole(u user.Role) *UserUpdate { - uu.mutation.SetRole(u) - return uu -} - -// SetNillableRole sets the "role" field if the given value is not nil. -func (uu *UserUpdate) SetNillableRole(u *user.Role) *UserUpdate { - if u != nil { - uu.SetRole(*u) - } - return uu -} - // SetSuperuser sets the "superuser" field. func (uu *UserUpdate) SetSuperuser(b bool) *UserUpdate { uu.mutation.SetSuperuser(b) @@ -97,6 +108,20 @@ func (uu *UserUpdate) SetNillableSuperuser(b *bool) *UserUpdate { return uu } +// SetRole sets the "role" field. +func (uu *UserUpdate) SetRole(u user.Role) *UserUpdate { + uu.mutation.SetRole(u) + return uu +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (uu *UserUpdate) SetNillableRole(u *user.Role) *UserUpdate { + if u != nil { + uu.SetRole(*u) + } + return uu +} + // SetActivatedOn sets the "activated_on" field. func (uu *UserUpdate) SetActivatedOn(t time.Time) *UserUpdate { uu.mutation.SetActivatedOn(t) @@ -143,6 +168,21 @@ func (uu *UserUpdate) AddAuthTokens(a ...*AuthTokens) *UserUpdate { return uu.AddAuthTokenIDs(ids...) } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (uu *UserUpdate) AddNotifierIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.AddNotifierIDs(ids...) + return uu +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (uu *UserUpdate) AddNotifiers(n ...*Notifier) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.AddNotifierIDs(ids...) 
+} + // Mutation returns the UserMutation object of the builder. func (uu *UserUpdate) Mutation() *UserMutation { return uu.mutation @@ -175,10 +215,31 @@ func (uu *UserUpdate) RemoveAuthTokens(a ...*AuthTokens) *UserUpdate { return uu.RemoveAuthTokenIDs(ids...) } +// ClearNotifiers clears all "notifiers" edges to the Notifier entity. +func (uu *UserUpdate) ClearNotifiers() *UserUpdate { + uu.mutation.ClearNotifiers() + return uu +} + +// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs. +func (uu *UserUpdate) RemoveNotifierIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.RemoveNotifierIDs(ids...) + return uu +} + +// RemoveNotifiers removes "notifiers" edges to Notifier entities. +func (uu *UserUpdate) RemoveNotifiers(n ...*Notifier) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.RemoveNotifierIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. func (uu *UserUpdate) Save(ctx context.Context) (int, error) { uu.defaults() - return withHooks[int, UserMutation](ctx, uu.sqlSave, uu.mutation, uu.hooks) + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -266,12 +327,12 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := uu.mutation.IsSuperuser(); ok { _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value) } - if value, ok := uu.mutation.Role(); ok { - _spec.SetField(user.FieldRole, field.TypeEnum, value) - } if value, ok := uu.mutation.Superuser(); ok { _spec.SetField(user.FieldSuperuser, field.TypeBool, value) } + if value, ok := uu.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeEnum, value) + } if value, ok := uu.mutation.ActivatedOn(); ok { _spec.SetField(user.FieldActivatedOn, field.TypeTime, value) } @@ -286,10 +347,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -302,10 +360,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -321,10 +376,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -337,10 +389,7 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: 
authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -356,10 +405,52 @@ func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !uu.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.NotifiersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -399,18 +490,42 @@ func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne { return uuo } +// SetNillableName sets the "name" field if the given value is not nil. 
+func (uuo *UserUpdateOne) SetNillableName(s *string) *UserUpdateOne { + if s != nil { + uuo.SetName(*s) + } + return uuo +} + // SetEmail sets the "email" field. func (uuo *UserUpdateOne) SetEmail(s string) *UserUpdateOne { uuo.mutation.SetEmail(s) return uuo } +// SetNillableEmail sets the "email" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableEmail(s *string) *UserUpdateOne { + if s != nil { + uuo.SetEmail(*s) + } + return uuo +} + // SetPassword sets the "password" field. func (uuo *UserUpdateOne) SetPassword(s string) *UserUpdateOne { uuo.mutation.SetPassword(s) return uuo } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillablePassword(s *string) *UserUpdateOne { + if s != nil { + uuo.SetPassword(*s) + } + return uuo +} + // SetIsSuperuser sets the "is_superuser" field. func (uuo *UserUpdateOne) SetIsSuperuser(b bool) *UserUpdateOne { uuo.mutation.SetIsSuperuser(b) @@ -425,20 +540,6 @@ func (uuo *UserUpdateOne) SetNillableIsSuperuser(b *bool) *UserUpdateOne { return uuo } -// SetRole sets the "role" field. -func (uuo *UserUpdateOne) SetRole(u user.Role) *UserUpdateOne { - uuo.mutation.SetRole(u) - return uuo -} - -// SetNillableRole sets the "role" field if the given value is not nil. -func (uuo *UserUpdateOne) SetNillableRole(u *user.Role) *UserUpdateOne { - if u != nil { - uuo.SetRole(*u) - } - return uuo -} - // SetSuperuser sets the "superuser" field. func (uuo *UserUpdateOne) SetSuperuser(b bool) *UserUpdateOne { uuo.mutation.SetSuperuser(b) @@ -453,6 +554,20 @@ func (uuo *UserUpdateOne) SetNillableSuperuser(b *bool) *UserUpdateOne { return uuo } +// SetRole sets the "role" field. +func (uuo *UserUpdateOne) SetRole(u user.Role) *UserUpdateOne { + uuo.mutation.SetRole(u) + return uuo +} + +// SetNillableRole sets the "role" field if the given value is not nil. 
+func (uuo *UserUpdateOne) SetNillableRole(u *user.Role) *UserUpdateOne { + if u != nil { + uuo.SetRole(*u) + } + return uuo +} + // SetActivatedOn sets the "activated_on" field. func (uuo *UserUpdateOne) SetActivatedOn(t time.Time) *UserUpdateOne { uuo.mutation.SetActivatedOn(t) @@ -499,6 +614,21 @@ func (uuo *UserUpdateOne) AddAuthTokens(a ...*AuthTokens) *UserUpdateOne { return uuo.AddAuthTokenIDs(ids...) } +// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs. +func (uuo *UserUpdateOne) AddNotifierIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.AddNotifierIDs(ids...) + return uuo +} + +// AddNotifiers adds the "notifiers" edges to the Notifier entity. +func (uuo *UserUpdateOne) AddNotifiers(n ...*Notifier) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.AddNotifierIDs(ids...) +} + // Mutation returns the UserMutation object of the builder. func (uuo *UserUpdateOne) Mutation() *UserMutation { return uuo.mutation @@ -531,6 +661,27 @@ func (uuo *UserUpdateOne) RemoveAuthTokens(a ...*AuthTokens) *UserUpdateOne { return uuo.RemoveAuthTokenIDs(ids...) } +// ClearNotifiers clears all "notifiers" edges to the Notifier entity. +func (uuo *UserUpdateOne) ClearNotifiers() *UserUpdateOne { + uuo.mutation.ClearNotifiers() + return uuo +} + +// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs. +func (uuo *UserUpdateOne) RemoveNotifierIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.RemoveNotifierIDs(ids...) + return uuo +} + +// RemoveNotifiers removes "notifiers" edges to Notifier entities. +func (uuo *UserUpdateOne) RemoveNotifiers(n ...*Notifier) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.RemoveNotifierIDs(ids...) +} + // Where appends a list predicates to the UserUpdate builder. func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { uuo.mutation.Where(ps...) 
@@ -547,7 +698,7 @@ func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne // Save executes the query and returns the updated User entity. func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { uuo.defaults() - return withHooks[*User, UserMutation](ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -652,12 +803,12 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) if value, ok := uuo.mutation.IsSuperuser(); ok { _spec.SetField(user.FieldIsSuperuser, field.TypeBool, value) } - if value, ok := uuo.mutation.Role(); ok { - _spec.SetField(user.FieldRole, field.TypeEnum, value) - } if value, ok := uuo.mutation.Superuser(); ok { _spec.SetField(user.FieldSuperuser, field.TypeBool, value) } + if value, ok := uuo.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeEnum, value) + } if value, ok := uuo.mutation.ActivatedOn(); ok { _spec.SetField(user.FieldActivatedOn, field.TypeTime, value) } @@ -672,10 +823,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -688,10 +836,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.GroupColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: group.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -707,10 +852,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AuthTokensColumn}, Bidi: false, 
Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -723,10 +865,7 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), }, } for _, k := range nodes { @@ -742,10 +881,52 @@ func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) Columns: []string{user.AuthTokensColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeUUID, - Column: authtokens.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uuo.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !uuo.mutation.NotifiersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
uuo.mutation.NotifiersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.NotifiersTable, + Columns: []string{user.NotifiersColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID), }, } for _, k := range nodes { diff --git a/backend/internal/data/migrations/migrations.go b/backend/internal/data/migrations/migrations.go index d477df9..a2afdc8 100644 --- a/backend/internal/data/migrations/migrations.go +++ b/backend/internal/data/migrations/migrations.go @@ -1,9 +1,10 @@ +// Package migrations provides a way to embed the migrations into the binary. package migrations import ( "embed" "os" - "path/filepath" + "path" ) //go:embed all:migrations @@ -28,12 +29,12 @@ func Write(temp string) error { continue } - b, err := Files.ReadFile(filepath.Join("migrations", f.Name())) + b, err := Files.ReadFile(path.Join("migrations", f.Name())) if err != nil { return err } - err = os.WriteFile(filepath.Join(temp, f.Name()), b, 0o644) + err = os.WriteFile(path.Join(temp, f.Name()), b, 0o644) if err != nil { return err } diff --git a/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql b/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql new file mode 100644 index 0000000..a43ecfb --- /dev/null +++ b/backend/internal/data/migrations/migrations/20230227024134_add_scheduled_date.sql @@ -0,0 +1,12 @@ +-- disable the enforcement of foreign-keys constraints +PRAGMA foreign_keys = off; +-- create "new_maintenance_entries" table +CREATE TABLE `new_maintenance_entries` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `date` datetime NULL, `scheduled_date` datetime NULL, `name` text NOT NULL, `description` text NULL, `cost` real NOT NULL DEFAULT 0, `item_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `maintenance_entries_items_maintenance_entries` FOREIGN KEY (`item_id`) 
REFERENCES `items` (`id`) ON DELETE CASCADE); +-- copy rows from old table "maintenance_entries" to new temporary table "new_maintenance_entries" +INSERT INTO `new_maintenance_entries` (`id`, `created_at`, `updated_at`, `date`, `name`, `description`, `cost`, `item_id`) SELECT `id`, `created_at`, `updated_at`, `date`, `name`, `description`, `cost`, `item_id` FROM `maintenance_entries`; +-- drop "maintenance_entries" table after copying rows +DROP TABLE `maintenance_entries`; +-- rename temporary table "new_maintenance_entries" to "maintenance_entries" +ALTER TABLE `new_maintenance_entries` RENAME TO `maintenance_entries`; +-- enable back the enforcement of foreign-keys constraints +PRAGMA foreign_keys = on; diff --git a/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql b/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql new file mode 100644 index 0000000..09b1824 --- /dev/null +++ b/backend/internal/data/migrations/migrations/20230305065819_add_notifier_types.sql @@ -0,0 +1,6 @@ +-- create "notifiers" table +CREATE TABLE `notifiers` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `name` text NOT NULL, `url` text NOT NULL, `is_active` bool NOT NULL DEFAULT true, `user_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `notifiers_users_notifiers` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE); +-- create index "notifier_user_id" to table: "notifiers" +CREATE INDEX `notifier_user_id` ON `notifiers` (`user_id`); +-- create index "notifier_user_id_is_active" to table: "notifiers" +CREATE INDEX `notifier_user_id_is_active` ON `notifiers` (`user_id`, `is_active`); diff --git a/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql b/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql new file mode 100644 index 0000000..5f0f16d --- /dev/null +++ 
b/backend/internal/data/migrations/migrations/20230305071524_add_group_id_to_notifiers.sql @@ -0,0 +1,20 @@ +-- disable the enforcement of foreign-keys constraints +PRAGMA foreign_keys = off; +-- create "new_notifiers" table +CREATE TABLE `new_notifiers` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `name` text NOT NULL, `url` text NOT NULL, `is_active` bool NOT NULL DEFAULT true, `group_id` uuid NOT NULL, `user_id` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `notifiers_groups_notifiers` FOREIGN KEY (`group_id`) REFERENCES `groups` (`id`) ON DELETE CASCADE, CONSTRAINT `notifiers_users_notifiers` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE); +-- copy rows from old table "notifiers" to new temporary table "new_notifiers" +INSERT INTO `new_notifiers` (`id`, `created_at`, `updated_at`, `name`, `url`, `is_active`, `user_id`) SELECT `id`, `created_at`, `updated_at`, `name`, `url`, `is_active`, `user_id` FROM `notifiers`; +-- drop "notifiers" table after copying rows +DROP TABLE `notifiers`; +-- rename temporary table "new_notifiers" to "notifiers" +ALTER TABLE `new_notifiers` RENAME TO `notifiers`; +-- create index "notifier_user_id" to table: "notifiers" +CREATE INDEX `notifier_user_id` ON `notifiers` (`user_id`); +-- create index "notifier_user_id_is_active" to table: "notifiers" +CREATE INDEX `notifier_user_id_is_active` ON `notifiers` (`user_id`, `is_active`); +-- create index "notifier_group_id" to table: "notifiers" +CREATE INDEX `notifier_group_id` ON `notifiers` (`group_id`); +-- create index "notifier_group_id_is_active" to table: "notifiers" +CREATE INDEX `notifier_group_id_is_active` ON `notifiers` (`group_id`, `is_active`); +-- enable back the enforcement of foreign-keys constraints +PRAGMA foreign_keys = on; diff --git a/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql 
b/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql new file mode 100644 index 0000000..b7506c1 --- /dev/null +++ b/backend/internal/data/migrations/migrations/20231006213457_add_primary_attachment_flag.sql @@ -0,0 +1,12 @@ +-- Disable the enforcement of foreign-keys constraints +PRAGMA foreign_keys = off; +-- Create "new_attachments" table +CREATE TABLE `new_attachments` (`id` uuid NOT NULL, `created_at` datetime NOT NULL, `updated_at` datetime NOT NULL, `type` text NOT NULL DEFAULT 'attachment', `primary` bool NOT NULL DEFAULT false, `document_attachments` uuid NOT NULL, `item_attachments` uuid NOT NULL, PRIMARY KEY (`id`), CONSTRAINT `attachments_documents_attachments` FOREIGN KEY (`document_attachments`) REFERENCES `documents` (`id`) ON DELETE CASCADE, CONSTRAINT `attachments_items_attachments` FOREIGN KEY (`item_attachments`) REFERENCES `items` (`id`) ON DELETE CASCADE); +-- Copy rows from old table "attachments" to new temporary table "new_attachments" +INSERT INTO `new_attachments` (`id`, `created_at`, `updated_at`, `type`, `document_attachments`, `item_attachments`) SELECT `id`, `created_at`, `updated_at`, `type`, `document_attachments`, `item_attachments` FROM `attachments`; +-- Drop "attachments" table after copying rows +DROP TABLE `attachments`; +-- Rename temporary table "new_attachments" to "attachments" +ALTER TABLE `new_attachments` RENAME TO `attachments`; +-- Enable back the enforcement of foreign-keys constraints +PRAGMA foreign_keys = on; diff --git a/backend/internal/data/migrations/migrations/atlas.sum b/backend/internal/data/migrations/migrations/atlas.sum index 5de79cc..e8d99a6 100644 --- a/backend/internal/data/migrations/migrations/atlas.sum +++ b/backend/internal/data/migrations/migrations/atlas.sum @@ -1,4 +1,4 @@ -h1:dn3XsqwgjCxEtpLXmHlt2ALRwg2cZB6m8lg2faxeLXM= +h1:sjJCTAqc9FG8BKBIzh5ZynYD/Ilz6vnLqM4XX83WQ4M= 20220929052825_init.sql h1:ZlCqm1wzjDmofeAcSX3jE4h4VcdTNGpRg2eabztDy9Q= 
20221001210956_group_invitations.sql h1:YQKJFtE39wFOcRNbZQ/d+ZlHwrcfcsZlcv/pLEYdpjw= 20221009173029_add_user_roles.sql h1:vWmzAfgEWQeGk0Vn70zfVPCcfEZth3E0JcvyKTjpYyU= @@ -9,3 +9,7 @@ h1:dn3XsqwgjCxEtpLXmHlt2ALRwg2cZB6m8lg2faxeLXM= 20221205230404_drop_document_tokens.sql h1:9dCbNFcjtsT6lEhkxCn/vYaGRmQrl1LefdEJgvkfhGg= 20221205234214_add_maintenance_entries.sql h1:B56VzCuDsed1k3/sYUoKlOkP90DcdLufxFK0qYvoafU= 20221205234812_cascade_delete_roles.sql h1:VIiaImR48nCHF3uFbOYOX1E79Ta5HsUBetGaSAbh9Gk= +20230227024134_add_scheduled_date.sql h1:8qO5OBZ0AzsfYEQOAQQrYIjyhSwM+v1A+/ylLSoiyoc= +20230305065819_add_notifier_types.sql h1:r5xrgCKYQ2o9byBqYeAX1zdp94BLdaxf4vq9OmGHNl0= +20230305071524_add_group_id_to_notifiers.sql h1:xDShqbyClcFhvJbwclOHdczgXbdffkxXNWjV61hL/t4= +20231006213457_add_primary_attachment_flag.sql h1:J4tMSJQFa7vaj0jpnh8YKTssdyIjRyq6RXDXZIzDDu4= diff --git a/backend/internal/data/repo/asset_id_type.go b/backend/internal/data/repo/asset_id_type.go index 06d610e..0a53a4a 100644 --- a/backend/internal/data/repo/asset_id_type.go +++ b/backend/internal/data/repo/asset_id_type.go @@ -32,13 +32,26 @@ func ParseAssetID(s string) (AID AssetID, ok bool) { return ParseAssetIDBytes([]byte(s)) } -func (aid AssetID) MarshalJSON() ([]byte, error) { +func (aid AssetID) String() string { + if aid.Nil() { + return "" + } + aidStr := fmt.Sprintf("%06d", aid) aidStr = fmt.Sprintf("%s-%s", aidStr[:3], aidStr[3:]) - return []byte(fmt.Sprintf(`"%s"`, aidStr)), nil + return aidStr +} + +func (aid AssetID) MarshalJSON() ([]byte, error) { + return []byte(`"` + aid.String() + `"`), nil } func (aid *AssetID) UnmarshalJSON(d []byte) error { + if len(d) == 0 || bytes.Equal(d, []byte(`""`)) { + *aid = -1 + return nil + } + d = bytes.Replace(d, []byte(`"`), []byte(``), -1) d = bytes.Replace(d, []byte(`-`), []byte(``), -1) @@ -50,3 +63,11 @@ func (aid *AssetID) UnmarshalJSON(d []byte) error { *aid = AssetID(aidInt) return nil } + +func (aid AssetID) MarshalCSV() (string, error) { + return 
aid.String(), nil +} + +func (aid *AssetID) UnmarshalCSV(d string) error { + return aid.UnmarshalJSON([]byte(d)) +} diff --git a/backend/internal/data/repo/asset_id_type_test.go b/backend/internal/data/repo/asset_id_type_test.go index 6a692d9..6aa7b99 100644 --- a/backend/internal/data/repo/asset_id_type_test.go +++ b/backend/internal/data/repo/asset_id_type_test.go @@ -21,7 +21,7 @@ func TestAssetID_MarshalJSON(t *testing.T) { { name: "zero test", aid: 0, - want: []byte(`"000-000"`), + want: []byte(`""`), }, { name: "large int", diff --git a/backend/internal/data/repo/automappers.go b/backend/internal/data/repo/automappers.go new file mode 100644 index 0000000..279164b --- /dev/null +++ b/backend/internal/data/repo/automappers.go @@ -0,0 +1,32 @@ +package repo + +type MapFunc[T any, U any] func(T) U + +func (a MapFunc[T, U]) Map(v T) U { + return a(v) +} + +func (a MapFunc[T, U]) MapEach(v []T) []U { + result := make([]U, len(v)) + for i, item := range v { + result[i] = a(item) + } + return result +} + +func (a MapFunc[T, U]) MapErr(v T, err error) (U, error) { + if err != nil { + var zero U + return zero, err + } + + return a(v), nil +} + +func (a MapFunc[T, U]) MapEachErr(v []T, err error) ([]U, error) { + if err != nil { + return nil, err + } + + return a.MapEach(v), nil +} diff --git a/backend/internal/data/repo/main_test.go b/backend/internal/data/repo/main_test.go index 221fbd5..47e5ec0 100644 --- a/backend/internal/data/repo/main_test.go +++ b/backend/internal/data/repo/main_test.go @@ -3,18 +3,18 @@ package repo import ( "context" "log" - "math/rand" "os" "testing" - "time" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/pkgs/faker" _ "github.com/mattn/go-sqlite3" ) var ( - fk = faker.NewFaker() + fk = faker.NewFaker() + tbus = eventbus.New() tClient *ent.Client tRepos *AllRepos @@ -40,21 +40,23 @@ func bootstrap() { } func TestMain(m 
*testing.M) { - rand.Seed(int64(time.Now().Unix())) - client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1") if err != nil { log.Fatalf("failed opening connection to sqlite: %v", err) } + go func() { + _ = tbus.Run(context.Background()) + }() + err = client.Schema.Create(context.Background()) if err != nil { log.Fatalf("failed creating schema resources: %v", err) } tClient = client - tRepos = New(tClient, os.TempDir()) - defer client.Close() + tRepos = New(tClient, tbus, os.TempDir()) + defer func() { _ = client.Close() }() bootstrap() diff --git a/backend/internal/data/repo/repo_documents_test.go b/backend/internal/data/repo/repo_documents_test.go index 2a22fac..4634235 100644 --- a/backend/internal/data/repo/repo_documents_test.go +++ b/backend/internal/data/repo/repo_documents_test.go @@ -11,6 +11,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func useDocs(t *testing.T, num int) []DocumentOut { @@ -25,7 +26,7 @@ func useDocs(t *testing.T, num int) []DocumentOut { Content: bytes.NewReader([]byte(fk.Str(10))), }) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, doc) results = append(results, doc) ids = append(ids, doc.ID) @@ -80,31 +81,31 @@ func TestDocumentRepository_CreateUpdateDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { // Create Document got, err := r.Create(tt.args.ctx, tt.args.gid, tt.args.doc) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tt.title, got.Title) assert.Equal(t, fmt.Sprintf("%s/%s/documents", temp, tt.args.gid), filepath.Dir(got.Path)) ensureRead := func() { // Read Document bts, err := os.ReadFile(got.Path) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, tt.content, string(bts)) } ensureRead() // Update Document got, err = r.Rename(tt.args.ctx, got.ID, "__"+tt.title+"__") - assert.NoError(t, err) + require.NoError(t, err) 
assert.Equal(t, "__"+tt.title+"__", got.Title) ensureRead() // Delete Document err = r.Delete(tt.args.ctx, got.ID) - assert.NoError(t, err) + require.NoError(t, err) _, err = os.Stat(got.Path) - assert.Error(t, err) + require.Error(t, err) }) } } diff --git a/backend/internal/data/repo/repo_group.go b/backend/internal/data/repo/repo_group.go index 678130c..8f93c78 100644 --- a/backend/internal/data/repo/repo_group.go +++ b/backend/internal/data/repo/repo_group.go @@ -16,7 +16,36 @@ import ( ) type GroupRepository struct { - db *ent.Client + db *ent.Client + groupMapper MapFunc[*ent.Group, Group] + invitationMapper MapFunc[*ent.GroupInvitationToken, GroupInvitation] +} + +func NewGroupRepository(db *ent.Client) *GroupRepository { + gmap := func(g *ent.Group) Group { + return Group{ + ID: g.ID, + Name: g.Name, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + Currency: strings.ToUpper(g.Currency), + } + } + + imap := func(i *ent.GroupInvitationToken) GroupInvitation { + return GroupInvitation{ + ID: i.ID, + ExpiresAt: i.ExpiresAt, + Uses: i.Uses, + Group: gmap(i.Edges.Group), + } + } + + return &GroupRepository{ + db: db, + groupMapper: gmap, + invitationMapper: imap, + } } type ( @@ -76,27 +105,8 @@ type ( } ) -var mapToGroupErr = mapTErrFunc(mapToGroup) - -func mapToGroup(g *ent.Group) Group { - return Group{ - ID: g.ID, - Name: g.Name, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, - Currency: strings.ToUpper(g.Currency.String()), - } -} - -var mapToGroupInvitationErr = mapTErrFunc(mapToGroupInvitation) - -func mapToGroupInvitation(g *ent.GroupInvitationToken) GroupInvitation { - return GroupInvitation{ - ID: g.ID, - ExpiresAt: g.ExpiresAt, - Uses: g.Uses, - Group: mapToGroup(g.Edges.Group), - } +func (r *GroupRepository) GetAllGroups(ctx context.Context) ([]Group, error) { + return r.groupMapper.MapEachErr(r.db.Group.Query().All(ctx)) } func (r *GroupRepository) StatsLocationsByPurchasePrice(ctx context.Context, GID uuid.UUID) ([]TotalsByOrganizer, 
error) { @@ -223,7 +233,7 @@ func (r *GroupRepository) StatsGroup(ctx context.Context, GID uuid.UUID) (GroupS (SELECT COUNT(*) FROM items WHERE group_items = ? AND items.archived = false) AS total_items, (SELECT COUNT(*) FROM locations WHERE group_locations = ?) AS total_locations, (SELECT COUNT(*) FROM labels WHERE group_labels = ?) AS total_labels, - (SELECT SUM(purchase_price) FROM items WHERE group_items = ? AND items.archived = false) AS total_item_price, + (SELECT SUM(purchase_price*quantity) FROM items WHERE group_items = ? AND items.archived = false) AS total_item_price, (SELECT COUNT(*) FROM items WHERE group_items = ? @@ -234,37 +244,41 @@ func (r *GroupRepository) StatsGroup(ctx context.Context, GID uuid.UUID) (GroupS var stats GroupStatistics row := r.db.Sql().QueryRowContext(ctx, q, GID, GID, GID, GID, GID, GID) - err := row.Scan(&stats.TotalUsers, &stats.TotalItems, &stats.TotalLocations, &stats.TotalLabels, &stats.TotalItemPrice, &stats.TotalWithWarranty) + var maybeTotalItemPrice *float64 + var maybeTotalWithWarranty *int + + err := row.Scan(&stats.TotalUsers, &stats.TotalItems, &stats.TotalLocations, &stats.TotalLabels, &maybeTotalItemPrice, &maybeTotalWithWarranty) if err != nil { return GroupStatistics{}, err } + stats.TotalItemPrice = orDefault(maybeTotalItemPrice, 0) + stats.TotalWithWarranty = orDefault(maybeTotalWithWarranty, 0) + return stats, nil } func (r *GroupRepository) GroupCreate(ctx context.Context, name string) (Group, error) { - return mapToGroupErr(r.db.Group.Create(). + return r.groupMapper.MapErr(r.db.Group.Create(). SetName(name). Save(ctx)) } func (r *GroupRepository) GroupUpdate(ctx context.Context, ID uuid.UUID, data GroupUpdate) (Group, error) { - currency := group.Currency(strings.ToLower(data.Currency)) - entity, err := r.db.Group.UpdateOneID(ID). SetName(data.Name). - SetCurrency(currency). + SetCurrency(strings.ToLower(data.Currency)). 
Save(ctx) - return mapToGroupErr(entity, err) + return r.groupMapper.MapErr(entity, err) } func (r *GroupRepository) GroupByID(ctx context.Context, id uuid.UUID) (Group, error) { - return mapToGroupErr(r.db.Group.Get(ctx, id)) + return r.groupMapper.MapErr(r.db.Group.Get(ctx, id)) } func (r *GroupRepository) InvitationGet(ctx context.Context, token []byte) (GroupInvitation, error) { - return mapToGroupInvitationErr(r.db.GroupInvitationToken.Query(). + return r.invitationMapper.MapErr(r.db.GroupInvitationToken.Query(). Where(groupinvitationtoken.Token(token)). WithGroup(). Only(ctx)) diff --git a/backend/internal/data/repo/repo_group_test.go b/backend/internal/data/repo/repo_group_test.go index 4321fec..180d72e 100644 --- a/backend/internal/data/repo/repo_group_test.go +++ b/backend/internal/data/repo/repo_group_test.go @@ -5,29 +5,30 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_Group_Create(t *testing.T) { g, err := tRepos.Groups.GroupCreate(context.Background(), "test") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test", g.Name) // Get by ID foundGroup, err := tRepos.Groups.GroupByID(context.Background(), g.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, g.ID, foundGroup.ID) } func Test_Group_Update(t *testing.T) { g, err := tRepos.Groups.GroupCreate(context.Background(), "test") - assert.NoError(t, err) + require.NoError(t, err) g, err = tRepos.Groups.GroupUpdate(context.Background(), g.ID, GroupUpdate{ Name: "test2", Currency: "eur", }) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "test2", g.Name) assert.Equal(t, "EUR", g.Currency) } @@ -38,7 +39,7 @@ func Test_Group_GroupStatistics(t *testing.T) { stats, err := tRepos.Groups.StatsGroup(context.Background(), tGroup.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 20, stats.TotalItems) assert.Equal(t, 20, stats.TotalLabels) assert.Equal(t, 1, stats.TotalUsers) 
diff --git a/backend/internal/data/repo/repo_item_attachments.go b/backend/internal/data/repo/repo_item_attachments.go index a034369..da57b31 100644 --- a/backend/internal/data/repo/repo_item_attachments.go +++ b/backend/internal/data/repo/repo_item_attachments.go @@ -7,6 +7,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" + "github.com/hay-kot/homebox/backend/internal/data/ent/item" ) // AttachmentRepo is a repository for Attachments table that links Items to Documents @@ -24,12 +25,14 @@ type ( UpdatedAt time.Time `json:"updatedAt"` Type string `json:"type"` Document DocumentOut `json:"document"` + Primary bool `json:"primary"` } ItemAttachmentUpdate struct { - ID uuid.UUID `json:"-"` - Type string `json:"type"` - Title string `json:"title"` + ID uuid.UUID `json:"-"` + Type string `json:"type"` + Title string `json:"title"` + Primary bool `json:"primary"` } ) @@ -39,6 +42,7 @@ func ToItemAttachment(attachment *ent.Attachment) ItemAttachment { CreatedAt: attachment.CreatedAt, UpdatedAt: attachment.UpdatedAt, Type: attachment.Type.String(), + Primary: attachment.Primary, Document: DocumentOut{ ID: attachment.Edges.Document.ID, Title: attachment.Edges.Document.Title, @@ -47,12 +51,31 @@ func ToItemAttachment(attachment *ent.Attachment) ItemAttachment { } } -func (r *AttachmentRepo) Create(ctx context.Context, itemId, docId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) { - return r.db.Attachment.Create(). +func (r *AttachmentRepo) Create(ctx context.Context, itemID, docID uuid.UUID, typ attachment.Type) (*ent.Attachment, error) { + bldr := r.db.Attachment.Create(). SetType(typ). - SetDocumentID(docId). - SetItemID(itemId). - Save(ctx) + SetDocumentID(docID). + SetItemID(itemID) + + // Autoset primary to true if this is the first attachment + // that is of type photo + if typ == attachment.TypePhoto { + cnt, err := r.db.Attachment.Query(). 
+ Where( + attachment.HasItemWith(item.ID(itemID)), + attachment.TypeEQ(typ), + ). + Count(ctx) + if err != nil { + return nil, err + } + + if cnt == 0 { + bldr = bldr.SetPrimary(true) + } + } + + return bldr.Save(ctx) } func (r *AttachmentRepo) Get(ctx context.Context, id uuid.UUID) (*ent.Attachment, error) { @@ -64,10 +87,33 @@ func (r *AttachmentRepo) Get(ctx context.Context, id uuid.UUID) (*ent.Attachment Only(ctx) } -func (r *AttachmentRepo) Update(ctx context.Context, itemId uuid.UUID, typ attachment.Type) (*ent.Attachment, error) { - itm, err := r.db.Attachment.UpdateOneID(itemId). - SetType(typ). - Save(ctx) +func (r *AttachmentRepo) Update(ctx context.Context, itemID uuid.UUID, data *ItemAttachmentUpdate) (*ent.Attachment, error) { + // TODO: execute within Tx + typ := attachment.Type(data.Type) + + bldr := r.db.Attachment.UpdateOneID(itemID). + SetType(typ) + + // Primary only applies to photos + if typ == attachment.TypePhoto { + bldr = bldr.SetPrimary(data.Primary) + } else { + bldr = bldr.SetPrimary(false) + } + + itm, err := bldr.Save(ctx) + if err != nil { + return nil, err + } + + // Ensure all other attachments are not primary + err = r.db.Attachment.Update(). + Where( + attachment.HasItemWith(item.ID(itemID)), + attachment.IDNEQ(itm.ID), + ). + SetPrimary(false). 
+ Exec(ctx) if err != nil { return nil, err } diff --git a/backend/internal/data/repo/repo_item_attachments_test.go b/backend/internal/data/repo/repo_item_attachments_test.go index 4c9d77d..9007b2e 100644 --- a/backend/internal/data/repo/repo_item_attachments_test.go +++ b/backend/internal/data/repo/repo_item_attachments_test.go @@ -8,6 +8,7 @@ import ( "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/ent/attachment" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAttachmentRepo_Create(t *testing.T) { @@ -23,8 +24,8 @@ func TestAttachmentRepo_Create(t *testing.T) { type args struct { ctx context.Context - itemId uuid.UUID - docId uuid.UUID + itemID uuid.UUID + docID uuid.UUID typ attachment.Type } tests := []struct { @@ -37,8 +38,8 @@ func TestAttachmentRepo_Create(t *testing.T) { name: "create attachment", args: args{ ctx: context.Background(), - itemId: item.ID, - docId: doc.ID, + itemID: item.ID, + docID: doc.ID, typ: attachment.TypePhoto, }, want: &ent.Attachment{ @@ -49,8 +50,8 @@ func TestAttachmentRepo_Create(t *testing.T) { name: "create attachment with invalid item id", args: args{ ctx: context.Background(), - itemId: uuid.New(), - docId: doc.ID, + itemID: uuid.New(), + docID: doc.ID, typ: "blarg", }, wantErr: true, @@ -58,7 +59,7 @@ func TestAttachmentRepo_Create(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := tRepos.Attachments.Create(tt.args.ctx, tt.args.itemId, tt.args.docId, tt.args.typ) + got, err := tRepos.Attachments.Create(tt.args.ctx, tt.args.itemID, tt.args.docID, tt.args.typ) if (err != nil) != tt.wantErr { t.Errorf("AttachmentRepo.Create() error = %v, wantErr %v", err, tt.wantErr) return @@ -71,9 +72,9 @@ func TestAttachmentRepo_Create(t *testing.T) { assert.Equal(t, tt.want.Type, got.Type) withItems, err := tRepos.Attachments.Get(tt.args.ctx, got.ID) - assert.NoError(t, err) - assert.Equal(t, 
tt.args.itemId, withItems.Edges.Item.ID) - assert.Equal(t, tt.args.docId, withItems.Edges.Document.ID) + require.NoError(t, err) + assert.Equal(t, tt.args.itemID, withItems.Edges.Item.ID) + assert.Equal(t, tt.args.docID, withItems.Edges.Document.ID) ids = append(ids, got.ID) }) @@ -96,7 +97,7 @@ func useAttachments(t *testing.T, n int) []*ent.Attachment { attachments := make([]*ent.Attachment, n) for i := 0; i < n; i++ { attachment, err := tRepos.Attachments.Create(context.Background(), item.ID, doc.ID, attachment.TypePhoto) - assert.NoError(t, err) + require.NoError(t, err) attachments[i] = attachment ids = append(ids, attachment.ID) @@ -110,11 +111,14 @@ func TestAttachmentRepo_Update(t *testing.T) { for _, typ := range []attachment.Type{"photo", "manual", "warranty", "attachment"} { t.Run(string(typ), func(t *testing.T) { - _, err := tRepos.Attachments.Update(context.Background(), entity.ID, typ) - assert.NoError(t, err) + _, err := tRepos.Attachments.Update(context.Background(), entity.ID, &ItemAttachmentUpdate{ + Type: string(typ), + }) + + require.NoError(t, err) updated, err := tRepos.Attachments.Get(context.Background(), entity.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, typ, updated.Type) }) } @@ -124,8 +128,8 @@ func TestAttachmentRepo_Delete(t *testing.T) { entity := useAttachments(t, 1)[0] err := tRepos.Attachments.Delete(context.Background(), entity.ID) - assert.NoError(t, err) + require.NoError(t, err) _, err = tRepos.Attachments.Get(context.Background(), entity.ID) - assert.Error(t, err) + require.Error(t, err) } diff --git a/backend/internal/data/repo/repo_items.go b/backend/internal/data/repo/repo_items.go index 5e73565..72ba904 100644 --- a/backend/internal/data/repo/repo_items.go +++ b/backend/internal/data/repo/repo_items.go @@ -6,7 +6,9 @@ import ( "time" "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" + 
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment" "github.com/hay-kot/homebox/backend/internal/data/ent/group" "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/itemfield" @@ -17,7 +19,8 @@ import ( ) type ItemsRepository struct { - db *ent.Client + db *ent.Client + bus *eventbus.EventBus } type ( @@ -29,13 +32,15 @@ type ( ItemQuery struct { Page int PageSize int - Search string `json:"search"` - AssetID AssetID `json:"assetId"` - LocationIDs []uuid.UUID `json:"locationIds"` - LabelIDs []uuid.UUID `json:"labelIds"` - SortBy string `json:"sortBy"` - IncludeArchived bool `json:"includeArchived"` - Fields []FieldQuery + Search string `json:"search"` + AssetID AssetID `json:"assetId"` + LocationIDs []uuid.UUID `json:"locationIds"` + LabelIDs []uuid.UUID `json:"labelIds"` + ParentItemIDs []uuid.UUID `json:"parentIds"` + SortBy string `json:"sortBy"` + IncludeArchived bool `json:"includeArchived"` + Fields []FieldQuery `json:"fields"` + OrderBy string `json:"orderBy"` } ItemField struct { @@ -50,19 +55,20 @@ type ( ItemCreate struct { ImportRef string `json:"-"` - ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` - Name string `json:"name"` - Description string `json:"description"` + ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` + Name string `json:"name" validate:"required,min=1,max=255"` + Description string `json:"description" validate:"max=1000"` AssetID AssetID `json:"-"` // Edges LocationID uuid.UUID `json:"locationId"` LabelIDs []uuid.UUID `json:"labelIds"` } + ItemUpdate struct { - ParentID uuid.UUID `json:"parentId" extensions:"x-nullable,x-omitempty"` + ParentID uuid.UUID `json:"parentId" extensions:"x-nullable,x-omitempty"` ID uuid.UUID `json:"id"` - AssetID AssetID `json:"assetId"` + AssetID AssetID `json:"assetId" swaggertype:"string"` Name string `json:"name"` Description string `json:"description"` Quantity int `json:"quantity"` @@ -99,6 +105,12 @@ 
type ( Fields []ItemField `json:"fields"` } + ItemPatch struct { + ID uuid.UUID `json:"id"` + Quantity *int `json:"quantity,omitempty" extensions:"x-nullable,x-omitempty"` + ImportRef *string `json:"-,omitempty" extensions:"x-nullable,x-omitempty"` + } + ItemSummary struct { ImportRef string `json:"-"` ID uuid.UUID `json:"id"` @@ -115,6 +127,8 @@ type ( // Edges Location *LocationSummary `json:"location,omitempty" extensions:"x-nullable,x-omitempty"` Labels []LabelSummary `json:"labels"` + + ImageID *uuid.UUID `json:"imageId,omitempty"` } ItemOut struct { @@ -146,7 +160,6 @@ type ( Attachments []ItemAttachment `json:"attachments"` Fields []ItemField `json:"fields"` - Children []ItemSummary `json:"children"` } ) @@ -164,10 +177,21 @@ func mapItemSummary(item *ent.Item) ItemSummary { labels = mapEach(item.Edges.Label, mapLabelSummary) } + var imageID *uuid.UUID + if item.Edges.Attachments != nil { + for _, a := range item.Edges.Attachments { + if a.Primary && a.Edges.Document != nil { + imageID = &a.ID + break + } + } + } + return ItemSummary{ ID: item.ID, Name: item.Name, Description: item.Description, + ImportRef: item.ImportRef, Quantity: item.Quantity, CreatedAt: item.CreatedAt, UpdatedAt: item.UpdatedAt, @@ -180,6 +204,7 @@ func mapItemSummary(item *ent.Item) ItemSummary { // Warranty Insured: item.Insured, + ImageID: imageID, } } @@ -215,11 +240,6 @@ func mapItemOut(item *ent.Item) ItemOut { fields = mapFields(item.Edges.Fields) } - var children []ItemSummary - if item.Edges.Children != nil { - children = mapEach(item.Edges.Children, mapItemSummary) - } - var parent *ItemSummary if item.Edges.Parent != nil { v := mapItemSummary(item.Edges.Parent) @@ -253,7 +273,12 @@ func mapItemOut(item *ent.Item) ItemOut { Notes: item.Notes, Attachments: attachments, Fields: fields, - Children: children, + } +} + +func (e *ItemsRepository) publishMutationEvent(GID uuid.UUID) { + if e.bus != nil { + e.bus.Publish(eventbus.EventItemMutation, eventbus.GroupMutationEvent{GID: 
GID}) } } @@ -265,7 +290,6 @@ func (e *ItemsRepository) getOne(ctx context.Context, where ...predicate.Item) ( WithLabel(). WithLocation(). WithGroup(). - WithChildren(). WithParent(). WithAttachments(func(aq *ent.AttachmentQuery) { aq.WithDocument() @@ -285,6 +309,10 @@ func (e *ItemsRepository) CheckRef(ctx context.Context, GID uuid.UUID, ref strin return q.Where(item.ImportRef(ref)).Exist(ctx) } +func (e *ItemsRepository) GetByRef(ctx context.Context, GID uuid.UUID, ref string) (ItemOut, error) { + return e.getOne(ctx, item.ImportRef(ref), item.HasGroupWith(group.ID(GID))) +} + // GetOneByGroup returns a single item by ID. If the item does not exist, an error is returned. // GetOneByGroup ensures that the item belongs to a specific group. func (e *ItemsRepository) GetOneByGroup(ctx context.Context, gid, id uuid.UUID) (ItemOut, error) { @@ -313,6 +341,9 @@ func (e *ItemsRepository) QueryByGroup(ctx context.Context, gid uuid.UUID, q Ite item.Or( item.NameContainsFold(q.Search), item.DescriptionContainsFold(q.Search), + item.SerialNumberContainsFold(q.Search), + item.ModelNumberContainsFold(q.Search), + item.ManufacturerContainsFold(q.Search), item.NotesContainsFold(q.Search), ), ) @@ -362,6 +393,10 @@ func (e *ItemsRepository) QueryByGroup(ctx context.Context, gid uuid.UUID, q Ite andPredicates = append(andPredicates, item.Or(fieldPredicates...)) } + + if len(q.ParentItemIDs) > 0 { + andPredicates = append(andPredicates, item.HasParentWith(item.IDIn(q.ParentItemIDs...))) + } } if len(andPredicates) > 0 { @@ -373,9 +408,25 @@ func (e *ItemsRepository) QueryByGroup(ctx context.Context, gid uuid.UUID, q Ite return PaginationResult[ItemSummary]{}, err } - qb = qb.Order(ent.Asc(item.FieldName)). + // Order + switch q.OrderBy { + case "createdAt": + qb = qb.Order(ent.Desc(item.FieldCreatedAt)) + case "updatedAt": + qb = qb.Order(ent.Desc(item.FieldUpdatedAt)) + default: // "name" + qb = qb.Order(ent.Asc(item.FieldName)) + } + + qb = qb. WithLabel(). 
- WithLocation() + WithLocation(). + WithAttachments(func(aq *ent.AttachmentQuery) { + aq.Where( + attachment.Primary(true), + ). + WithDocument() + }) if q.Page != -1 || q.PageSize != -1 { qb = qb. @@ -496,11 +547,18 @@ func (e *ItemsRepository) Create(ctx context.Context, gid uuid.UUID, data ItemCr return ItemOut{}, err } + e.publishMutationEvent(gid) return e.GetOne(ctx, result.ID) } func (e *ItemsRepository) Delete(ctx context.Context, id uuid.UUID) error { - return e.db.Item.DeleteOneID(id).Exec(ctx) + err := e.db.Item.DeleteOneID(id).Exec(ctx) + if err != nil { + return err + } + + e.publishMutationEvent(id) + return nil } func (e *ItemsRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID) error { @@ -510,6 +568,11 @@ func (e *ItemsRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID) item.ID(id), item.HasGroupWith(group.ID(gid)), ).Exec(ctx) + if err != nil { + return err + } + + e.publishMutationEvent(gid) return err } @@ -625,9 +688,49 @@ func (e *ItemsRepository) UpdateByGroup(ctx context.Context, GID uuid.UUID, data } } + e.publishMutationEvent(GID) return e.GetOne(ctx, data.ID) } +func (e *ItemsRepository) GetAllZeroImportRef(ctx context.Context, GID uuid.UUID) ([]uuid.UUID, error) { + var ids []uuid.UUID + + err := e.db.Item.Query(). + Where( + item.HasGroupWith(group.ID(GID)), + item.Or( + item.ImportRefEQ(""), + item.ImportRefIsNil(), + ), + ). + Select(item.FieldID). + Scan(ctx, &ids) + if err != nil { + return nil, err + } + + return ids, nil +} + +func (e *ItemsRepository) Patch(ctx context.Context, GID, ID uuid.UUID, data ItemPatch) error { + q := e.db.Item.Update(). 
+ Where( + item.ID(ID), + item.HasGroupWith(group.ID(GID)), + ) + + if data.ImportRef != nil { + q.SetImportRef(*data.ImportRef) + } + + if data.Quantity != nil { + q.SetQuantity(*data.Quantity) + } + + e.publishMutationEvent(GID) + return q.Exec(ctx) +} + func (e *ItemsRepository) GetAllCustomFieldValues(ctx context.Context, GID uuid.UUID, name string) ([]string, error) { type st struct { Value string `json:"text_value"` @@ -696,8 +799,11 @@ func (e *ItemsRepository) ZeroOutTimeFields(ctx context.Context, GID uuid.UUID) item.HasGroupWith(group.ID(GID)), item.Or( item.PurchaseTimeNotNil(), + item.PurchaseFromLT("0002-01-01"), item.SoldTimeNotNil(), + item.SoldToLT("0002-01-01"), item.WarrantyExpiresNotNil(), + item.WarrantyDetailsLT("0002-01-01"), ), ) @@ -716,15 +822,36 @@ func (e *ItemsRepository) ZeroOutTimeFields(ctx context.Context, GID uuid.UUID) updateQ := e.db.Item.Update().Where(item.ID(i.ID)) if !i.PurchaseTime.IsZero() { - updateQ.SetPurchaseTime(toDateOnly(i.PurchaseTime)) + switch { + case i.PurchaseTime.Year() < 100: + updateQ.ClearPurchaseTime() + default: + updateQ.SetPurchaseTime(toDateOnly(i.PurchaseTime)) + } + } else { + updateQ.ClearPurchaseTime() } if !i.SoldTime.IsZero() { - updateQ.SetSoldTime(toDateOnly(i.SoldTime)) + switch { + case i.SoldTime.Year() < 100: + updateQ.ClearSoldTime() + default: + updateQ.SetSoldTime(toDateOnly(i.SoldTime)) + } + } else { + updateQ.ClearSoldTime() } if !i.WarrantyExpires.IsZero() { - updateQ.SetWarrantyExpires(toDateOnly(i.WarrantyExpires)) + switch { + case i.WarrantyExpires.Year() < 100: + updateQ.ClearWarrantyExpires() + default: + updateQ.SetWarrantyExpires(toDateOnly(i.WarrantyExpires)) + } + } else { + updateQ.ClearWarrantyExpires() } _, err = updateQ.Save(ctx) @@ -737,3 +864,51 @@ func (e *ItemsRepository) ZeroOutTimeFields(ctx context.Context, GID uuid.UUID) return updated, nil } + +func (e *ItemsRepository) SetPrimaryPhotos(ctx context.Context, GID uuid.UUID) (int, error) { + // All items where 
there is no primary photo + itemIDs, err := e.db.Item.Query(). + Where( + item.HasGroupWith(group.ID(GID)), + item.HasAttachmentsWith( + attachment.TypeEQ(attachment.TypePhoto), + attachment.Not( + attachment.And( + attachment.Primary(true), + attachment.TypeEQ(attachment.TypePhoto), + ), + ), + ), + ). + IDs(ctx) + if err != nil { + return -1, err + } + + updated := 0 + for _, id := range itemIDs { + // Find the first photo attachment + a, err := e.db.Attachment.Query(). + Where( + attachment.HasItemWith(item.ID(id)), + attachment.TypeEQ(attachment.TypePhoto), + attachment.Primary(false), + ). + First(ctx) + if err != nil { + return updated, err + } + + // Set it as primary + _, err = e.db.Attachment.UpdateOne(a). + SetPrimary(true). + Save(ctx) + if err != nil { + return updated, err + } + + updated++ + } + + return updated, nil +} diff --git a/backend/internal/data/repo/repo_items_test.go b/backend/internal/data/repo/repo_items_test.go index ac2814d..9d60596 100644 --- a/backend/internal/data/repo/repo_items_test.go +++ b/backend/internal/data/repo/repo_items_test.go @@ -22,7 +22,7 @@ func useItems(t *testing.T, len int) []ItemOut { t.Helper() location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) - assert.NoError(t, err) + require.NoError(t, err) items := make([]ItemOut, len) for i := 0; i < len; i++ { @@ -30,7 +30,7 @@ func useItems(t *testing.T, len int) []ItemOut { itm.LocationID = location.ID item, err := tRepos.Items.Create(context.Background(), tGroup.ID, itm) - assert.NoError(t, err) + require.NoError(t, err) items[i] = item } @@ -39,7 +39,7 @@ func useItems(t *testing.T, len int) []ItemOut { _ = tRepos.Items.Delete(context.Background(), item.ID) } - _ = tRepos.Locations.Delete(context.Background(), location.ID) + _ = tRepos.Locations.delete(context.Background(), location.ID) }) return items @@ -61,23 +61,22 @@ func TestItemsRepository_RecursiveRelationships(t *testing.T) { // Append Parent ID _, err := 
tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, update) - assert.NoError(t, err) + require.NoError(t, err) // Check Parent ID updated, err := tRepos.Items.GetOne(context.Background(), child.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, parent.ID, updated.Parent.ID) // Remove Parent ID update.ParentID = uuid.Nil _, err = tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, update) - assert.NoError(t, err) + require.NoError(t, err) // Check Parent ID updated, err = tRepos.Items.GetOne(context.Background(), child.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, updated.Parent) - } } @@ -86,7 +85,7 @@ func TestItemsRepository_GetOne(t *testing.T) { for _, item := range entity { result, err := tRepos.Items.GetOne(context.Background(), item.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, item.ID, result.ID) } } @@ -96,9 +95,9 @@ func TestItemsRepository_GetAll(t *testing.T) { expected := useItems(t, length) results, err := tRepos.Items.GetAll(context.Background(), tGroup.ID) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, length, len(results)) + assert.Len(t, results, length) for _, item := range results { for _, expectedItem := range expected { @@ -113,23 +112,23 @@ func TestItemsRepository_GetAll(t *testing.T) { func TestItemsRepository_Create(t *testing.T) { location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) - assert.NoError(t, err) + require.NoError(t, err) itm := itemFactory() itm.LocationID = location.ID result, err := tRepos.Items.Create(context.Background(), tGroup.ID, itm) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, result.ID) // Cleanup - Also deletes item - err = tRepos.Locations.Delete(context.Background(), location.ID) - assert.NoError(t, err) + err = tRepos.Locations.delete(context.Background(), location.ID) + require.NoError(t, err) } func TestItemsRepository_Create_Location(t 
*testing.T) { location, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, location.ID) item := itemFactory() @@ -137,18 +136,18 @@ func TestItemsRepository_Create_Location(t *testing.T) { // Create Resource result, err := tRepos.Items.Create(context.Background(), tGroup.ID, item) - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, result.ID) // Get Resource foundItem, err := tRepos.Items.GetOne(context.Background(), result.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, result.ID, foundItem.ID) assert.Equal(t, location.ID, foundItem.Location.ID) // Cleanup - Also deletes item - err = tRepos.Locations.Delete(context.Background(), location.ID) - assert.NoError(t, err) + err = tRepos.Locations.delete(context.Background(), location.ID) + require.NoError(t, err) } func TestItemsRepository_Delete(t *testing.T) { @@ -156,11 +155,11 @@ func TestItemsRepository_Delete(t *testing.T) { for _, item := range entities { err := tRepos.Items.Delete(context.Background(), item.ID) - assert.NoError(t, err) + require.NoError(t, err) } results, err := tRepos.Items.GetAll(context.Background(), tGroup.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, results) } @@ -213,7 +212,7 @@ func TestItemsRepository_Update_Labels(t *testing.T) { } updated, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, updateData) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, tt.want, len(updated.Labels)) for _, label := range updated.Labels { @@ -250,10 +249,10 @@ func TestItemsRepository_Update(t *testing.T) { } updatedEntity, err := tRepos.Items.UpdateByGroup(context.Background(), tGroup.ID, updateData) - assert.NoError(t, err) + require.NoError(t, err) got, err := tRepos.Items.GetOne(context.Background(), updatedEntity.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, updateData.ID, got.ID) 
assert.Equal(t, updateData.Name, got.Name) @@ -263,10 +262,10 @@ func TestItemsRepository_Update(t *testing.T) { assert.Equal(t, updateData.Manufacturer, got.Manufacturer) // assert.Equal(t, updateData.PurchaseTime, got.PurchaseTime) assert.Equal(t, updateData.PurchaseFrom, got.PurchaseFrom) - assert.Equal(t, updateData.PurchasePrice, got.PurchasePrice) + assert.InDelta(t, updateData.PurchasePrice, got.PurchasePrice, 0.01) // assert.Equal(t, updateData.SoldTime, got.SoldTime) assert.Equal(t, updateData.SoldTo, got.SoldTo) - assert.Equal(t, updateData.SoldPrice, got.SoldPrice) + assert.InDelta(t, updateData.SoldPrice, got.SoldPrice, 0.01) assert.Equal(t, updateData.SoldNotes, got.SoldNotes) assert.Equal(t, updateData.Notes, got.Notes) // assert.Equal(t, updateData.WarrantyExpires, got.WarrantyExpires) @@ -275,15 +274,15 @@ func TestItemsRepository_Update(t *testing.T) { } func TestItemRepository_GetAllCustomFields(t *testing.T) { - const FIELDS_COUNT = 5 + const FieldsCount = 5 entity := useItems(t, 1)[0] - fields := make([]ItemField, FIELDS_COUNT) - names := make([]string, FIELDS_COUNT) - values := make([]string, FIELDS_COUNT) + fields := make([]ItemField, FieldsCount) + names := make([]string, FieldsCount) + values := make([]string, FieldsCount) - for i := 0; i < FIELDS_COUNT; i++ { + for i := 0; i < FieldsCount; i++ { name := fk.Str(10) fields[i] = ItemField{ Name: name, @@ -306,7 +305,7 @@ func TestItemRepository_GetAllCustomFields(t *testing.T) { // Test getting all fields { results, err := tRepos.Items.GetAllCustomFieldNames(context.Background(), tGroup.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.ElementsMatch(t, names, results) } @@ -314,7 +313,7 @@ func TestItemRepository_GetAllCustomFields(t *testing.T) { { results, err := tRepos.Items.GetAllCustomFieldValues(context.Background(), tUser.GroupID, names[0]) - assert.NoError(t, err) + require.NoError(t, err) assert.ElementsMatch(t, values[:1], results) } } diff --git 
a/backend/internal/data/repo/repo_labels.go b/backend/internal/data/repo/repo_labels.go index a761ef8..2358f9c 100644 --- a/backend/internal/data/repo/repo_labels.go +++ b/backend/internal/data/repo/repo_labels.go @@ -5,27 +5,29 @@ import ( "time" "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/ent/group" - "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/label" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" ) type LabelRepository struct { - db *ent.Client + db *ent.Client + bus *eventbus.EventBus } + type ( LabelCreate struct { - Name string `json:"name"` - Description string `json:"description"` + Name string `json:"name" validate:"required,min=1,max=255"` + Description string `json:"description" validate:"max=255"` Color string `json:"color"` } LabelUpdate struct { ID uuid.UUID `json:"id"` - Name string `json:"name"` - Description string `json:"description"` + Name string `json:"name" validate:"required,min=1,max=255"` + Description string `json:"description" validate:"max=255"` Color string `json:"color"` } @@ -39,7 +41,6 @@ type ( LabelOut struct { LabelSummary - Items []ItemSummary `json:"items"` } ) @@ -61,7 +62,12 @@ var ( func mapLabelOut(label *ent.Label) LabelOut { return LabelOut{ LabelSummary: mapLabelSummary(label), - Items: mapEach(label.Edges.Items, mapItemSummary), + } +} + +func (r *LabelRepository) publishMutationEvent(GID uuid.UUID) { + if r.bus != nil { + r.bus.Publish(eventbus.EventLabelMutation, eventbus.GroupMutationEvent{GID: GID}) } } @@ -69,9 +75,6 @@ func (r *LabelRepository) getOne(ctx context.Context, where ...predicate.Label) return mapLabelOutErr(r.db.Label.Query(). Where(where...). WithGroup(). - WithItems(func(iq *ent.ItemQuery) { - iq.Where(item.Archived(false)) - }). 
Only(ctx), ) } @@ -84,27 +87,28 @@ func (r *LabelRepository) GetOneByGroup(ctx context.Context, gid, ld uuid.UUID) return r.getOne(ctx, label.ID(ld), label.HasGroupWith(group.ID(gid))) } -func (r *LabelRepository) GetAll(ctx context.Context, groupId uuid.UUID) ([]LabelSummary, error) { +func (r *LabelRepository) GetAll(ctx context.Context, groupID uuid.UUID) ([]LabelSummary, error) { return mapLabelsOut(r.db.Label.Query(). - Where(label.HasGroupWith(group.ID(groupId))). + Where(label.HasGroupWith(group.ID(groupID))). Order(ent.Asc(label.FieldName)). WithGroup(). All(ctx), ) } -func (r *LabelRepository) Create(ctx context.Context, groupdId uuid.UUID, data LabelCreate) (LabelOut, error) { +func (r *LabelRepository) Create(ctx context.Context, groupID uuid.UUID, data LabelCreate) (LabelOut, error) { label, err := r.db.Label.Create(). SetName(data.Name). SetDescription(data.Description). SetColor(data.Color). - SetGroupID(groupdId). + SetGroupID(groupID). Save(ctx) if err != nil { return LabelOut{}, err } - label.Edges.Group = &ent.Group{ID: groupdId} // bootstrap group ID + label.Edges.Group = &ent.Group{ID: groupID} // bootstrap group ID + r.publishMutationEvent(groupID) return mapLabelOut(label), err } @@ -121,25 +125,19 @@ func (r *LabelRepository) update(ctx context.Context, data LabelUpdate, where .. Save(ctx) } -func (r *LabelRepository) Update(ctx context.Context, data LabelUpdate) (LabelOut, error) { - _, err := r.update(ctx, data, label.ID(data.ID)) - if err != nil { - return LabelOut{}, err - } - - return r.GetOne(ctx, data.ID) -} - func (r *LabelRepository) UpdateByGroup(ctx context.Context, GID uuid.UUID, data LabelUpdate) (LabelOut, error) { _, err := r.update(ctx, data, label.ID(data.ID), label.HasGroupWith(group.ID(GID))) if err != nil { return LabelOut{}, err } + r.publishMutationEvent(GID) return r.GetOne(ctx, data.ID) } -func (r *LabelRepository) Delete(ctx context.Context, id uuid.UUID) error { +// delete removes the label from the database. 
This should only be used when +// the label's ownership is already confirmed/validated. +func (r *LabelRepository) delete(ctx context.Context, id uuid.UUID) error { return r.db.Label.DeleteOneID(id).Exec(ctx) } @@ -149,6 +147,11 @@ func (r *LabelRepository) DeleteByGroup(ctx context.Context, gid, id uuid.UUID) label.ID(id), label.HasGroupWith(group.ID(gid)), ).Exec(ctx) + if err != nil { + return err + } - return err + r.publishMutationEvent(gid) + + return nil } diff --git a/backend/internal/data/repo/repo_labels_test.go b/backend/internal/data/repo/repo_labels_test.go index 691b915..8b1d66f 100644 --- a/backend/internal/data/repo/repo_labels_test.go +++ b/backend/internal/data/repo/repo_labels_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func labelFactory() LabelCreate { @@ -22,13 +23,13 @@ func useLabels(t *testing.T, len int) []LabelOut { itm := labelFactory() item, err := tRepos.Labels.Create(context.Background(), tGroup.ID, itm) - assert.NoError(t, err) + require.NoError(t, err) labels[i] = item } t.Cleanup(func() { for _, item := range labels { - _ = tRepos.Labels.Delete(context.Background(), item.ID) + _ = tRepos.Labels.delete(context.Background(), item.ID) } }) @@ -41,7 +42,7 @@ func TestLabelRepository_Get(t *testing.T) { // Get by ID foundLoc, err := tRepos.Labels.GetOne(context.Background(), label.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, label.ID, foundLoc.ID) } @@ -49,26 +50,26 @@ func TestLabelRepositoryGetAll(t *testing.T) { useLabels(t, 10) all, err := tRepos.Labels.GetAll(context.Background(), tGroup.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, all, 10) } func TestLabelRepository_Create(t *testing.T) { loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory()) - assert.NoError(t, err) + require.NoError(t, err) // Get by ID foundLoc, err := tRepos.Labels.GetOne(context.Background(), loc.ID) - 
assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, loc.ID, foundLoc.ID) - err = tRepos.Labels.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Labels.delete(context.Background(), loc.ID) + require.NoError(t, err) } func TestLabelRepository_Update(t *testing.T) { loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory()) - assert.NoError(t, err) + require.NoError(t, err) updateData := LabelUpdate{ ID: loc.ID, @@ -76,27 +77,27 @@ func TestLabelRepository_Update(t *testing.T) { Description: fk.Str(100), } - update, err := tRepos.Labels.Update(context.Background(), updateData) - assert.NoError(t, err) + update, err := tRepos.Labels.UpdateByGroup(context.Background(), tGroup.ID, updateData) + require.NoError(t, err) foundLoc, err := tRepos.Labels.GetOne(context.Background(), loc.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, update.ID, foundLoc.ID) assert.Equal(t, update.Name, foundLoc.Name) assert.Equal(t, update.Description, foundLoc.Description) - err = tRepos.Labels.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Labels.delete(context.Background(), loc.ID) + require.NoError(t, err) } func TestLabelRepository_Delete(t *testing.T) { loc, err := tRepos.Labels.Create(context.Background(), tGroup.ID, labelFactory()) - assert.NoError(t, err) + require.NoError(t, err) - err = tRepos.Labels.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Labels.delete(context.Background(), loc.ID) + require.NoError(t, err) _, err = tRepos.Labels.GetOne(context.Background(), loc.ID) - assert.Error(t, err) + require.Error(t, err) } diff --git a/backend/internal/data/repo/repo_locations.go b/backend/internal/data/repo/repo_locations.go index 2d3ea56..fd98fd7 100644 --- a/backend/internal/data/repo/repo_locations.go +++ b/backend/internal/data/repo/repo_locations.go @@ -6,26 +6,27 @@ import ( "time" "github.com/google/uuid" + 
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/data/ent/group" - "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/location" "github.com/hay-kot/homebox/backend/internal/data/ent/predicate" ) type LocationRepository struct { - db *ent.Client + db *ent.Client + bus *eventbus.EventBus } type ( LocationCreate struct { Name string `json:"name"` - ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` + ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` Description string `json:"description"` } LocationUpdate struct { - ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` + ParentID uuid.UUID `json:"parentId" extensions:"x-nullable"` ID uuid.UUID `json:"id"` Name string `json:"name"` Description string `json:"description"` @@ -47,7 +48,6 @@ type ( LocationOut struct { Parent *LocationSummary `json:"parent,omitempty"` LocationSummary - Items []ItemSummary `json:"items"` Children []LocationSummary `json:"children"` } ) @@ -86,15 +86,20 @@ func mapLocationOut(location *ent.Location) LocationOut { CreatedAt: location.CreatedAt, UpdatedAt: location.UpdatedAt, }, - Items: mapEach(location.Edges.Items, mapItemSummary), + } +} + +func (r *LocationRepository) publishMutationEvent(GID uuid.UUID) { + if r.bus != nil { + r.bus.Publish(eventbus.EventLocationMutation, eventbus.GroupMutationEvent{GID: GID}) } } type LocationQuery struct { - FilterChildren bool `json:"filterChildren"` + FilterChildren bool `json:"filterChildren" schema:"filterChildren"` } -// GetALlWithCount returns all locations with item count field populated +// GetAll returns all locations with item count field populated func (r *LocationRepository) GetAll(ctx context.Context, GID uuid.UUID, filter LocationQuery) ([]LocationOutCount, error) { query := `--sql SELECT @@ -105,7 +110,7 @@ func (r 
*LocationRepository) GetAll(ctx context.Context, GID uuid.UUID, filter L updated_at, ( SELECT - COUNT(*) + SUM(items.quantity) FROM items WHERE @@ -130,16 +135,23 @@ func (r *LocationRepository) GetAll(ctx context.Context, GID uuid.UUID, filter L if err != nil { return nil, err } + defer func() { _ = rows.Close() }() list := []LocationOutCount{} for rows.Next() { var ct LocationOutCount - err := rows.Scan(&ct.ID, &ct.Name, &ct.Description, &ct.CreatedAt, &ct.UpdatedAt, &ct.ItemCount) + var maybeCount *int + + err := rows.Scan(&ct.ID, &ct.Name, &ct.Description, &ct.CreatedAt, &ct.UpdatedAt, &maybeCount) if err != nil { return nil, err } + if maybeCount != nil { + ct.ItemCount = *maybeCount + } + list = append(list, ct) } @@ -150,11 +162,6 @@ func (r *LocationRepository) getOne(ctx context.Context, where ...predicate.Loca return mapLocationOutErr(r.db.Location.Query(). Where(where...). WithGroup(). - WithItems(func(iq *ent.ItemQuery) { - iq.Where(item.Archived(false)). - Order(ent.Asc(item.FieldName)). - WithLabel() - }). WithParent(). WithChildren(). 
Only(ctx)) @@ -184,6 +191,7 @@ func (r *LocationRepository) Create(ctx context.Context, GID uuid.UUID, data Loc } location.Edges.Group = &ent.Group{ID: GID} // bootstrap group ID + r.publishMutationEvent(GID) return mapLocationOut(location), nil } @@ -207,20 +215,29 @@ func (r *LocationRepository) update(ctx context.Context, data LocationUpdate, wh return r.Get(ctx, data.ID) } -func (r *LocationRepository) Update(ctx context.Context, data LocationUpdate) (LocationOut, error) { - return r.update(ctx, data, location.ID(data.ID)) +func (r *LocationRepository) UpdateByGroup(ctx context.Context, GID, ID uuid.UUID, data LocationUpdate) (LocationOut, error) { + v, err := r.update(ctx, data, location.ID(ID), location.HasGroupWith(group.ID(GID))) + if err != nil { + return LocationOut{}, err + } + + r.publishMutationEvent(GID) + return v, err } -func (r *LocationRepository) UpdateOneByGroup(ctx context.Context, GID, ID uuid.UUID, data LocationUpdate) (LocationOut, error) { - return r.update(ctx, data, location.ID(ID), location.HasGroupWith(group.ID(GID))) -} - -func (r *LocationRepository) Delete(ctx context.Context, ID uuid.UUID) error { +// delete should only be used after checking that the location is owned by the +// group. 
Otherwise, use DeleteByGroup +func (r *LocationRepository) delete(ctx context.Context, ID uuid.UUID) error { return r.db.Location.DeleteOneID(ID).Exec(ctx) } func (r *LocationRepository) DeleteByGroup(ctx context.Context, GID, ID uuid.UUID) error { _, err := r.db.Location.Delete().Where(location.ID(ID), location.HasGroupWith(group.ID(GID))).Exec(ctx) + if err != nil { + return err + } + r.publishMutationEvent(GID) + return err } @@ -240,16 +257,76 @@ type FlatTreeItem struct { } type TreeQuery struct { - WithItems bool `json:"withItems"` + WithItems bool `json:"withItems" schema:"withItems"` } -func (lr *LocationRepository) Tree(ctx context.Context, GID uuid.UUID, tq TreeQuery) ([]TreeItem, error) { +type ItemType string + +const ( + ItemTypeLocation ItemType = "location" + ItemTypeItem ItemType = "item" +) + +type ItemPath struct { + Type ItemType `json:"type"` + ID uuid.UUID `json:"id"` + Name string `json:"name"` +} + +func (r *LocationRepository) PathForLoc(ctx context.Context, GID, locID uuid.UUID) ([]ItemPath, error) { + query := `WITH RECURSIVE location_path AS ( + SELECT id, name, location_children + FROM locations + WHERE id = ? -- Replace ? with the ID of the item's location + AND group_locations = ? -- Replace ? 
with the ID of the group + + UNION ALL + + SELECT loc.id, loc.name, loc.location_children + FROM locations loc + JOIN location_path lp ON loc.id = lp.location_children + ) + + SELECT id, name + FROM location_path` + + rows, err := r.db.Sql().QueryContext(ctx, query, locID, GID) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + var locations []ItemPath + + for rows.Next() { + var location ItemPath + location.Type = ItemTypeLocation + if err := rows.Scan(&location.ID, &location.Name); err != nil { + return nil, err + } + locations = append(locations, location) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + // Reverse the order of the locations so that the root is last + for i := len(locations)/2 - 1; i >= 0; i-- { + opp := len(locations) - 1 - i + locations[i], locations[opp] = locations[opp], locations[i] + } + + return locations, nil +} + +func (r *LocationRepository) Tree(ctx context.Context, GID uuid.UUID, tq TreeQuery) ([]TreeItem, error) { query := ` - WITH recursive location_tree(id, NAME, location_children, level, node_type) AS + WITH recursive location_tree(id, NAME, parent_id, level, node_type) AS ( SELECT id, NAME, - location_children, + location_children AS parent_id, 0 AS level, 'location' AS node_type FROM locations @@ -259,48 +336,77 @@ func (lr *LocationRepository) Tree(ctx context.Context, GID uuid.UUID, tq TreeQu UNION ALL SELECT c.id, c.NAME, - c.location_children, + c.location_children AS parent_id, level + 1, 'location' AS node_type FROM locations c JOIN location_tree p ON c.location_children = p.id WHERE level < 10 -- prevent infinite loop & excessive recursion + ){{ WITH_ITEMS }} - {{ WITH_ITEMS }} - ) SELECT id, NAME, level, - location_children, + parent_id, node_type - FROM location_tree - ORDER BY level, - node_type DESC, -- sort locations before items + FROM ( + SELECT * + FROM location_tree + + + {{ WITH_ITEMS_FROM }} + + + ) tree + ORDER BY node_type DESC, -- sort locations before 
items + level, lower(NAME)` if tq.WithItems { - itemQuery := ` + itemQuery := `, item_tree(id, NAME, parent_id, level, node_type) AS + ( + SELECT id, + NAME, + location_items as parent_id, + 0 AS level, + 'item' AS node_type + FROM items + WHERE item_children IS NULL + AND location_items IN (SELECT id FROM location_tree) + UNION ALL - SELECT i.id, - i.name, - location_items as location_children, + + SELECT c.id, + c.NAME, + c.item_children AS parent_id, level + 1, 'item' AS node_type - FROM items i - JOIN location_tree p - ON i.location_items = p.id - WHERE level < 10 -- prevent infinite loop & excessive recursion` + FROM items c + JOIN item_tree p + ON c.item_children = p.id + WHERE c.item_children IS NOT NULL + AND level < 10 -- prevent infinite loop & excessive recursion + )` + + // Conditional table joined to main query + itemsFrom := ` + UNION ALL + SELECT * + FROM item_tree` + query = strings.ReplaceAll(query, "{{ WITH_ITEMS }}", itemQuery) + query = strings.ReplaceAll(query, "{{ WITH_ITEMS_FROM }}", itemsFrom) } else { query = strings.ReplaceAll(query, "{{ WITH_ITEMS }}", "") + query = strings.ReplaceAll(query, "{{ WITH_ITEMS_FROM }}", "") } - rows, err := lr.db.Sql().QueryContext(ctx, query, GID) + rows, err := r.db.Sql().QueryContext(ctx, query, GID) if err != nil { return nil, err } - defer rows.Close() + defer func() { _ = rows.Close() }() var locations []FlatTreeItem for rows.Next() { diff --git a/backend/internal/data/repo/repo_locations_test.go b/backend/internal/data/repo/repo_locations_test.go index 0334a42..e8b353c 100644 --- a/backend/internal/data/repo/repo_locations_test.go +++ b/backend/internal/data/repo/repo_locations_test.go @@ -8,6 +8,7 @@ import ( "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func locationFactory() LocationCreate { @@ -24,13 +25,13 @@ func useLocations(t *testing.T, len int) []LocationOut { for i := 0; i < len; 
i++ { loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) - assert.NoError(t, err) + require.NoError(t, err) out[i] = loc } t.Cleanup(func() { for _, loc := range out { - err := tRepos.Locations.Delete(context.Background(), loc.ID) + err := tRepos.Locations.delete(context.Background(), loc.ID) if err != nil { assert.True(t, ent.IsNotFound(err)) } @@ -42,15 +43,15 @@ func useLocations(t *testing.T, len int) []LocationOut { func TestLocationRepository_Get(t *testing.T) { loc, err := tRepos.Locations.Create(context.Background(), tGroup.ID, locationFactory()) - assert.NoError(t, err) + require.NoError(t, err) // Get by ID foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, loc.ID, foundLoc.ID) - err = tRepos.Locations.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Locations.delete(context.Background(), loc.ID) + require.NoError(t, err) } func TestLocationRepositoryGetAllWithCount(t *testing.T) { @@ -63,10 +64,10 @@ func TestLocationRepositoryGetAllWithCount(t *testing.T) { LocationID: result.ID, }) - assert.NoError(t, err) + require.NoError(t, err) results, err := tRepos.Locations.GetAll(context.Background(), tGroup.ID, LocationQuery{}) - assert.NoError(t, err) + require.NoError(t, err) for _, loc := range results { if loc.ID == result.ID { @@ -80,11 +81,11 @@ func TestLocationRepository_Create(t *testing.T) { // Get by ID foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, loc.ID, foundLoc.ID) - err = tRepos.Locations.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Locations.delete(context.Background(), loc.ID) + require.NoError(t, err) } func TestLocationRepository_Update(t *testing.T) { @@ -96,56 +97,84 @@ func TestLocationRepository_Update(t *testing.T) { Description: fk.Str(100), } - update, err := 
tRepos.Locations.Update(context.Background(), updateData) - assert.NoError(t, err) + update, err := tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, updateData.ID, updateData) + require.NoError(t, err) foundLoc, err := tRepos.Locations.Get(context.Background(), loc.ID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, update.ID, foundLoc.ID) assert.Equal(t, update.Name, foundLoc.Name) assert.Equal(t, update.Description, foundLoc.Description) - err = tRepos.Locations.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err = tRepos.Locations.delete(context.Background(), loc.ID) + require.NoError(t, err) } func TestLocationRepository_Delete(t *testing.T) { loc := useLocations(t, 1)[0] - err := tRepos.Locations.Delete(context.Background(), loc.ID) - assert.NoError(t, err) + err := tRepos.Locations.delete(context.Background(), loc.ID) + require.NoError(t, err) _, err = tRepos.Locations.Get(context.Background(), loc.ID) - assert.Error(t, err) + require.Error(t, err) } func TestItemRepository_TreeQuery(t *testing.T) { locs := useLocations(t, 3) // Set relations - _, err := tRepos.Locations.UpdateOneByGroup(context.Background(), tGroup.ID, locs[0].ID, LocationUpdate{ + _, err := tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, locs[0].ID, LocationUpdate{ ID: locs[0].ID, ParentID: locs[1].ID, Name: locs[0].Name, Description: locs[0].Description, }) - assert.NoError(t, err) + require.NoError(t, err) locations, err := tRepos.Locations.Tree(context.Background(), tGroup.ID, TreeQuery{WithItems: true}) - assert.NoError(t, err) + require.NoError(t, err) - assert.Equal(t, 2, len(locations)) + assert.Len(t, locations, 2) // Check roots for _, loc := range locations { if loc.ID == locs[1].ID { - assert.Equal(t, 1, len(loc.Children)) + assert.Len(t, loc.Children, 1) } } } +func TestLocationRepository_PathForLoc(t *testing.T) { + locs := useLocations(t, 3) + + // Set relations 3 -> 2 -> 1 + for i := 0; i < 2; i++ { + _, err := 
tRepos.Locations.UpdateByGroup(context.Background(), tGroup.ID, locs[i].ID, LocationUpdate{ + ID: locs[i].ID, + ParentID: locs[i+1].ID, + Name: locs[i].Name, + Description: locs[i].Description, + }) + require.NoError(t, err) + } + + last := locs[0] + + path, err := tRepos.Locations.PathForLoc(context.Background(), tGroup.ID, last.ID) + + require.NoError(t, err) + assert.Len(t, path, 3) + + // Check path and order + for i, loc := range path { + assert.Equal(t, locs[2-i].ID, loc.ID) + assert.Equal(t, locs[2-i].Name, loc.Name) + } +} + func TestConvertLocationsToTree(t *testing.T) { uuid1, uuid2, uuid3, uuid4 := uuid.New(), uuid.New(), uuid.New(), uuid.New() diff --git a/backend/internal/data/repo/repo_maintenance_entry.go b/backend/internal/data/repo/repo_maintenance_entry.go index 5f95e50..2714bbd 100644 --- a/backend/internal/data/repo/repo_maintenance_entry.go +++ b/backend/internal/data/repo/repo_maintenance_entry.go @@ -2,11 +2,15 @@ package repo import ( "context" + "errors" "time" "github.com/google/uuid" "github.com/hay-kot/homebox/backend/internal/data/ent" + "github.com/hay-kot/homebox/backend/internal/data/ent/group" + "github.com/hay-kot/homebox/backend/internal/data/ent/item" "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry" + "github.com/hay-kot/homebox/backend/internal/data/types" ) // MaintenanceEntryRepository is a repository for maintenance entries that are @@ -15,27 +19,45 @@ import ( type MaintenanceEntryRepository struct { db *ent.Client } + +type MaintenanceEntryCreate struct { + CompletedDate types.Date `json:"completedDate"` + ScheduledDate types.Date `json:"scheduledDate"` + Name string `json:"name" validate:"required"` + Description string `json:"description"` + Cost float64 `json:"cost,string"` +} + +func (mc MaintenanceEntryCreate) Validate() error { + if mc.CompletedDate.Time().IsZero() && mc.ScheduledDate.Time().IsZero() { + return errors.New("either completedDate or scheduledDate must be set") + } + return nil +} + 
+type MaintenanceEntryUpdate struct { + CompletedDate types.Date `json:"completedDate"` + ScheduledDate types.Date `json:"scheduledDate"` + Name string `json:"name"` + Description string `json:"description"` + Cost float64 `json:"cost,string"` +} + +func (mu MaintenanceEntryUpdate) Validate() error { + if mu.CompletedDate.Time().IsZero() && mu.ScheduledDate.Time().IsZero() { + return errors.New("either completedDate or scheduledDate must be set") + } + return nil +} + type ( - MaintenanceEntryCreate struct { - Date time.Time `json:"date"` - Name string `json:"name"` - Description string `json:"description"` - Cost float64 `json:"cost,string"` - } - MaintenanceEntry struct { - ID uuid.UUID `json:"id"` - Date time.Time `json:"date"` - Name string `json:"name"` - Description string `json:"description"` - Cost float64 `json:"cost,string"` - } - - MaintenanceEntryUpdate struct { - Date time.Time `json:"date"` - Name string `json:"name"` - Description string `json:"description"` - Cost float64 `json:"cost,string"` + ID uuid.UUID `json:"id"` + CompletedDate types.Date `json:"completedDate"` + ScheduledDate types.Date `json:"scheduledDate"` + Name string `json:"name"` + Description string `json:"description"` + Cost float64 `json:"cost,string"` } MaintenanceLog struct { @@ -53,18 +75,41 @@ var ( func mapMaintenanceEntry(entry *ent.MaintenanceEntry) MaintenanceEntry { return MaintenanceEntry{ - ID: entry.ID, - Date: entry.Date, - Name: entry.Name, - Description: entry.Description, - Cost: entry.Cost, + ID: entry.ID, + CompletedDate: types.Date(entry.Date), + ScheduledDate: types.Date(entry.ScheduledDate), + Name: entry.Name, + Description: entry.Description, + Cost: entry.Cost, } } +func (r *MaintenanceEntryRepository) GetScheduled(ctx context.Context, GID uuid.UUID, dt types.Date) ([]MaintenanceEntry, error) { + entries, err := r.db.MaintenanceEntry.Query(). 
+ Where( + maintenanceentry.HasItemWith( + item.HasGroupWith(group.ID(GID)), + ), + maintenanceentry.ScheduledDate(dt.Time()), + maintenanceentry.Or( + maintenanceentry.DateIsNil(), + maintenanceentry.DateEQ(time.Time{}), + ), + ). + All(ctx) + + if err != nil { + return nil, err + } + + return mapEachMaintenanceEntry(entries), nil +} + func (r *MaintenanceEntryRepository) Create(ctx context.Context, itemID uuid.UUID, input MaintenanceEntryCreate) (MaintenanceEntry, error) { item, err := r.db.MaintenanceEntry.Create(). SetItemID(itemID). - SetDate(input.Date). + SetDate(input.CompletedDate.Time()). + SetScheduledDate(input.ScheduledDate.Time()). SetName(input.Name). SetDescription(input.Description). SetCost(input.Cost). @@ -75,7 +120,8 @@ func (r *MaintenanceEntryRepository) Create(ctx context.Context, itemID uuid.UUI func (r *MaintenanceEntryRepository) Update(ctx context.Context, ID uuid.UUID, input MaintenanceEntryUpdate) (MaintenanceEntry, error) { item, err := r.db.MaintenanceEntry.UpdateOneID(ID). - SetDate(input.Date). + SetDate(input.CompletedDate.Time()). + SetScheduledDate(input.ScheduledDate.Time()). SetName(input.Name). SetDescription(input.Description). SetCost(input.Cost). @@ -84,14 +130,40 @@ func (r *MaintenanceEntryRepository) Update(ctx context.Context, ID uuid.UUID, i return mapMaintenanceEntryErr(item, err) } -func (r *MaintenanceEntryRepository) GetLog(ctx context.Context, itemID uuid.UUID) (MaintenanceLog, error) { +type MaintenanceLogQuery struct { + Completed bool `json:"completed" schema:"completed"` + Scheduled bool `json:"scheduled" schema:"scheduled"` +} + +func (r *MaintenanceEntryRepository) GetLog(ctx context.Context, groupID, itemID uuid.UUID, query MaintenanceLogQuery) (MaintenanceLog, error) { log := MaintenanceLog{ ItemID: itemID, } - entries, err := r.db.MaintenanceEntry.Query(). - Where(maintenanceentry.ItemID(itemID)). - Order(ent.Desc(maintenanceentry.FieldDate)). 
+ q := r.db.MaintenanceEntry.Query().Where( + maintenanceentry.ItemID(itemID), + maintenanceentry.HasItemWith( + item.HasGroupWith(group.IDEQ(groupID)), + ), + ) + + if query.Completed { + q = q.Where(maintenanceentry.And( + maintenanceentry.DateNotNil(), + maintenanceentry.DateNEQ(time.Time{}), + )) + } else if query.Scheduled { + q = q.Where(maintenanceentry.And( + maintenanceentry.Or( + maintenanceentry.DateIsNil(), + maintenanceentry.DateEQ(time.Time{}), + ), + maintenanceentry.ScheduledDateNotNil(), + maintenanceentry.ScheduledDateNEQ(time.Time{}), + )) + } + + entries, err := q.Order(ent.Desc(maintenanceentry.FieldDate)). All(ctx) if err != nil { return MaintenanceLog{}, err @@ -102,7 +174,7 @@ func (r *MaintenanceEntryRepository) GetLog(ctx context.Context, itemID uuid.UUI var maybeTotal *float64 var maybeAverage *float64 - q := ` + statement := ` SELECT SUM(cost_total) AS total_of_totals, AVG(cost_total) AS avg_of_averages @@ -119,7 +191,7 @@ FROM my )` - row := r.db.Sql().QueryRowContext(ctx, q, itemID) + row := r.db.Sql().QueryRowContext(ctx, statement, itemID) err = row.Scan(&maybeTotal, &maybeAverage) if err != nil { return MaintenanceLog{}, err diff --git a/backend/internal/data/repo/repo_maintenance_entry_test.go b/backend/internal/data/repo/repo_maintenance_entry_test.go index bc9f6af..0fa288c 100644 --- a/backend/internal/data/repo/repo_maintenance_entry_test.go +++ b/backend/internal/data/repo/repo_maintenance_entry_test.go @@ -5,7 +5,9 @@ import ( "testing" "time" + "github.com/hay-kot/homebox/backend/internal/data/types" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // get the previous month from the current month, accounts for errors when run @@ -16,9 +18,7 @@ func getPrevMonth(now time.Time) time.Time { // avoid infinite loop max := 15 for t.Month() == now.Month() { - println("month is the same") t = t.AddDate(0, 0, -1) - println(t.String()) max-- if max == 0 { @@ -45,10 +45,10 @@ func 
TestMaintenanceEntryRepository_GetLog(t *testing.T) { } created[i] = MaintenanceEntryCreate{ - Date: dt, - Name: "Maintenance", - Description: "Maintenance description", - Cost: 10, + CompletedDate: types.DateFromTime(dt), + Name: "Maintenance", + Description: "Maintenance description", + Cost: 10, } } @@ -60,13 +60,15 @@ func TestMaintenanceEntryRepository_GetLog(t *testing.T) { } // Get the log for the item - log, err := tRepos.MaintEntry.GetLog(context.Background(), item.ID) + log, err := tRepos.MaintEntry.GetLog(context.Background(), tGroup.ID, item.ID, MaintenanceLogQuery{ + Completed: true, + }) if err != nil { t.Fatalf("failed to get maintenance log: %v", err) } assert.Equal(t, item.ID, log.ItemID) - assert.Equal(t, 10, len(log.Entries)) + assert.Len(t, log.Entries, 10) // Calculate the average cost var total float64 @@ -75,11 +77,11 @@ func TestMaintenanceEntryRepository_GetLog(t *testing.T) { total += entry.Cost } - assert.Equal(t, total, log.CostTotal, "total cost should be equal to the sum of all entries") - assert.Equal(t, total/2, log.CostAverage, "average cost should be the average of the two months") + assert.InDelta(t, total, log.CostTotal, .001, "total cost should be equal to the sum of all entries") + assert.InDelta(t, total/2, log.CostAverage, .001, "average cost should be the average of the two months") for _, entry := range log.Entries { err := tRepos.MaintEntry.Delete(context.Background(), entry.ID) - assert.NoError(t, err) + require.NoError(t, err) } } diff --git a/backend/internal/data/repo/repo_notifier.go b/backend/internal/data/repo/repo_notifier.go new file mode 100644 index 0000000..f31be4b --- /dev/null +++ b/backend/internal/data/repo/repo_notifier.go @@ -0,0 +1,120 @@ +package repo + +import ( + "context" + "time" + + "github.com/google/uuid" + "github.com/hay-kot/homebox/backend/internal/data/ent" + "github.com/hay-kot/homebox/backend/internal/data/ent/notifier" +) + +type NotifierRepository struct { + db *ent.Client + mapper 
MapFunc[*ent.Notifier, NotifierOut] +} + +func NewNotifierRepository(db *ent.Client) *NotifierRepository { + return &NotifierRepository{ + db: db, + mapper: func(n *ent.Notifier) NotifierOut { + return NotifierOut{ + ID: n.ID, + UserID: n.UserID, + GroupID: n.GroupID, + CreatedAt: n.CreatedAt, + UpdatedAt: n.UpdatedAt, + + Name: n.Name, + IsActive: n.IsActive, + URL: n.URL, + } + }, + } +} + +type ( + NotifierCreate struct { + Name string `json:"name" validate:"required,min=1,max=255"` + IsActive bool `json:"isActive"` + URL string `json:"url" validate:"required,shoutrrr"` + } + + NotifierUpdate struct { + Name string `json:"name" validate:"required,min=1,max=255"` + IsActive bool `json:"isActive"` + URL *string `json:"url" validate:"omitempty,shoutrrr" extensions:"x-nullable"` + } + + NotifierOut struct { + ID uuid.UUID `json:"id"` + UserID uuid.UUID `json:"userId"` + GroupID uuid.UUID `json:"groupId"` + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + + Name string `json:"name"` + IsActive bool `json:"isActive"` + URL string `json:"-"` // URL field is not exposed to the client + } +) + +func (r *NotifierRepository) GetByUser(ctx context.Context, userID uuid.UUID) ([]NotifierOut, error) { + notifier, err := r.db.Notifier.Query(). + Where(notifier.UserID(userID)). + Order(ent.Asc(notifier.FieldName)). + All(ctx) + + return r.mapper.MapEachErr(notifier, err) +} + +func (r *NotifierRepository) GetByGroup(ctx context.Context, groupID uuid.UUID) ([]NotifierOut, error) { + notifier, err := r.db.Notifier.Query(). + Where(notifier.GroupID(groupID)). + Order(ent.Asc(notifier.FieldName)). + All(ctx) + + return r.mapper.MapEachErr(notifier, err) +} + +func (r *NotifierRepository) GetActiveByGroup(ctx context.Context, groupID uuid.UUID) ([]NotifierOut, error) { + notifier, err := r.db.Notifier.Query(). + Where(notifier.GroupID(groupID), notifier.IsActive(true)). + Order(ent.Asc(notifier.FieldName)). 
+ All(ctx) + + return r.mapper.MapEachErr(notifier, err) +} + +func (r *NotifierRepository) Create(ctx context.Context, groupID, userID uuid.UUID, input NotifierCreate) (NotifierOut, error) { + notifier, err := r.db.Notifier. + Create(). + SetGroupID(groupID). + SetUserID(userID). + SetName(input.Name). + SetIsActive(input.IsActive). + SetURL(input.URL). + Save(ctx) + + return r.mapper.MapErr(notifier, err) +} + +func (r *NotifierRepository) Update(ctx context.Context, userID uuid.UUID, id uuid.UUID, input NotifierUpdate) (NotifierOut, error) { + q := r.db.Notifier. + UpdateOneID(id). + SetName(input.Name). + SetIsActive(input.IsActive) + + if input.URL != nil { + q.SetURL(*input.URL) + } + + notifier, err := q.Save(ctx) + + return r.mapper.MapErr(notifier, err) +} + +func (r *NotifierRepository) Delete(ctx context.Context, userID uuid.UUID, ID uuid.UUID) error { + _, err := r.db.Notifier.Delete().Where(notifier.UserID(userID), notifier.ID(ID)).Exec(ctx) + return err +} diff --git a/backend/internal/data/repo/repo_tokens.go b/backend/internal/data/repo/repo_tokens.go index 7ba982e..42843e0 100644 --- a/backend/internal/data/repo/repo_tokens.go +++ b/backend/internal/data/repo/repo_tokens.go @@ -71,7 +71,7 @@ func (r *TokenRepository) GetRoles(ctx context.Context, token string) (*set.Set[ return &roleSet, nil } -// Creates a token for a user +// CreateToken Creates a token for a user func (r *TokenRepository) CreateToken(ctx context.Context, createToken UserAuthTokenCreate, roles ...authroles.Role) (UserAuthToken, error) { dbToken, err := r.db.AuthTokens.Create(). SetToken(createToken.TokenHash). 
diff --git a/backend/internal/data/repo/repo_tokens_test.go b/backend/internal/data/repo/repo_tokens_test.go index e066911..a0b4375 100644 --- a/backend/internal/data/repo/repo_tokens_test.go +++ b/backend/internal/data/repo/repo_tokens_test.go @@ -7,15 +7,15 @@ import ( "github.com/hay-kot/homebox/backend/pkgs/hasher" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAuthTokenRepo_CreateToken(t *testing.T) { - asrt := assert.New(t) ctx := context.Background() user := userFactory() userOut, err := tRepos.Users.Create(ctx, user) - asrt.NoError(err) + require.NoError(t, err) expiresAt := time.Now().Add(time.Hour) @@ -27,23 +27,22 @@ func TestAuthTokenRepo_CreateToken(t *testing.T) { UserID: userOut.ID, }) - asrt.NoError(err) - asrt.Equal(userOut.ID, token.UserID) - asrt.Equal(expiresAt, token.ExpiresAt) + require.NoError(t, err) + assert.Equal(t, userOut.ID, token.UserID) + assert.Equal(t, expiresAt, token.ExpiresAt) // Cleanup - asrt.NoError(tRepos.Users.Delete(ctx, userOut.ID)) + require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID)) _, err = tRepos.AuthTokens.DeleteAll(ctx) - asrt.NoError(err) + require.NoError(t, err) } func TestAuthTokenRepo_DeleteToken(t *testing.T) { - asrt := assert.New(t) ctx := context.Background() user := userFactory() userOut, err := tRepos.Users.Create(ctx, user) - asrt.NoError(err) + require.NoError(t, err) expiresAt := time.Now().Add(time.Hour) @@ -54,15 +53,14 @@ func TestAuthTokenRepo_DeleteToken(t *testing.T) { ExpiresAt: expiresAt, UserID: userOut.ID, }) - asrt.NoError(err) + require.NoError(t, err) // Delete token err = tRepos.AuthTokens.DeleteToken(ctx, []byte(generatedToken.Raw)) - asrt.NoError(err) + require.NoError(t, err) } func TestAuthTokenRepo_GetUserByToken(t *testing.T) { - assert := assert.New(t) ctx := context.Background() user := userFactory() @@ -77,24 +75,23 @@ func TestAuthTokenRepo_GetUserByToken(t *testing.T) { UserID: userOut.ID, }) - assert.NoError(err) + 
require.NoError(t, err) // Get User from token foundUser, err := tRepos.AuthTokens.GetUserFromToken(ctx, token.TokenHash) - assert.NoError(err) - assert.Equal(userOut.ID, foundUser.ID) - assert.Equal(userOut.Name, foundUser.Name) - assert.Equal(userOut.Email, foundUser.Email) + require.NoError(t, err) + assert.Equal(t, userOut.ID, foundUser.ID) + assert.Equal(t, userOut.Name, foundUser.Name) + assert.Equal(t, userOut.Email, foundUser.Email) // Cleanup - assert.NoError(tRepos.Users.Delete(ctx, userOut.ID)) + require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID)) _, err = tRepos.AuthTokens.DeleteAll(ctx) - assert.NoError(err) + require.NoError(t, err) } func TestAuthTokenRepo_PurgeExpiredTokens(t *testing.T) { - assert := assert.New(t) ctx := context.Background() user := userFactory() @@ -112,27 +109,26 @@ func TestAuthTokenRepo_PurgeExpiredTokens(t *testing.T) { UserID: userOut.ID, }) - assert.NoError(err) - assert.NotNil(createdToken) + require.NoError(t, err) + assert.NotNil(t, createdToken) createdTokens = append(createdTokens, createdToken) - } // Purge expired tokens tokensDeleted, err := tRepos.AuthTokens.PurgeExpiredTokens(ctx) - assert.NoError(err) - assert.Equal(5, tokensDeleted) + require.NoError(t, err) + assert.Equal(t, 5, tokensDeleted) // Check if tokens are deleted for _, token := range createdTokens { _, err := tRepos.AuthTokens.GetUserFromToken(ctx, token.TokenHash) - assert.Error(err) + require.Error(t, err) } // Cleanup - assert.NoError(tRepos.Users.Delete(ctx, userOut.ID)) + require.NoError(t, tRepos.Users.Delete(ctx, userOut.ID)) _, err = tRepos.AuthTokens.DeleteAll(ctx) - assert.NoError(err) + require.NoError(t, err) } diff --git a/backend/internal/data/repo/repo_users.go b/backend/internal/data/repo/repo_users.go index 03850d6..68b1eb5 100644 --- a/backend/internal/data/repo/repo_users.go +++ b/backend/internal/data/repo/repo_users.go @@ -60,32 +60,32 @@ func mapUserOut(user *ent.User) UserOut { } } -func (e *UserRepository) GetOneId(ctx 
context.Context, id uuid.UUID) (UserOut, error) { - return mapUserOutErr(e.db.User.Query(). - Where(user.ID(id)). +func (r *UserRepository) GetOneID(ctx context.Context, ID uuid.UUID) (UserOut, error) { + return mapUserOutErr(r.db.User.Query(). + Where(user.ID(ID)). WithGroup(). Only(ctx)) } -func (e *UserRepository) GetOneEmail(ctx context.Context, email string) (UserOut, error) { - return mapUserOutErr(e.db.User.Query(). +func (r *UserRepository) GetOneEmail(ctx context.Context, email string) (UserOut, error) { + return mapUserOutErr(r.db.User.Query(). Where(user.EmailEqualFold(email)). WithGroup(). Only(ctx), ) } -func (e *UserRepository) GetAll(ctx context.Context) ([]UserOut, error) { - return mapUsersOutErr(e.db.User.Query().WithGroup().All(ctx)) +func (r *UserRepository) GetAll(ctx context.Context) ([]UserOut, error) { + return mapUsersOutErr(r.db.User.Query().WithGroup().All(ctx)) } -func (e *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, error) { +func (r *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, error) { role := user.RoleUser if usr.IsOwner { role = user.RoleOwner } - entUser, err := e.db.User. + entUser, err := r.db.User. Create(). SetName(usr.Name). SetEmail(usr.Email). @@ -98,11 +98,11 @@ func (e *UserRepository) Create(ctx context.Context, usr UserCreate) (UserOut, e return UserOut{}, err } - return e.GetOneId(ctx, entUser.ID) + return r.GetOneID(ctx, entUser.ID) } -func (e *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpdate) error { - q := e.db.User.Update(). +func (r *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpdate) error { + q := r.db.User.Update(). Where(user.ID(ID)). SetName(data.Name). 
SetEmail(data.Email) @@ -111,18 +111,18 @@ func (e *UserRepository) Update(ctx context.Context, ID uuid.UUID, data UserUpda return err } -func (e *UserRepository) Delete(ctx context.Context, id uuid.UUID) error { - _, err := e.db.User.Delete().Where(user.ID(id)).Exec(ctx) +func (r *UserRepository) Delete(ctx context.Context, id uuid.UUID) error { + _, err := r.db.User.Delete().Where(user.ID(id)).Exec(ctx) return err } -func (e *UserRepository) DeleteAll(ctx context.Context) error { - _, err := e.db.User.Delete().Exec(ctx) +func (r *UserRepository) DeleteAll(ctx context.Context) error { + _, err := r.db.User.Delete().Exec(ctx) return err } -func (e *UserRepository) GetSuperusers(ctx context.Context) ([]*ent.User, error) { - users, err := e.db.User.Query().Where(user.IsSuperuser(true)).All(ctx) +func (r *UserRepository) GetSuperusers(ctx context.Context) ([]*ent.User, error) { + users, err := r.db.User.Query().Where(user.IsSuperuser(true)).All(ctx) if err != nil { return nil, err } diff --git a/backend/internal/data/repo/repo_users_test.go b/backend/internal/data/repo/repo_users_test.go index 31d2737..ef85f44 100644 --- a/backend/internal/data/repo/repo_users_test.go +++ b/backend/internal/data/repo/repo_users_test.go @@ -2,10 +2,10 @@ package repo import ( "context" - "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func userFactory() UserCreate { @@ -24,18 +24,18 @@ func TestUserRepo_GetOneEmail(t *testing.T) { ctx := context.Background() _, err := tRepos.Users.Create(ctx, user) - assert.NoError(err) + require.NoError(t, err) foundUser, err := tRepos.Users.GetOneEmail(ctx, user.Email) assert.NotNil(foundUser) - assert.Nil(err) + require.NoError(t, err) assert.Equal(user.Email, foundUser.Email) assert.Equal(user.Name, foundUser.Name) // Cleanup err = tRepos.Users.DeleteAll(ctx) - assert.NoError(err) + require.NoError(t, err) } func TestUserRepo_GetOneId(t *testing.T) { @@ -44,16 +44,16 @@ func TestUserRepo_GetOneId(t 
*testing.T) { ctx := context.Background() userOut, _ := tRepos.Users.Create(ctx, user) - foundUser, err := tRepos.Users.GetOneId(ctx, userOut.ID) + foundUser, err := tRepos.Users.GetOneID(ctx, userOut.ID) assert.NotNil(foundUser) - assert.Nil(err) + require.NoError(t, err) assert.Equal(user.Email, foundUser.Email) assert.Equal(user.Name, foundUser.Name) // Cleanup err = tRepos.Users.DeleteAll(ctx) - assert.NoError(err) + require.NoError(t, err) } func TestUserRepo_GetAll(t *testing.T) { @@ -77,11 +77,10 @@ func TestUserRepo_GetAll(t *testing.T) { // Validate allUsers, err := tRepos.Users.GetAll(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, len(created), len(allUsers)) for _, usr := range created { - fmt.Printf("%+v\n", usr) for _, usr2 := range allUsers { if usr.ID == usr2.ID { assert.Equal(t, usr.Email, usr2.Email) @@ -98,12 +97,12 @@ func TestUserRepo_GetAll(t *testing.T) { // Cleanup err = tRepos.Users.DeleteAll(ctx) - assert.NoError(t, err) + require.NoError(t, err) } func TestUserRepo_Update(t *testing.T) { user, err := tRepos.Users.Create(context.Background(), userFactory()) - assert.NoError(t, err) + require.NoError(t, err) updateData := UserUpdate{ Name: fk.Str(10), @@ -112,11 +111,11 @@ func TestUserRepo_Update(t *testing.T) { // Update err = tRepos.Users.Update(context.Background(), user.ID, updateData) - assert.NoError(t, err) + require.NoError(t, err) // Validate - updated, err := tRepos.Users.GetOneId(context.Background(), user.ID) - assert.NoError(t, err) + updated, err := tRepos.Users.GetOneID(context.Background(), user.ID) + require.NoError(t, err) assert.NotEqual(t, user.Name, updated.Name) assert.NotEqual(t, user.Email, updated.Email) } @@ -133,12 +132,12 @@ func TestUserRepo_Delete(t *testing.T) { ctx := context.Background() allUsers, _ := tRepos.Users.GetAll(ctx) - assert.Greater(t, len(allUsers), 0) + assert.NotEmpty(t, allUsers) err := tRepos.Users.DeleteAll(ctx) - assert.NoError(t, err) + require.NoError(t, err) 
allUsers, _ = tRepos.Users.GetAll(ctx) - assert.Equal(t, len(allUsers), 0) + assert.Empty(t, allUsers) } func TestUserRepo_GetSuperusers(t *testing.T) { @@ -162,7 +161,7 @@ func TestUserRepo_GetSuperusers(t *testing.T) { ctx := context.Background() superUsers, err := tRepos.Users.GetSuperusers(ctx) - assert.NoError(t, err) + require.NoError(t, err) for _, usr := range superUsers { assert.True(t, usr.IsSuperuser) @@ -170,5 +169,5 @@ func TestUserRepo_GetSuperusers(t *testing.T) { // Cleanup err = tRepos.Users.DeleteAll(ctx) - assert.NoError(t, err) + require.NoError(t, err) } diff --git a/backend/internal/data/repo/repos_all.go b/backend/internal/data/repo/repos_all.go index 40748cb..2ccc022 100644 --- a/backend/internal/data/repo/repos_all.go +++ b/backend/internal/data/repo/repos_all.go @@ -1,6 +1,10 @@ +// Package repo provides the data access layer for the application. package repo -import "github.com/hay-kot/homebox/backend/internal/data/ent" +import ( + "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus" + "github.com/hay-kot/homebox/backend/internal/data/ent" +) // AllRepos is a container for all the repository interfaces type AllRepos struct { @@ -13,18 +17,20 @@ type AllRepos struct { Docs *DocumentRepository Attachments *AttachmentRepo MaintEntry *MaintenanceEntryRepository + Notifiers *NotifierRepository } -func New(db *ent.Client, root string) *AllRepos { +func New(db *ent.Client, bus *eventbus.EventBus, root string) *AllRepos { return &AllRepos{ Users: &UserRepository{db}, AuthTokens: &TokenRepository{db}, - Groups: &GroupRepository{db}, - Locations: &LocationRepository{db}, - Labels: &LabelRepository{db}, - Items: &ItemsRepository{db}, + Groups: NewGroupRepository(db), + Locations: &LocationRepository{db, bus}, + Labels: &LabelRepository{db, bus}, + Items: &ItemsRepository{db, bus}, Docs: &DocumentRepository{db, root}, Attachments: &AttachmentRepo{db}, MaintEntry: &MaintenanceEntryRepository{db}, + Notifiers: 
NewNotifierRepository(db), } } diff --git a/backend/internal/data/types/date.go b/backend/internal/data/types/date.go index 0dc09db..9401e06 100644 --- a/backend/internal/data/types/date.go +++ b/backend/internal/data/types/date.go @@ -1,8 +1,8 @@ +// Package types provides custom types for the application. package types import ( "errors" - "fmt" "strings" "time" ) @@ -42,6 +42,7 @@ func DateFromString(s string) Date { try := [...]string{ "2006-01-02", "01/02/2006", + "2006/01/02", time.RFC3339, } @@ -74,9 +75,7 @@ func (d Date) MarshalJSON() ([]byte, error) { func (d *Date) UnmarshalJSON(data []byte) (err error) { // unescape the string if necessary `\"` -> `"` str := strings.Trim(string(data), "\"") - fmt.Printf("str: %q\n", str) if str == "" || str == "null" || str == `""` { - println("empty date") *d = Date{} return nil } diff --git a/backend/internal/sys/config/conf.go b/backend/internal/sys/config/conf.go index c1655c2..8b7b23c 100644 --- a/backend/internal/sys/config/conf.go +++ b/backend/internal/sys/config/conf.go @@ -1,3 +1,4 @@ +// Package config provides the configuration for the application. 
package config import ( @@ -5,6 +6,7 @@ import ( "errors" "fmt" "os" + "time" "github.com/ardanlabs/conf/v3" ) @@ -15,44 +17,48 @@ const ( ) type Config struct { - Mode string `yaml:"mode" conf:"default:development"` // development or production - Web WebConfig `yaml:"web"` - Storage Storage `yaml:"storage"` - Log LoggerConf `yaml:"logger"` - Mailer MailerConf `yaml:"mailer"` - Swagger SwaggerConf `yaml:"swagger"` - Demo bool `yaml:"demo"` - Debug DebugConf `yaml:"debug"` - Options Options `yaml:"options"` + conf.Version + Mode string `yaml:"mode" conf:"default:development"` // development or production + Web WebConfig `yaml:"web"` + Storage Storage `yaml:"storage"` + Log LoggerConf `yaml:"logger"` + Mailer MailerConf `yaml:"mailer"` + Demo bool `yaml:"demo"` + Debug DebugConf `yaml:"debug"` + Options Options `yaml:"options"` } type Options struct { - AllowRegistration bool `yaml:"disable_registration" conf:"default:true"` - AutoIncrementAssetID bool `yaml:"auto_increment_asset_id" conf:"default:true"` + AllowRegistration bool `yaml:"disable_registration" conf:"default:true"` + AutoIncrementAssetID bool `yaml:"auto_increment_asset_id" conf:"default:true"` + CurrencyConfig string `yaml:"currencies"` } type DebugConf struct { Enabled bool `yaml:"enabled" conf:"default:false"` - Port string `yaml:"port" conf:"default:4000"` -} - -type SwaggerConf struct { - Host string `yaml:"host" conf:"default:localhost:7745"` - Scheme string `yaml:"scheme" conf:"default:http"` + Port string `yaml:"port" conf:"default:4000"` } type WebConfig struct { - Port string `yaml:"port" conf:"default:7745"` - Host string `yaml:"host"` - MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"` + Port string `yaml:"port" conf:"default:7745"` + Host string `yaml:"host"` + MaxUploadSize int64 `yaml:"max_file_upload" conf:"default:10"` + ReadTimeout time.Duration `yaml:"read_timeout" conf:"default:10s"` + WriteTimeout time.Duration `yaml:"write_timeout" conf:"default:10s"` + IdleTimeout 
time.Duration `yaml:"idle_timeout" conf:"default:30s"` } // New parses the CLI/Config file and returns a Config struct. If the file argument is an empty string, the // file is not read. If the file is not empty, the file is read and the Config struct is returned. -func New() (*Config, error) { +func New(buildstr string, description string) (*Config, error) { var cfg Config const prefix = "HBOX" + cfg.Version = conf.Version{ + Build: buildstr, + Desc: description, + } + help, err := conf.Parse(prefix, &cfg) if err != nil { if errors.Is(err, conf.ErrHelpWanted) { diff --git a/backend/internal/sys/config/conf_database.go b/backend/internal/sys/config/conf_database.go index 69a67b9..2c6a761 100644 --- a/backend/internal/sys/config/conf_database.go +++ b/backend/internal/sys/config/conf_database.go @@ -6,6 +6,6 @@ const ( type Storage struct { // Data is the path to the root directory - Data string `yaml:"data" conf:"default:./.data"` - SqliteUrl string `yaml:"sqlite-url" conf:"default:./.data/homebox.db?_fk=1"` + Data string `yaml:"data" conf:"default:./.data"` + SqliteURL string `yaml:"sqlite-url" conf:"default:./.data/homebox.db?_pragma=busy_timeout=999&_pragma=journal_mode=WAL&_fk=1"` } diff --git a/backend/internal/sys/validate/errors.go b/backend/internal/sys/validate/errors.go index b5c101a..09fdf2c 100644 --- a/backend/internal/sys/validate/errors.go +++ b/backend/internal/sys/validate/errors.go @@ -5,7 +5,8 @@ import ( "errors" ) -type UnauthorizedError struct{} +type UnauthorizedError struct { +} func (err *UnauthorizedError) Error() string { return "unauthorized" @@ -28,7 +29,7 @@ func (err *InvalidRouteKeyError) Error() string { return "invalid route key: " + err.key } -func NewInvalidRouteKeyError(key string) error { +func NewRouteKeyError(key string) error { return &InvalidRouteKeyError{key} } @@ -87,7 +88,7 @@ func (fe FieldErrors) Nil() bool { return len(fe) == 0 } -// Error implments the error interface. +// Error implements the error interface. 
func (fe FieldErrors) Error() string { d, err := json.Marshal(fe) if err != nil { @@ -100,6 +101,10 @@ func NewFieldErrors(errs ...FieldError) FieldErrors { return errs } +func NewFieldError(field, reason string) FieldError { + return FieldError{Field: field, Error: reason} +} + func IsFieldError(err error) bool { v := FieldErrors{} return errors.As(err, &v) diff --git a/backend/internal/sys/validate/validate.go b/backend/internal/sys/validate/validate.go index 4a4b7a9..d9dbe24 100644 --- a/backend/internal/sys/validate/validate.go +++ b/backend/internal/sys/validate/validate.go @@ -1,21 +1,68 @@ +// Package validate provides a wrapper around the go-playground/validator package package validate -import "github.com/go-playground/validator/v10" +import ( + "strings" + + "github.com/go-playground/validator/v10" +) var validate *validator.Validate -func init() { +func init() { // nolint validate = validator.New() + + err := validate.RegisterValidation("shoutrrr", func(fl validator.FieldLevel) bool { + prefixes := [...]string{ + "bark://", + "discord://", + "smtp://", + "gotify://", + "googlechat://", + "ifttt://", + "join://", + "mattermost://", + "matrix://", + "ntfy://", + "opsgenie://", + "pushbullet://", + "pushover://", + "rocketchat://", + "slack://", + "teams://", + "telegram://", + "zulip://", + "generic://", + "generic+", + } + + str := fl.Field().String() + if str == "" { + return false + } + + for _, prefix := range prefixes { + if strings.HasPrefix(str, prefix) { + return true + } + } + + return false + }) + + if err != nil { + panic(err) + } } -// Checks a struct for validation errors and returns any errors the occur. This +// Check a struct for validation errors and returns any errors the occur. This // wraps the validate.Struct() function and provides some error wrapping. When // a validator.ValidationErrors is returned, it is wrapped transformed into a // FieldErrors array and returned. 
func Check(val any) error { err := validate.Struct(val) if err != nil { - verrors, ok := err.(validator.ValidationErrors) + verrors, ok := err.(validator.ValidationErrors) // nolint - we know it's a validator.ValidationErrors if !ok { return err } diff --git a/backend/internal/web/adapters/actions.go b/backend/internal/web/adapters/actions.go new file mode 100644 index 0000000..3905723 --- /dev/null +++ b/backend/internal/web/adapters/actions.go @@ -0,0 +1,75 @@ +package adapters + +import ( + "net/http" + + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" +) + +// Action is a function that adapts a function to the server.Handler interface. +// It decodes the request body into a value of type T and passes it to the function f. +// The function f is expected to return a value of type Y and an error. +// +// Example: +// +// type Body struct { +// Foo string `json:"foo"` +// } +// +// fn := func(r *http.Request, b Body) (any, error) { +// // do something with b +// return nil, nil +// } +// +// r.Post("/foo", adapters.Action(fn, http.StatusCreated)) +func Action[T any, Y any](f AdapterFunc[T, Y], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + v, err := DecodeBody[T](r) + if err != nil { + return err + } + + res, err := f(r, v) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} + +// ActionID functions the same as Action, but it also decodes a UUID from the URL path. 
+// +// Example: +// +// type Body struct { +// Foo string `json:"foo"` +// } +// +// fn := func(r *http.Request, ID uuid.UUID, b Body) (any, error) { +// // do something with ID and b +// return nil, nil +// } +// +// r.Post("/foo/{id}", adapters.ActionID(fn, http.StatusCreated)) +func ActionID[T any, Y any](param string, f IDFunc[T, Y], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + ID, err := RouteUUID(r, param) + if err != nil { + return err + } + + v, err := DecodeBody[T](r) + if err != nil { + return err + } + + res, err := f(r, ID, v) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} diff --git a/backend/internal/web/adapters/adapters.go b/backend/internal/web/adapters/adapters.go new file mode 100644 index 0000000..8372a60 --- /dev/null +++ b/backend/internal/web/adapters/adapters.go @@ -0,0 +1,10 @@ +package adapters + +import ( + "net/http" + + "github.com/google/uuid" +) + +type AdapterFunc[T any, Y any] func(*http.Request, T) (Y, error) +type IDFunc[T any, Y any] func(*http.Request, uuid.UUID, T) (Y, error) diff --git a/backend/internal/web/adapters/command.go b/backend/internal/web/adapters/command.go new file mode 100644 index 0000000..d3d099b --- /dev/null +++ b/backend/internal/web/adapters/command.go @@ -0,0 +1,62 @@ +package adapters + +import ( + "net/http" + + "github.com/google/uuid" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" +) + +type CommandFunc[T any] func(*http.Request) (T, error) +type CommandIDFunc[T any] func(*http.Request, uuid.UUID) (T, error) + +// Command is an HandlerAdapter that returns a errchain.HandlerFunc that +// The command adapters are used to handle commands that do not accept a body +// or a query. You can think of them as a way to handle RPC style Rest Endpoints. 
+// +// Example: +// +// fn := func(r *http.Request) (interface{}, error) { +// // do something +// return nil, nil +// } +// +// r.Get("/foo", adapters.Command(fn, http.NoContent)) +func Command[T any](f CommandFunc[T], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + res, err := f(r) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} + +// CommandID is the same as the Command adapter but it accepts a UUID as a parameter +// in the URL. The parameter name is passed as the first argument. +// +// Example: +// +// fn := func(r *http.Request, id uuid.UUID) (interface{}, error) { +// // do something +// return nil, nil +// } +// +// r.Get("/foo/{id}", adapters.CommandID("id", fn, http.NoContent)) +func CommandID[T any](param string, f CommandIDFunc[T], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + ID, err := RouteUUID(r, param) + if err != nil { + return err + } + + res, err := f(r, ID) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} diff --git a/backend/internal/web/adapters/decoders.go b/backend/internal/web/adapters/decoders.go new file mode 100644 index 0000000..ad5b82b --- /dev/null +++ b/backend/internal/web/adapters/decoders.go @@ -0,0 +1,65 @@ +package adapters + +import ( + "net/http" + + "github.com/pkg/errors" + + "github.com/go-chi/chi/v5" + "github.com/google/uuid" + "github.com/gorilla/schema" + "github.com/hay-kot/homebox/backend/internal/sys/validate" + "github.com/hay-kot/httpkit/server" +) + +var queryDecoder = schema.NewDecoder() + +func DecodeQuery[T any](r *http.Request) (T, error) { + var v T + err := queryDecoder.Decode(&v, r.URL.Query()) + if err != nil { + return v, errors.Wrap(err, "decoding error") + } + + err = validate.Check(v) + if err != nil { + return v, errors.Wrap(err, "validation error") + } + + return v, nil +} + +type Validator interface { + Validate() error +} + +func DecodeBody[T 
any](r *http.Request) (T, error) { + var val T + + err := server.Decode(r, &val) + if err != nil { + return val, errors.Wrap(err, "body decoding error") + } + + err = validate.Check(val) + if err != nil { + return val, err + } + + if v, ok := any(val).(Validator); ok { + err = v.Validate() + if err != nil { + return val, errors.Wrap(err, "validation error") + } + } + + return val, nil +} + +func RouteUUID(r *http.Request, key string) (uuid.UUID, error) { + ID, err := uuid.Parse(chi.URLParam(r, key)) + if err != nil { + return uuid.Nil, validate.NewRouteKeyError(key) + } + return ID, nil +} diff --git a/backend/internal/web/adapters/doc.go b/backend/internal/web/adapters/doc.go new file mode 100644 index 0000000..1b6792b --- /dev/null +++ b/backend/internal/web/adapters/doc.go @@ -0,0 +1,9 @@ +/* +Package adapters offers common adapters for turing regular functions into HTTP Handlers +There are three types of adapters + + - Query adapters + - Action adapters + - Command adapters +*/ +package adapters diff --git a/backend/internal/web/adapters/query.go b/backend/internal/web/adapters/query.go new file mode 100644 index 0000000..b044475 --- /dev/null +++ b/backend/internal/web/adapters/query.go @@ -0,0 +1,73 @@ +package adapters + +import ( + "net/http" + + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" +) + +// Query is a server.Handler that decodes a query from the request and calls the provided function. 
+// +// Example: +// +// type Query struct { +// Foo string `schema:"foo"` +// } +// +// fn := func(r *http.Request, q Query) (any, error) { +// // do something with q +// return nil, nil +// } +// +// r.Get("/foo", adapters.Query(fn, http.StatusOK)) +func Query[T any, Y any](f AdapterFunc[T, Y], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + q, err := DecodeQuery[T](r) + if err != nil { + return err + } + + res, err := f(r, q) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} + +// QueryID is a server.Handler that decodes a query and an ID from the request and calls the provided function. +// +// Example: +// +// type Query struct { +// Foo string `schema:"foo"` +// } +// +// fn := func(r *http.Request, ID uuid.UUID, q Query) (any, error) { +// // do something with ID and q +// return nil, nil +// } +// +// r.Get("/foo/{id}", adapters.QueryID(fn, http.StatusOK)) +func QueryID[T any, Y any](param string, f IDFunc[T, Y], ok int) errchain.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) error { + ID, err := RouteUUID(r, param) + if err != nil { + return err + } + + q, err := DecodeQuery[T](r) + if err != nil { + return err + } + + res, err := f(r, ID, q) + if err != nil { + return err + } + + return server.JSON(w, ok, res) + } +} diff --git a/backend/internal/web/mid/doc.go b/backend/internal/web/mid/doc.go new file mode 100644 index 0000000..4f71563 --- /dev/null +++ b/backend/internal/web/mid/doc.go @@ -0,0 +1,2 @@ +// Package mid provides web middleware. 
+package mid diff --git a/backend/internal/web/mid/errors.go b/backend/internal/web/mid/errors.go index dc716c9..c8b04d6 100644 --- a/backend/internal/web/mid/errors.go +++ b/backend/internal/web/mid/errors.go @@ -3,37 +3,48 @@ package mid import ( "net/http" + "github.com/go-chi/chi/v5/middleware" "github.com/hay-kot/homebox/backend/internal/data/ent" "github.com/hay-kot/homebox/backend/internal/sys/validate" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/hay-kot/httpkit/errchain" + "github.com/hay-kot/httpkit/server" "github.com/rs/zerolog" ) -func Errors(log zerolog.Logger) server.Middleware { - return func(h server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { +type ErrorResponse struct { + Error string `json:"error"` + Fields map[string]string `json:"fields,omitempty"` +} + +func Errors(log zerolog.Logger) errchain.ErrorHandler { + return func(h errchain.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { err := h.ServeHTTP(w, r) if err != nil { - var resp server.ErrorResponse + var resp ErrorResponse var code int + traceID := r.Context().Value(middleware.RequestIDKey).(string) log.Err(err). - Str("trace_id", server.GetTraceID(r.Context())). + Stack(). + Str("req_id", traceID). 
Msg("ERROR occurred") switch { case validate.IsUnauthorizedError(err): code = http.StatusUnauthorized - resp = server.ErrorResponse{ + resp = ErrorResponse{ Error: "unauthorized", } case validate.IsInvalidRouteKeyError(err): code = http.StatusBadRequest - resp = server.ErrorResponse{ + resp = ErrorResponse{ Error: err.Error(), } case validate.IsFieldError(err): - fieldErrors := err.(validate.FieldErrors) + code = http.StatusUnprocessableEntity + + fieldErrors := err.(validate.FieldErrors) // nolint resp.Error = "Validation Error" resp.Fields = map[string]string{} @@ -41,29 +52,26 @@ func Errors(log zerolog.Logger) server.Middleware { resp.Fields[fieldError.Field] = fieldError.Error } case validate.IsRequestError(err): - requestError := err.(*validate.RequestError) + requestError := err.(*validate.RequestError) // nolint resp.Error = requestError.Error() - code = requestError.Status + + if requestError.Status == 0 { + code = http.StatusBadRequest + } else { + code = requestError.Status + } case ent.IsNotFound(err): resp.Error = "Not Found" code = http.StatusNotFound default: resp.Error = "Unknown Error" code = http.StatusInternalServerError - } - if err := server.Respond(w, code, resp); err != nil { - return err - } - - // If Showdown error, return error - if server.IsShutdownError(err) { - return err + if err := server.JSON(w, code, resp); err != nil { + log.Err(err).Msg("failed to write response") } } - - return nil }) } } diff --git a/backend/internal/web/mid/logger.go b/backend/internal/web/mid/logger.go index fb39c67..0be4722 100644 --- a/backend/internal/web/mid/logger.go +++ b/backend/internal/web/mid/logger.go @@ -1,96 +1,44 @@ package mid import ( - "fmt" + "bufio" + "errors" + "net" "net/http" - "github.com/hay-kot/homebox/backend/pkgs/server" + "github.com/go-chi/chi/v5/middleware" "github.com/rs/zerolog" ) -type statusRecorder struct { +type spy struct { http.ResponseWriter - Status int + status int } -func (r *statusRecorder) WriteHeader(status int) { - 
r.Status = status - r.ResponseWriter.WriteHeader(status) +func (s *spy) WriteHeader(status int) { + s.status = status + s.ResponseWriter.WriteHeader(status) } -func Logger(log zerolog.Logger) server.Middleware { - return func(next server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - traceId := server.GetTraceID(r.Context()) +func (s *spy) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := s.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, errors.New("response writer does not support hijacking") + } + return hj.Hijack() +} - log.Info(). - Str("trace_id", traceId). - Str("method", r.Method). - Str("path", r.URL.Path). - Str("remove_address", r.RemoteAddr). - Msg("request started") +func Logger(l zerolog.Logger) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + reqID := r.Context().Value(middleware.RequestIDKey).(string) - record := &statusRecorder{ResponseWriter: w, Status: http.StatusOK} + l.Info().Str("method", r.Method).Str("path", r.URL.Path).Str("rid", reqID).Msg("request received") - err := next.ServeHTTP(record, r) + s := &spy{ResponseWriter: w} + h.ServeHTTP(s, r) - log.Info(). - Str("trace_id", traceId). - Str("method", r.Method). - Str("url", r.URL.Path). - Str("remote_address", r.RemoteAddr). - Int("status_code", record.Status). 
- Msg("request completed") - - return err - }) - } -} - -func SugarLogger(log zerolog.Logger) server.Middleware { - orange := func(s string) string { return "\033[33m" + s + "\033[0m" } - aqua := func(s string) string { return "\033[36m" + s + "\033[0m" } - red := func(s string) string { return "\033[31m" + s + "\033[0m" } - green := func(s string) string { return "\033[32m" + s + "\033[0m" } - - fmtCode := func(code int) string { - switch { - case code >= 500: - return red(fmt.Sprintf("%d", code)) - case code >= 400: - return orange(fmt.Sprintf("%d", code)) - case code >= 300: - return aqua(fmt.Sprintf("%d", code)) - default: - return green(fmt.Sprintf("%d", code)) - } - } - bold := func(s string) string { return "\033[1m" + s + "\033[0m" } - - atLeast6 := func(s string) string { - for len(s) <= 6 { - s += " " - } - return s - } - - return func(next server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - record := &statusRecorder{ResponseWriter: w, Status: http.StatusOK} - - err := next.ServeHTTP(record, r) // Blocks until the next handler returns. - - url := fmt.Sprintf("%s %s", r.RequestURI, r.Proto) - - log.Info(). - Str("trace_id", server.GetTraceID(r.Context())). - Msgf("%s %s %s", - bold(fmtCode(record.Status)), - bold(orange(atLeast6(r.Method))), - aqua(url), - ) - - return err + l.Info().Str("method", r.Method).Str("path", r.URL.Path).Int("status", s.status).Str("rid", reqID).Msg("request finished") }) } } diff --git a/backend/internal/web/mid/panic.go b/backend/internal/web/mid/panic.go deleted file mode 100644 index 9879bb8..0000000 --- a/backend/internal/web/mid/panic.go +++ /dev/null @@ -1,33 +0,0 @@ -package mid - -import ( - "fmt" - "net/http" - "runtime/debug" - - "github.com/hay-kot/homebox/backend/pkgs/server" -) - -// Panic is a middleware that recovers from panics anywhere in the chain and wraps the error. -// and returns it up the middleware chain. 
-func Panic(develop bool) server.Middleware { - return func(h server.Handler) server.Handler { - return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) (err error) { - defer func() { - if rec := recover(); rec != nil { - trace := debug.Stack() - - if develop { - err = fmt.Errorf("PANIC [%v]", rec) - fmt.Printf("%s", string(trace)) - } else { - err = fmt.Errorf("PANIC [%v] TRACE[%s]", rec, string(trace)) - } - - } - }() - - return h.ServeHTTP(w, r) - }) - } -} diff --git a/backend/pkgs/cgofreesqlite/sqlite.go b/backend/pkgs/cgofreesqlite/sqlite.go new file mode 100644 index 0000000..c9faf7a --- /dev/null +++ b/backend/pkgs/cgofreesqlite/sqlite.go @@ -0,0 +1,40 @@ +// Package cgofreesqlite package provides a CGO free implementation of the sqlite3 driver. This wraps the +// modernc.org/sqlite driver and adds the PRAGMA foreign_keys = ON; statement to the connection +// initialization as well as registering the driver with the sql package as "sqlite3" for compatibility +// with entgo.io +// +// NOTE: This does come with around a 30% performance hit compared to the CGO version of the driver. +// however it greatly simplifies the build process and allows for cross compilation. 
+package cgofreesqlite + +import ( + "database/sql" + "database/sql/driver" + + "modernc.org/sqlite" +) + +type CGOFreeSqliteDriver struct { + *sqlite.Driver +} + +type sqlite3DriverConn interface { + Exec(string, []driver.Value) (driver.Result, error) +} + +func (d CGOFreeSqliteDriver) Open(name string) (conn driver.Conn, err error) { + conn, err = d.Driver.Open(name) + if err != nil { + return nil, err + } + _, err = conn.(sqlite3DriverConn).Exec("PRAGMA foreign_keys = ON;", nil) + if err != nil { + _ = conn.Close() + return nil, err + } + return conn, err +} + +func init() { //nolint:gochecknoinits + sql.Register("sqlite3", CGOFreeSqliteDriver{Driver: &sqlite.Driver{}}) +} diff --git a/backend/pkgs/faker/random.go b/backend/pkgs/faker/random.go index e7b51b9..62e4ff2 100644 --- a/backend/pkgs/faker/random.go +++ b/backend/pkgs/faker/random.go @@ -1,3 +1,4 @@ +// Package faker provides a simple interface for generating fake data for testing. package faker import ( @@ -10,7 +11,6 @@ var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") type Faker struct{} func NewFaker() *Faker { - rand.Seed(time.Now().UnixNano()) return &Faker{} } diff --git a/backend/pkgs/hasher/doc.go b/backend/pkgs/hasher/doc.go new file mode 100644 index 0000000..4cbdab4 --- /dev/null +++ b/backend/pkgs/hasher/doc.go @@ -0,0 +1,2 @@ +// Package hasher provides a simple interface for hashing and verifying passwords. 
+package hasher diff --git a/backend/pkgs/hasher/password.go b/backend/pkgs/hasher/password.go index 1be8251..a68c868 100644 --- a/backend/pkgs/hasher/password.go +++ b/backend/pkgs/hasher/password.go @@ -9,7 +9,7 @@ import ( var enabled = true -func init() { +func init() { // nolint: gochecknoinits disableHas := os.Getenv("UNSAFE_DISABLE_PASSWORD_PROJECTION") == "yes_i_am_sure" if disableHas { diff --git a/backend/pkgs/mailer/mailer.go b/backend/pkgs/mailer/mailer.go index 22609aa..9b593bc 100644 --- a/backend/pkgs/mailer/mailer.go +++ b/backend/pkgs/mailer/mailer.go @@ -1,3 +1,4 @@ +// Package mailer provides a simple mailer for sending emails. package mailer import ( diff --git a/backend/pkgs/mailer/mailer_test.go b/backend/pkgs/mailer/mailer_test.go index 3e67a68..89e55ca 100644 --- a/backend/pkgs/mailer/mailer_test.go +++ b/backend/pkgs/mailer/mailer_test.go @@ -5,7 +5,7 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -59,5 +59,5 @@ func Test_Mailer(t *testing.T) { err = mailer.Send(msg) - assert.Nil(t, err) + require.NoError(t, err) } diff --git a/backend/pkgs/pathlib/pathlib.go b/backend/pkgs/pathlib/pathlib.go index 24420aa..e59366d 100644 --- a/backend/pkgs/pathlib/pathlib.go +++ b/backend/pkgs/pathlib/pathlib.go @@ -1,3 +1,4 @@ +// Package pathlib provides a way to safely create a file path without overwriting any existing files. 
package pathlib import ( @@ -14,7 +15,7 @@ var dirReader dirReaderFunc = func(directory string) []string { if err != nil { return nil } - defer f.Close() + defer func() { _ = f.Close() }() names, err := f.Readdirnames(-1) if err != nil { diff --git a/backend/pkgs/server/constants.go b/backend/pkgs/server/constants.go deleted file mode 100644 index e083a57..0000000 --- a/backend/pkgs/server/constants.go +++ /dev/null @@ -1,8 +0,0 @@ -package server - -const ( - ContentType = "Content-Type" - ContentJSON = "application/json" - ContentXML = "application/xml" - ContentFormUrlEncoded = "application/x-www-form-urlencoded" -) diff --git a/backend/pkgs/server/errors.go b/backend/pkgs/server/errors.go deleted file mode 100644 index 5b1d60b..0000000 --- a/backend/pkgs/server/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -package server - -import "errors" - -type shutdownError struct { - message string -} - -func (e *shutdownError) Error() string { - return e.message -} - -// ShutdownError returns an error that indicates that the server has lost -// integrity and should be shut down. -func ShutdownError(message string) error { - return &shutdownError{message} -} - -// IsShutdownError returns true if the error is a shutdown error. -func IsShutdownError(err error) bool { - var e *shutdownError - return errors.As(err, &e) -} diff --git a/backend/pkgs/server/handler.go b/backend/pkgs/server/handler.go deleted file mode 100644 index 76ae131..0000000 --- a/backend/pkgs/server/handler.go +++ /dev/null @@ -1,25 +0,0 @@ -package server - -import ( - "net/http" -) - -type HandlerFunc func(w http.ResponseWriter, r *http.Request) error - -func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error { - return f(w, r) -} - -type Handler interface { - ServeHTTP(http.ResponseWriter, *http.Request) error -} - -// ToHandler converts a function to a customer implementation of the Handler interface. -// that returns an error. 
This wrapper around the handler function and simply -// returns the nil in all cases -func ToHandler(handler http.Handler) Handler { - return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - handler.ServeHTTP(w, r) - return nil - }) -} diff --git a/backend/pkgs/server/middleware.go b/backend/pkgs/server/middleware.go deleted file mode 100644 index 8e3bb23..0000000 --- a/backend/pkgs/server/middleware.go +++ /dev/null @@ -1,37 +0,0 @@ -package server - -import ( - "net/http" - "strings" -) - -type Middleware func(Handler) Handler - -// wrapMiddleware creates a new handler by wrapping middleware around a final -// handler. The middlewares' Handlers will be executed by requests in the order -// they are provided. -func wrapMiddleware(mw []Middleware, handler Handler) Handler { - // Loop backwards through the middleware invoking each one. Replace the - // handler with the new wrapped handler. Looping backwards ensures that the - // first middleware of the slice is the first to be executed by requests. - for i := len(mw) - 1; i >= 0; i-- { - h := mw[i] - if h != nil { - handler = h(handler) - } - } - - return handler -} - -// StripTrailingSlash is a middleware that will strip trailing slashes from the request path. -// -// Example: /api/v1/ -> /api/v1 -func StripTrailingSlash() Middleware { - return func(h Handler) Handler { - return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error { - r.URL.Path = strings.TrimSuffix(r.URL.Path, "/") - return h.ServeHTTP(w, r) - }) - } -} diff --git a/backend/pkgs/server/mux.go b/backend/pkgs/server/mux.go deleted file mode 100644 index 7f62ab7..0000000 --- a/backend/pkgs/server/mux.go +++ /dev/null @@ -1,102 +0,0 @@ -package server - -import ( - "context" - "net/http" - - "github.com/google/uuid" -) - -type vkey int - -const ( - // Key is the key for the server in the request context. 
- key vkey = 1 -) - -type Values struct { - TraceID string -} - -func GetTraceID(ctx context.Context) string { - v, ok := ctx.Value(key).(Values) - if !ok { - return "" - } - return v.TraceID -} - -func (s *Server) toHttpHandler(handler Handler, mw ...Middleware) http.HandlerFunc { - handler = wrapMiddleware(mw, handler) - - handler = wrapMiddleware(s.mw, handler) - - return func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - // Add the trace ID to the context - ctx = context.WithValue(ctx, key, Values{ - TraceID: uuid.NewString(), - }) - - err := handler.ServeHTTP(w, r.WithContext(ctx)) - if err != nil { - if IsShutdownError(err) { - _ = s.Shutdown("SIGTERM") - } - } - } -} - -func (s *Server) handle(method, pattern string, handler Handler, mw ...Middleware) { - h := s.toHttpHandler(handler, mw...) - - switch method { - case http.MethodGet: - s.mux.Get(pattern, h) - case http.MethodPost: - s.mux.Post(pattern, h) - case http.MethodPut: - s.mux.Put(pattern, h) - case http.MethodDelete: - s.mux.Delete(pattern, h) - case http.MethodPatch: - s.mux.Patch(pattern, h) - case http.MethodHead: - s.mux.Head(pattern, h) - case http.MethodOptions: - s.mux.Options(pattern, h) - } -} - -func (s *Server) Get(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodGet, pattern, handler, mw...) -} - -func (s *Server) Post(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodPost, pattern, handler, mw...) -} - -func (s *Server) Put(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodPut, pattern, handler, mw...) -} - -func (s *Server) Delete(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodDelete, pattern, handler, mw...) -} - -func (s *Server) Patch(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodPatch, pattern, handler, mw...) 
-} - -func (s *Server) Head(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodHead, pattern, handler, mw...) -} - -func (s *Server) Options(pattern string, handler Handler, mw ...Middleware) { - s.handle(http.MethodOptions, pattern, handler, mw...) -} - -func (s *Server) NotFound(handler Handler) { - s.mux.NotFound(s.toHttpHandler(handler)) -} diff --git a/backend/pkgs/server/request.go b/backend/pkgs/server/request.go deleted file mode 100644 index 38c3189..0000000 --- a/backend/pkgs/server/request.go +++ /dev/null @@ -1,48 +0,0 @@ -package server - -import ( - "encoding/json" - "net/http" -) - -// Decode reads the body of an HTTP request looking for a JSON document. The -// body is decoded into the provided value. -func Decode(r *http.Request, val interface{}) error { - decoder := json.NewDecoder(r.Body) - // decoder.DisallowUnknownFields() - if err := decoder.Decode(val); err != nil { - return err - } - return nil -} - -// GetId is a shortcut to get the id from the request URL or return a default value -func GetParam(r *http.Request, key, d string) string { - val := r.URL.Query().Get(key) - - if val == "" { - return d - } - - return val -} - -// GetSkip is a shortcut to get the skip from the request URL parameters -func GetSkip(r *http.Request, d string) string { - return GetParam(r, "skip", d) -} - -// GetSkip is a shortcut to get the skip from the request URL parameters -func GetId(r *http.Request, d string) string { - return GetParam(r, "id", d) -} - -// GetLimit is a shortcut to get the limit from the request URL parameters -func GetLimit(r *http.Request, d string) string { - return GetParam(r, "limit", d) -} - -// GetQuery is a shortcut to get the sort from the request URL parameters -func GetQuery(r *http.Request, d string) string { - return GetParam(r, "query", d) -} diff --git a/backend/pkgs/server/request_test.go b/backend/pkgs/server/request_test.go deleted file mode 100644 index 05dc8c5..0000000 --- 
a/backend/pkgs/server/request_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package server - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -type TestStruct struct { - Name string `json:"name"` - Data string `json:"data"` -} - -func TestDecode(t *testing.T) { - type args struct { - r *http.Request - val interface{} - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "check_error", - args: args{ - r: &http.Request{ - Body: http.NoBody, - }, - val: make(map[string]interface{}), - }, - wantErr: true, - }, - { - name: "check_success", - args: args{ - r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)), - val: TestStruct{ - Name: "test", - Data: "test", - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := Decode(tt.args.r, &tt.args.val); (err != nil) != tt.wantErr { - t.Errorf("Decode() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestGetParam(t *testing.T) { - type args struct { - r *http.Request - key string - d string - } - tests := []struct { - name string - args args - want string - }{ - { - name: "check_default", - args: args{ - r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)), - key: "id", - d: "default", - }, - want: "default", - }, - { - name: "check_id", - args: args{ - r: httptest.NewRequest("POST", "/item?id=123", strings.NewReader(`{"name":"test","data":"test"}`)), - key: "id", - d: "", - }, - want: "123", - }, - { - name: "check_query", - args: args{ - r: httptest.NewRequest("POST", "/item?query=hello-world", strings.NewReader(`{"name":"test","data":"test"}`)), - key: "query", - d: "", - }, - want: "hello-world", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetParam(tt.args.r, tt.args.key, tt.args.d); got != tt.want { - t.Errorf("GetParam() = %v, want %v", got, tt.want) - } - }) - } -} - -func 
TestGetSkip(t *testing.T) { - type args struct { - r *http.Request - d string - } - tests := []struct { - name string - args args - want string - }{ - { - name: "check_default", - args: args{ - r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "0", - }, - { - name: "check_skip", - args: args{ - r: httptest.NewRequest("POST", "/item?skip=107", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "107", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetSkip(tt.args.r, tt.args.d); got != tt.want { - t.Errorf("GetSkip() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetLimit(t *testing.T) { - type args struct { - r *http.Request - d string - } - tests := []struct { - name string - args args - want string - }{ - { - name: "check_default", - args: args{ - r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "0", - }, - { - name: "check_limit", - args: args{ - r: httptest.NewRequest("POST", "/item?limit=107", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "107", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetLimit(tt.args.r, tt.args.d); got != tt.want { - t.Errorf("GetLimit() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestGetQuery(t *testing.T) { - type args struct { - r *http.Request - d string - } - tests := []struct { - name string - args args - want string - }{ - { - name: "check_default", - args: args{ - r: httptest.NewRequest("POST", "/", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "0", - }, - { - name: "check_query", - args: args{ - r: httptest.NewRequest("POST", "/item?query=hello-query", strings.NewReader(`{"name":"test","data":"test"}`)), - d: "0", - }, - want: "hello-query", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if 
got := GetQuery(tt.args.r, tt.args.d); got != tt.want { - t.Errorf("GetQuery() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/backend/pkgs/server/response.go b/backend/pkgs/server/response.go deleted file mode 100644 index 7d5880e..0000000 --- a/backend/pkgs/server/response.go +++ /dev/null @@ -1,39 +0,0 @@ -package server - -import ( - "encoding/json" - "net/http" -) - -type ErrorResponse struct { - Error string `json:"error"` - Fields map[string]string `json:"fields,omitempty"` -} - -// Respond converts a Go value to JSON and sends it to the client. -// Adapted from https://github.com/ardanlabs/service/tree/master/foundation/web -func Respond(w http.ResponseWriter, statusCode int, data interface{}) error { - if statusCode == http.StatusNoContent { - w.WriteHeader(statusCode) - return nil - } - - // Convert the response value to JSON. - jsonData, err := json.Marshal(data) - if err != nil { - panic(err) - } - - // Set the content type and headers once we know marshaling has succeeded. - w.Header().Set("Content-Type", ContentJSON) - - // Write the status code to the response. - w.WriteHeader(statusCode) - - // Send the result back to the client. 
- if _, err := w.Write(jsonData); err != nil { - return err - } - - return nil -} diff --git a/backend/pkgs/server/response_test.go b/backend/pkgs/server/response_test.go deleted file mode 100644 index 14e7a37..0000000 --- a/backend/pkgs/server/response_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package server - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_Respond_NoContent(t *testing.T) { - recorder := httptest.NewRecorder() - dummystruct := struct { - Name string - }{ - Name: "dummy", - } - - err := Respond(recorder, http.StatusNoContent, dummystruct) - assert.NoError(t, err) - - assert.Equal(t, http.StatusNoContent, recorder.Code) - assert.Empty(t, recorder.Body.String()) -} - -func Test_Respond_JSON(t *testing.T) { - recorder := httptest.NewRecorder() - dummystruct := struct { - Name string `json:"name"` - }{ - Name: "dummy", - } - - err := Respond(recorder, http.StatusCreated, dummystruct) - assert.NoError(t, err) - - assert.Equal(t, http.StatusCreated, recorder.Code) - assert.JSONEq(t, recorder.Body.String(), `{"name":"dummy"}`) - assert.Equal(t, "application/json", recorder.Header().Get("Content-Type")) -} diff --git a/backend/pkgs/server/result.go b/backend/pkgs/server/result.go deleted file mode 100644 index 69dcf81..0000000 --- a/backend/pkgs/server/result.go +++ /dev/null @@ -1,19 +0,0 @@ -package server - -type Result struct { - Error bool `json:"error,omitempty"` - Details interface{} `json:"details,omitempty"` - Message string `json:"message,omitempty"` - Item interface{} `json:"item,omitempty"` -} - -type Results struct { - Items any `json:"items"` -} - -// Wrap creates a Wrapper instance and adds the initial namespace and data to be returned. 
-func Wrap(data interface{}) Result { - return Result{ - Item: data, - } -} diff --git a/backend/pkgs/server/server.go b/backend/pkgs/server/server.go deleted file mode 100644 index d021b31..0000000 --- a/backend/pkgs/server/server.go +++ /dev/null @@ -1,144 +0,0 @@ -package server - -import ( - "context" - "errors" - "fmt" - "net/http" - "os" - "os/signal" - "sync" - "syscall" - "time" - - "github.com/go-chi/chi/v5" -) - -var ( - ErrServerNotStarted = errors.New("server not started") - ErrServerAlreadyStarted = errors.New("server already started") -) - -type Server struct { - Host string - Port string - Worker Worker - - wg sync.WaitGroup - mux *chi.Mux - - // mw is the global middleware chain for the server. - mw []Middleware - - started bool - activeServer *http.Server - - idleTimeout time.Duration - readTimeout time.Duration - writeTimeout time.Duration -} - -func NewServer(opts ...Option) *Server { - s := &Server{ - Host: "localhost", - Port: "8080", - mux: chi.NewRouter(), - Worker: NewSimpleWorker(), - idleTimeout: 30 * time.Second, - readTimeout: 10 * time.Second, - writeTimeout: 10 * time.Second, - } - - for _, opt := range opts { - err := opt(s) - if err != nil { - panic(err) - } - } - - return s -} - -func (s *Server) Shutdown(sig string) error { - if !s.started { - return ErrServerNotStarted - } - fmt.Printf("Received %s signal, shutting down\n", sig) - - // Create a context with a 5-second timeout. 
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - err := s.activeServer.Shutdown(ctx) - s.started = false - if err != nil { - return err - } - - fmt.Println("Http server shutdown, waiting for all tasks to finish") - s.wg.Wait() - - return nil -} - -func (s *Server) Start() error { - if s.started { - return ErrServerAlreadyStarted - } - - s.activeServer = &http.Server{ - Addr: s.Host + ":" + s.Port, - Handler: s.mux, - IdleTimeout: s.idleTimeout, - ReadTimeout: s.readTimeout, - WriteTimeout: s.writeTimeout, - } - - shutdownError := make(chan error) - - go func() { - // Create a quit channel which carries os.Signal values. - quit := make(chan os.Signal, 1) - - // Use signal.Notify() to listen for incoming SIGINT and SIGTERM signals and - // relay them to the quit channel. - signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) - - // Read the signal from the quit channel. block until received - sig := <-quit - - err := s.Shutdown(sig.String()) - if err != nil { - shutdownError <- err - } - - // Exit the application with a 0 (success) status code. - os.Exit(0) - }() - - s.started = true - err := s.activeServer.ListenAndServe() - - if !errors.Is(err, http.ErrServerClosed) { - return err - } - - err = <-shutdownError - if err != nil { - return err - } - - fmt.Println("Server shutdown successfully") - - return nil -} - -// Background starts a go routine that runs on the servers pool. In the event of a shutdown -// request, the server will wait until all open goroutines have finished before shutting down. 
-func (svr *Server) Background(task func()) { - svr.wg.Add(1) - svr.Worker.Add(func() { - defer svr.wg.Done() - task() - }) -} diff --git a/backend/pkgs/server/server_options.go b/backend/pkgs/server/server_options.go deleted file mode 100644 index 93b7781..0000000 --- a/backend/pkgs/server/server_options.go +++ /dev/null @@ -1,54 +0,0 @@ -package server - -import "time" - -type Option = func(s *Server) error - -func WithMiddleware(mw ...Middleware) Option { - return func(s *Server) error { - s.mw = append(s.mw, mw...) - return nil - } -} - -func WithWorker(w Worker) Option { - return func(s *Server) error { - s.Worker = w - return nil - } -} - -func WithHost(host string) Option { - return func(s *Server) error { - s.Host = host - return nil - } -} - -func WithPort(port string) Option { - return func(s *Server) error { - s.Port = port - return nil - } -} - -func WithReadTimeout(seconds int) Option { - return func(s *Server) error { - s.readTimeout = time.Duration(seconds) * time.Second - return nil - } -} - -func WithWriteTimeout(seconds int) Option { - return func(s *Server) error { - s.writeTimeout = time.Duration(seconds) * time.Second - return nil - } -} - -func WithIdleTimeout(seconds int) Option { - return func(s *Server) error { - s.idleTimeout = time.Duration(seconds) * time.Second - return nil - } -} diff --git a/backend/pkgs/server/server_test.go b/backend/pkgs/server/server_test.go deleted file mode 100644 index a5cb218..0000000 --- a/backend/pkgs/server/server_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package server - -import ( - "net/http" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func testServer(t *testing.T, r http.Handler) *Server { - svr := NewServer(WithHost("127.0.0.1"), WithPort("19245")) - - if r != nil { - svr.mux.Mount("/", r) - } - go func() { - err := svr.Start() - assert.NoError(t, err) - }() - - ping := func() error { - _, err := http.Get("http://127.0.0.1:19245") - return err - } - - for { - if 
err := ping(); err == nil { - break - } - time.Sleep(time.Millisecond * 100) - } - - return svr -} - -func Test_ServerShutdown_Error(t *testing.T) { - svr := NewServer(WithHost("127.0.0.1"), WithPort("19245")) - - err := svr.Shutdown("test") - assert.ErrorIs(t, err, ErrServerNotStarted) -} - -func Test_ServerStarts_Error(t *testing.T) { - svr := testServer(t, nil) - - err := svr.Start() - assert.ErrorIs(t, err, ErrServerAlreadyStarted) - - err = svr.Shutdown("test") - assert.NoError(t, err) -} - -func Test_ServerStarts(t *testing.T) { - svr := testServer(t, nil) - err := svr.Shutdown("test") - assert.NoError(t, err) -} - -func Test_GracefulServerShutdownWithWorkers(t *testing.T) { - isFinished := false - - svr := testServer(t, nil) - - svr.Background(func() { - time.Sleep(time.Second * 4) - isFinished = true - }) - - err := svr.Shutdown("test") - - assert.NoError(t, err) - assert.True(t, isFinished) -} - -func Test_GracefulServerShutdownWithRequests(t *testing.T) { - var isFinished atomic.Bool - - router := http.NewServeMux() - - // add long running handler func - router.HandleFunc("/test", func(rw http.ResponseWriter, r *http.Request) { - time.Sleep(time.Second * 3) - isFinished.Store(true) - }) - - svr := testServer(t, router) - - // Make request to "/test" - go func() { - _, _ = http.Get("http://127.0.0.1:19245/test") // This is probably bad? 
- }() - - time.Sleep(time.Second) // Hack to wait for the request to be made - - err := svr.Shutdown("test") - assert.NoError(t, err) - - assert.True(t, isFinished.Load()) -} diff --git a/backend/pkgs/server/worker.go b/backend/pkgs/server/worker.go deleted file mode 100644 index 9cf1cc7..0000000 --- a/backend/pkgs/server/worker.go +++ /dev/null @@ -1,21 +0,0 @@ -package server - -// TODO: #2 Implement Go routine pool/job queue - -type Worker interface { - Add(func()) -} - -// SimpleWorker is a simple background worker that implements -// the Worker interface and runs all tasks in a go routine without -// a pool or que or limits. It's useful for simple or small applications -// with minimal/short background tasks -type SimpleWorker struct{} - -func NewSimpleWorker() *SimpleWorker { - return &SimpleWorker{} -} - -func (sw *SimpleWorker) Add(task func()) { - go task() -} diff --git a/backend/pkgs/set/set.go b/backend/pkgs/set/set.go index b0918bb..fca1c98 100644 --- a/backend/pkgs/set/set.go +++ b/backend/pkgs/set/set.go @@ -1,3 +1,4 @@ +// Package set provides a simple set implementation. 
package set type key interface { diff --git a/docker-compose.yml b/docker-compose.yml index a1108fc..6b57760 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,3 @@ -version: "3.4" - services: homebox: image: homebox diff --git a/docs/docs/api/openapi-2.0.json b/docs/docs/api/openapi-2.0.json new file mode 100644 index 0000000..b10c93a --- /dev/null +++ b/docs/docs/api/openapi-2.0.json @@ -0,0 +1,2992 @@ +{ + "swagger": "2.0", + "info": { + "description": "Track, Manage, and Organize your Things.", + "title": "Homebox API", + "contact": { + "name": "Don't" + }, + "version": "1.0" + }, + "basePath": "/api", + "paths": { + "/v1/actions/ensure-asset-ids": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Ensures all items in the database have an asset ID", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Ensure Asset IDs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/ensure-import-refs": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Ensures all items in the database have an import ref", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Ensures Import Refs", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/set-primary-photos": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Sets the first photo of each item as the primary photo", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Set Primary Photos", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/actions/zero-item-time-fields": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "description": "Resets all item date 
fields to the beginning of the day", + "produces": [ + "application/json" + ], + "tags": [ + "Actions" + ], + "summary": "Zero Out Time Fields", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ActionAmountResult" + } + } + } + } + }, + "/v1/assets/{id}": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get Item by Asset ID", + "parameters": [ + { + "type": "string", + "description": "Asset ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary" + } + } + } + } + }, + "/v1/currency": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Base" + ], + "summary": "Currency", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/currencies.Currency" + } + } + } + } + }, + "/v1/groups": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Group" + ], + "summary": "Get Group", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.Group" + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Group" + ], + "summary": "Update Group", + "parameters": [ + { + "description": "User Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.GroupUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.Group" + } + } + } + } + }, + "/v1/groups/invitations": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Group" + ], + "summary": "Create Group Invitation", + "parameters": [ + { + "description": "User Data", + "name": 
"payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1.GroupInvitationCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.GroupInvitation" + } + } + } + } + }, + "/v1/groups/statistics": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Statistics" + ], + "summary": "Get Group Statistics", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.GroupStatistics" + } + } + } + } + }, + "/v1/groups/statistics/labels": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Statistics" + ], + "summary": "Get Label Statistics", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.TotalsByOrganizer" + } + } + } + } + } + }, + "/v1/groups/statistics/locations": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Statistics" + ], + "summary": "Get Location Statistics", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.TotalsByOrganizer" + } + } + } + } + } + }, + "/v1/groups/statistics/purchase-price": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Statistics" + ], + "summary": "Get Purchase Price Statistics", + "parameters": [ + { + "type": "string", + "description": "start date", + "name": "start", + "in": "query" + }, + { + "type": "string", + "description": "end date", + "name": "end", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ValueOverTime" + } + } + } + } + }, + "/v1/items": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + 
"application/json" + ], + "tags": [ + "Items" + ], + "summary": "Query All Items", + "parameters": [ + { + "type": "string", + "description": "search string", + "name": "q", + "in": "query" + }, + { + "type": "integer", + "description": "page number", + "name": "page", + "in": "query" + }, + { + "type": "integer", + "description": "items per page", + "name": "pageSize", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "label Ids", + "name": "labels", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "location Ids", + "name": "locations", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "parent Ids", + "name": "parentIds", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.PaginationResult-repo_ItemSummary" + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Create Item", + "parameters": [ + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemCreate" + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/repo.ItemSummary" + } + } + } + } + }, + "/v1/items/export": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Items" + ], + "summary": "Export Items", + "responses": { + "200": { + "description": "text/csv", + "schema": { + "type": "string" + } + } + } + } + }, + "/v1/items/fields": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get All Custom Field Names", + "responses": { + "200": { + "description": "OK", + 
"schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + }, + "/v1/items/fields/values": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get All Custom Field Values", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + }, + "/v1/items/import": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Import Items", + "parameters": [ + { + "type": "file", + "description": "Image to upload", + "name": "csv", + "in": "formData", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/items/{id}": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get Item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Update Item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Delete Item", + "parameters": [ + { + "type": "string", 
+ "description": "Item ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + }, + "patch": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Update Item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Item Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemPatch" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } + } + }, + "/v1/items/{id}/attachments": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items Attachments" + ], + "summary": "Create Item Attachment", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "file", + "description": "File attachment", + "name": "file", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "Type of file", + "name": "type", + "in": "formData", + "required": true + }, + { + "type": "string", + "description": "name of the file including extension", + "name": "name", + "in": "formData", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + }, + "422": { + "description": "Unprocessable Entity", + "schema": { + "$ref": "#/definitions/validate.ErrorResponse" + } + } + } + } + }, + "/v1/items/{id}/attachments/{attachment_id}": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/octet-stream" + ], + "tags": [ + "Items Attachments" + ], + "summary": "Get Item Attachment", + "parameters": [ + { + "type": "string", + "description": "Item ID", + 
"name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attachment ID", + "name": "attachment_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.ItemAttachmentToken" + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Items Attachments" + ], + "summary": "Update Item Attachment", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attachment ID", + "name": "attachment_id", + "in": "path", + "required": true + }, + { + "description": "Attachment Update", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.ItemAttachmentUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.ItemOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Items Attachments" + ], + "summary": "Delete Item Attachment", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Attachment ID", + "name": "attachment_id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/items/{id}/maintenance": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Maintenance" + ], + "summary": "Get Maintenance Log", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.MaintenanceLog" + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Maintenance" + ], + "summary": "Create Maintenance Entry", + "parameters": [ + { + 
"description": "Entry Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.MaintenanceEntryCreate" + } + } + ], + "responses": { + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/repo.MaintenanceEntry" + } + } + } + } + }, + "/v1/items/{id}/maintenance/{entry_id}": { + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Maintenance" + ], + "summary": "Update Maintenance Entry", + "parameters": [ + { + "description": "Entry Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.MaintenanceEntryUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.MaintenanceEntry" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Maintenance" + ], + "summary": "Delete Maintenance Entry", + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/items/{id}/path": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Get the full path of an item", + "parameters": [ + { + "type": "string", + "description": "Item ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemPath" + } + } + } + } + } + }, + "/v1/labels": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Labels" + ], + "summary": "Get All Labels", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.LabelOut" + } + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + 
"application/json" + ], + "tags": [ + "Labels" + ], + "summary": "Create Label", + "parameters": [ + { + "description": "Label Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.LabelCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.LabelSummary" + } + } + } + } + }, + "/v1/labels/{id}": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Labels" + ], + "summary": "Get Label", + "parameters": [ + { + "type": "string", + "description": "Label ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.LabelOut" + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Labels" + ], + "summary": "Update Label", + "parameters": [ + { + "type": "string", + "description": "Label ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.LabelOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Labels" + ], + "summary": "Delete Label", + "parameters": [ + { + "type": "string", + "description": "Label ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/locations": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Get All Locations", + "parameters": [ + { + "type": "boolean", + "description": "Filter locations with parents", + "name": "filterChildren", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": 
"array", + "items": { + "$ref": "#/definitions/repo.LocationOutCount" + } + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Create Location", + "parameters": [ + { + "description": "Location Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.LocationCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.LocationSummary" + } + } + } + } + }, + "/v1/locations/tree": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Get Locations Tree", + "parameters": [ + { + "type": "boolean", + "description": "include items in response tree", + "name": "withItems", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.TreeItem" + } + } + } + } + } + }, + "/v1/locations/{id}": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Get Location", + "parameters": [ + { + "type": "string", + "description": "Location ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.LocationOut" + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Update Location", + "parameters": [ + { + "type": "string", + "description": "Location ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Location Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.LocationUpdate" + } + } + ], + "responses": { + "200": { + 
"description": "OK", + "schema": { + "$ref": "#/definitions/repo.LocationOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Locations" + ], + "summary": "Delete Location", + "parameters": [ + { + "type": "string", + "description": "Location ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/notifiers": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Get Notifiers", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Create Notifier", + "parameters": [ + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierCreate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + } + }, + "/v1/notifiers/test": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Notifiers" + ], + "summary": "Test Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "URL", + "name": "url", + "in": "query", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/notifiers/{id}": { + "put": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Update Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier 
ID", + "name": "id", + "in": "path", + "required": true + }, + { + "description": "Notifier Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.NotifierUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/repo.NotifierOut" + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Notifiers" + ], + "summary": "Delete a Notifier", + "parameters": [ + { + "type": "string", + "description": "Notifier ID", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/qrcode": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Items" + ], + "summary": "Create QR Code", + "parameters": [ + { + "type": "string", + "description": "data to be encoded into qrcode", + "name": "data", + "in": "query" + } + ], + "responses": { + "200": { + "description": "image/jpeg", + "schema": { + "type": "string" + } + } + } + } + }, + "/v1/reporting/bill-of-materials": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Reporting" + ], + "summary": "Export Bill of Materials", + "responses": { + "200": { + "description": "text/csv", + "schema": { + "type": "string" + } + } + } + } + }, + "/v1/status": { + "get": { + "produces": [ + "application/json" + ], + "tags": [ + "Base" + ], + "summary": "Application Info", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.APISummary" + } + } + } + } + }, + "/v1/users/change-password": { + "put": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "User" + ], + "summary": "Change Password", + "parameters": [ + { + "description": "Password Payload", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/v1.ChangePassword" + } + } + ], + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/users/login": { + "post": { + "consumes": [ + "application/x-www-form-urlencoded", + "application/json" + ], + "produces": [ + "application/json" + ], + "tags": [ + "Authentication" + ], + "summary": "User Login", + "parameters": [ + { + "type": "string", + "example": "admin@admin.com", + "description": "string", + "name": "username", + "in": "formData" + }, + { + "type": "string", + "example": "admin", + "description": "string", + "name": "password", + "in": "formData" + }, + { + "description": "Login Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1.LoginForm" + } + }, + { + "type": "string", + "description": "auth provider", + "name": "provider", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/v1.TokenResponse" + } + } + } + } + }, + "/v1/users/logout": { + "post": { + "security": [ + { + "Bearer": [] + } + ], + "tags": [ + "Authentication" + ], + "summary": "User Logout", + "responses": { + "204": { + "description": "No Content" + } + } + } + }, + "/v1/users/refresh": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "description": "handleAuthRefresh returns a handler that will issue a new token from an existing token.\nThis does not validate that the user still exists within the database.", + "tags": [ + "Authentication" + ], + "summary": "User Token Refresh", + "responses": { + "200": { + "description": "OK" + } + } + } + }, + "/v1/users/register": { + "post": { + "produces": [ + "application/json" + ], + "tags": [ + "User" + ], + "summary": "Register New User", + "parameters": [ + { + "description": "User Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/services.UserRegistration" + } + } + ], + "responses": { + "204": { + "description": "No Content" 
+ } + } + } + }, + "/v1/users/self": { + "get": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "User" + ], + "summary": "Get User Self", + "responses": { + "200": { + "description": "OK", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/v1.Wrapped" + }, + { + "type": "object", + "properties": { + "item": { + "$ref": "#/definitions/repo.UserOut" + } + } + } + ] + } + } + } + }, + "put": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "User" + ], + "summary": "Update Account", + "parameters": [ + { + "description": "User Data", + "name": "payload", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repo.UserUpdate" + } + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "allOf": [ + { + "$ref": "#/definitions/v1.Wrapped" + }, + { + "type": "object", + "properties": { + "item": { + "$ref": "#/definitions/repo.UserUpdate" + } + } + } + ] + } + } + } + }, + "delete": { + "security": [ + { + "Bearer": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "User" + ], + "summary": "Delete Account", + "responses": { + "204": { + "description": "No Content" + } + } + } + } + }, + "definitions": { + "currencies.Currency": { + "type": "object", + "properties": { + "code": { + "type": "string" + }, + "local": { + "type": "string" + }, + "name": { + "type": "string" + }, + "symbol": { + "type": "string" + } + } + }, + "repo.DocumentOut": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "title": { + "type": "string" + } + } + }, + "repo.Group": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "currency": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.GroupStatistics": { + "type": "object", + "properties": 
{ + "totalItemPrice": { + "type": "number" + }, + "totalItems": { + "type": "integer" + }, + "totalLabels": { + "type": "integer" + }, + "totalLocations": { + "type": "integer" + }, + "totalUsers": { + "type": "integer" + }, + "totalWithWarranty": { + "type": "integer" + } + } + }, + "repo.GroupUpdate": { + "type": "object", + "properties": { + "currency": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "repo.ItemAttachment": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "document": { + "$ref": "#/definitions/repo.DocumentOut" + }, + "id": { + "type": "string" + }, + "primary": { + "type": "boolean" + }, + "type": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.ItemAttachmentUpdate": { + "type": "object", + "properties": { + "primary": { + "type": "boolean" + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "repo.ItemCreate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "type": "string", + "maxLength": 1000 + }, + "labelIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "locationId": { + "description": "Edges", + "type": "string" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "parentId": { + "type": "string", + "x-nullable": true + } + } + }, + "repo.ItemField": { + "type": "object", + "properties": { + "booleanValue": { + "type": "boolean" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "numberValue": { + "type": "integer" + }, + "textValue": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "repo.ItemOut": { + "type": "object", + "properties": { + "archived": { + "type": "boolean" + }, + "assetId": { + "type": "string", + "example": "0" + }, + "attachments": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemAttachment" + } + }, + "createdAt": { + "type": "string" + }, 
+ "description": { + "type": "string" + }, + "fields": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemField" + } + }, + "id": { + "type": "string" + }, + "imageId": { + "type": "string" + }, + "insured": { + "type": "boolean" + }, + "labels": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.LabelSummary" + } + }, + "lifetimeWarranty": { + "description": "Warranty", + "type": "boolean" + }, + "location": { + "description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], + "x-nullable": true, + "x-omitempty": true + }, + "manufacturer": { + "type": "string" + }, + "modelNumber": { + "type": "string" + }, + "name": { + "type": "string" + }, + "notes": { + "description": "Extras", + "type": "string" + }, + "parent": { + "allOf": [ + { + "$ref": "#/definitions/repo.ItemSummary" + } + ], + "x-nullable": true, + "x-omitempty": true + }, + "purchaseFrom": { + "type": "string" + }, + "purchasePrice": { + "type": "string", + "example": "0" + }, + "purchaseTime": { + "description": "Purchase", + "type": "string" + }, + "quantity": { + "type": "integer" + }, + "serialNumber": { + "type": "string" + }, + "soldNotes": { + "type": "string" + }, + "soldPrice": { + "type": "string", + "example": "0" + }, + "soldTime": { + "description": "Sold", + "type": "string" + }, + "soldTo": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "warrantyDetails": { + "type": "string" + }, + "warrantyExpires": { + "type": "string" + } + } + }, + "repo.ItemPatch": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "quantity": { + "type": "integer", + "x-nullable": true, + "x-omitempty": true + } + } + }, + "repo.ItemPath": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/repo.ItemType" + } + } + }, + "repo.ItemSummary": { + "type": "object", + "properties": { + "archived": { + "type": 
"boolean" + }, + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "imageId": { + "type": "string" + }, + "insured": { + "type": "boolean" + }, + "labels": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.LabelSummary" + } + }, + "location": { + "description": "Edges", + "allOf": [ + { + "$ref": "#/definitions/repo.LocationSummary" + } + ], + "x-nullable": true, + "x-omitempty": true + }, + "name": { + "type": "string" + }, + "purchasePrice": { + "type": "string", + "example": "0" + }, + "quantity": { + "type": "integer" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.ItemType": { + "type": "string", + "enum": [ + "location", + "item" + ], + "x-enum-varnames": [ + "ItemTypeLocation", + "ItemTypeItem" + ] + }, + "repo.ItemUpdate": { + "type": "object", + "properties": { + "archived": { + "type": "boolean" + }, + "assetId": { + "type": "string" + }, + "description": { + "type": "string" + }, + "fields": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemField" + } + }, + "id": { + "type": "string" + }, + "insured": { + "type": "boolean" + }, + "labelIds": { + "type": "array", + "items": { + "type": "string" + } + }, + "lifetimeWarranty": { + "description": "Warranty", + "type": "boolean" + }, + "locationId": { + "description": "Edges", + "type": "string" + }, + "manufacturer": { + "type": "string" + }, + "modelNumber": { + "type": "string" + }, + "name": { + "type": "string" + }, + "notes": { + "description": "Extras", + "type": "string" + }, + "parentId": { + "type": "string", + "x-nullable": true, + "x-omitempty": true + }, + "purchaseFrom": { + "type": "string" + }, + "purchasePrice": { + "type": "string", + "example": "0" + }, + "purchaseTime": { + "description": "Purchase", + "type": "string" + }, + "quantity": { + "type": "integer" + }, + "serialNumber": { + "description": "Identifications", + "type": "string" + }, + "soldNotes": { + "type": 
"string" + }, + "soldPrice": { + "type": "string", + "example": "0" + }, + "soldTime": { + "description": "Sold", + "type": "string" + }, + "soldTo": { + "type": "string" + }, + "warrantyDetails": { + "type": "string" + }, + "warrantyExpires": { + "type": "string" + } + } + }, + "repo.LabelCreate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "color": { + "type": "string" + }, + "description": { + "type": "string", + "maxLength": 255 + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + } + } + }, + "repo.LabelOut": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.LabelSummary": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.LocationCreate": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "parentId": { + "type": "string", + "x-nullable": true + } + } + }, + "repo.LocationOut": { + "type": "object", + "properties": { + "children": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.LocationSummary" + } + }, + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "parent": { + "$ref": "#/definitions/repo.LocationSummary" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.LocationOutCount": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "itemCount": { + "type": "integer" + }, + "name": { + "type": "string" + }, + 
"updatedAt": { + "type": "string" + } + } + }, + "repo.LocationSummary": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + } + } + }, + "repo.LocationUpdate": { + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "parentId": { + "type": "string", + "x-nullable": true + } + } + }, + "repo.MaintenanceEntry": { + "type": "object", + "properties": { + "completedDate": { + "type": "string" + }, + "cost": { + "type": "string", + "example": "0" + }, + "description": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "scheduledDate": { + "type": "string" + } + } + }, + "repo.MaintenanceEntryCreate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "completedDate": { + "type": "string" + }, + "cost": { + "type": "string", + "example": "0" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "scheduledDate": { + "type": "string" + } + } + }, + "repo.MaintenanceEntryUpdate": { + "type": "object", + "properties": { + "completedDate": { + "type": "string" + }, + "cost": { + "type": "string", + "example": "0" + }, + "description": { + "type": "string" + }, + "name": { + "type": "string" + }, + "scheduledDate": { + "type": "string" + } + } + }, + "repo.MaintenanceLog": { + "type": "object", + "properties": { + "costAverage": { + "type": "number" + }, + "costTotal": { + "type": "number" + }, + "entries": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.MaintenanceEntry" + } + }, + "itemId": { + "type": "string" + } + } + }, + "repo.NotifierCreate": { + "type": "object", + "required": [ + "name", + "url" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": 
"string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string" + } + } + }, + "repo.NotifierOut": { + "type": "object", + "properties": { + "createdAt": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "updatedAt": { + "type": "string" + }, + "userId": { + "type": "string" + } + } + }, + "repo.NotifierUpdate": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "isActive": { + "type": "boolean" + }, + "name": { + "type": "string", + "maxLength": 255, + "minLength": 1 + }, + "url": { + "type": "string", + "x-nullable": true + } + } + }, + "repo.PaginationResult-repo_ItemSummary": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ItemSummary" + } + }, + "page": { + "type": "integer" + }, + "pageSize": { + "type": "integer" + }, + "total": { + "type": "integer" + } + } + }, + "repo.TotalsByOrganizer": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "total": { + "type": "number" + } + } + }, + "repo.TreeItem": { + "type": "object", + "properties": { + "children": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.TreeItem" + } + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "repo.UserOut": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "groupId": { + "type": "string" + }, + "groupName": { + "type": "string" + }, + "id": { + "type": "string" + }, + "isOwner": { + "type": "boolean" + }, + "isSuperuser": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "repo.UserUpdate": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "repo.ValueOverTime": { + "type": "object", + 
"properties": { + "end": { + "type": "string" + }, + "entries": { + "type": "array", + "items": { + "$ref": "#/definitions/repo.ValueOverTimeEntry" + } + }, + "start": { + "type": "string" + }, + "valueAtEnd": { + "type": "number" + }, + "valueAtStart": { + "type": "number" + } + } + }, + "repo.ValueOverTimeEntry": { + "type": "object", + "properties": { + "date": { + "type": "string" + }, + "name": { + "type": "string" + }, + "value": { + "type": "number" + } + } + }, + "services.UserRegistration": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "name": { + "type": "string" + }, + "password": { + "type": "string" + }, + "token": { + "type": "string" + } + } + }, + "v1.APISummary": { + "type": "object", + "properties": { + "allowRegistration": { + "type": "boolean" + }, + "build": { + "$ref": "#/definitions/v1.Build" + }, + "demo": { + "type": "boolean" + }, + "health": { + "type": "boolean" + }, + "message": { + "type": "string" + }, + "title": { + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1.ActionAmountResult": { + "type": "object", + "properties": { + "completed": { + "type": "integer" + } + } + }, + "v1.Build": { + "type": "object", + "properties": { + "buildTime": { + "type": "string" + }, + "commit": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "v1.ChangePassword": { + "type": "object", + "properties": { + "current": { + "type": "string" + }, + "new": { + "type": "string" + } + } + }, + "v1.GroupInvitation": { + "type": "object", + "properties": { + "expiresAt": { + "type": "string" + }, + "token": { + "type": "string" + }, + "uses": { + "type": "integer" + } + } + }, + "v1.GroupInvitationCreate": { + "type": "object", + "required": [ + "uses" + ], + "properties": { + "expiresAt": { + "type": "string" + }, + "uses": { + "type": "integer", + "maximum": 100, + "minimum": 1 + } + } + }, + "v1.ItemAttachmentToken": { + "type": "object", 
+ "properties": { + "token": { + "type": "string" + } + } + }, + "v1.LoginForm": { + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "stayLoggedIn": { + "type": "boolean" + }, + "username": { + "type": "string" + } + } + }, + "v1.TokenResponse": { + "type": "object", + "properties": { + "attachmentToken": { + "type": "string" + }, + "expiresAt": { + "type": "string" + }, + "token": { + "type": "string" + } + } + }, + "v1.Wrapped": { + "type": "object", + "properties": { + "item": {} + } + }, + "validate.ErrorResponse": { + "type": "object", + "properties": { + "error": { + "type": "string" + }, + "fields": { + "type": "string" + } + } + } + }, + "securityDefinitions": { + "Bearer": { + "description": "\"Type 'Bearer TOKEN' to correctly set the API Key\"", + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + } +} \ No newline at end of file diff --git a/docs/docs/assets/img/homebox-email-banner.jpg b/docs/docs/assets/img/homebox-email-banner.jpg new file mode 100644 index 0000000..e611c43 Binary files /dev/null and b/docs/docs/assets/img/homebox-email-banner.jpg differ diff --git a/docs/docs/build.md b/docs/docs/build.md index 8dbf5df..e9af902 100644 --- a/docs/docs/build.md +++ b/docs/docs/build.md @@ -4,12 +4,12 @@ This document describes how to build the project from source code. ## Prerequisites -... +TODO ## Building -... +TODO ## Running -... \ No newline at end of file +TODO \ No newline at end of file diff --git a/docs/docs/import-csv.md b/docs/docs/import-csv.md index c708e68..6ed4f4b 100644 --- a/docs/docs/import-csv.md +++ b/docs/docs/import-csv.md @@ -2,58 +2,82 @@ ## Quick Start -Using the CSV import is the recommended way for adding items to the database. It is always going to be the fastest way to import any large amount of items and provides the most flexibility when it comes to adding items. +Using the CSV import is the recommended way for adding items to the database. 
It is always going to be the fastest way to import any large number of items and provides the most flexibility when it comes to adding items. -**Limitations** +**Current Limitations** - - Currently only supports importing items, locations, and labels - - Does not support attachments. Attachments must be uploaded after import + - Imports only support importing items, locations, and labels + - Imports and Exports do not support attachments. Attachments must be uploaded after import + - CSV Exports do not support nested path exports (e.g. `Home / Office / Desk`) and will only export the Items direct parent, (though imports _do_ support nested paths) + - Cannot specify item-to-item relationships (e.g. `Item A` is a child of `Item B`) !!! tip "File Formats" The CSV import supports both CSV and TSV files. The only difference is the delimiter used. CSV files use a comma `,` as the delimiter and TSV files use a tab `\t` as the delimiter. The file extension does not matter. -**Template** - -You can use this snippet as the headers for your CSV. Copy and paste it into your spreadsheet editor of choice and fill in the value. - -```csv -ImportRef Location Labels Quantity Name Description Insured Serial Number Model Number Manufacturer Notes Purchase From Purchased Price Purchased Time Lifetime Warranty Warranty Expires Warranty Details Sold To Sold Price Sold Time Sold Notes -``` - -!!! tip "Column Order" - Column headers are just there for reference, the important thing is that the order is correct. You can change the headers to anything you like, this behavior may change in the future. - - ## CSV Reference -| Column | Type | Description | -| ----------------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| ImportRef | String (100) | Import Refs are unique strings that can be used to deduplicate imports. 
Before an item is imported, we check the database for a matching ref. If the ref exists, we skip that item. | -| Location | String | This is the location of the item that will be created. These are de-duplicated and won't create another instance when reused. | -| Labels | `;` Separated String | List of labels to apply to the item separated by a `;`, can be existing or new | -| Quantity | Integer | The quantity of items to create | -| Name | String | Name of the item | -| Description | String | Description of the item | -| Insured | Boolean | Whether or not the item is insured | -| Serial Number | String | Serial number of the item | -| Model Number | String | Model of the item | -| Manufacturer | String | Manufacturer of the item | -| Notes | String (1000) | General notes about the product | -| Purchase From | String | Name of the place the item was purchased from | -| Purchase Price | Float64 | | -| Purchase At | Date | Date the item was purchased | -| Lifetime Warranty | Boolean | true or false - case insensitive | -| Warranty Expires | Date | Date in the format | -| Warranty Details | String | Details about the warranty | -| Sold To | String | Name of the person the item was sold to | -| Sold At | Date | Date the item was sold | -| Sold Price | Float64 | | -| Sold Notes | String (1000) | | +Below are the supported columns. They are case-sensitive, can be in any order, or can be omitted unless otherwise specified. + +### Special Syntax Columns + +`HB.import_ref` + +: Import Refs are unique strings that can be used to deduplicate imports. Before an item is imported, we check the database for a matching ref. If the ref exists, we skip the creation of that item. + + * String Type + * Max 100 Characters + + Import Refs are used to de-duplicate imports. It is HIGHLY recommended that you use them to manage your items if you intend to manage your inventory via CSV import/export. 
If you do not use import refs, you will end up with duplicate items in your database on subsequent imports. + + !!! tip + + Specifying import refs also allows you to update existing items via the CSV import. If you specify an import ref that already exists in the database, we will update the existing item instead of creating a new one. + +`HB.location` + +: This is the location of the item that will be created. These are de-duplicated and won't create another instance when reused. + + * Supports Path Separators for nested locations (e.g. `Home / Office / Desk`) + +`HB.labels` + +: List of labels to apply to the item separated by a `;`, can be existing or new labels. + +`HB.field.{field_name}` (e.g. `HB.field.Serial Number`) + +: This is a special column that allows you to add custom fields to the item. The column name must start with `HB.field.` followed by the name of the field. The value of the column will be the value of the field. + + - If the cell value is empty, it will be ignored. 
+ +### Standard Columns + +| Column | Type | Description | +|----------------------|---------------|-----------------------------------------------| +| HB.quantity | Integer | The quantity of items to create | +| HB.name | String | Name of the item | +| HB.asset_id | AssetID | Asset ID for the item | +| HB.description | String | Description of the item | +| HB.insured | Boolean | Whether or not the item is insured | +| HB.serial_number | String | Serial number of the item | +| HB.model_number | String | Model of the item | +| HB.manufacturer | String | Manufacturer of the item | +| HB.notes | String (1000) | General notes about the product | +| HB.purchase_from | String | Name of the place the item was purchased from | +| HB.purchase_price | Float64 | | +| HB.purchase_time | Date | Date the item was purchased | +| HB.lifetime_warranty | Boolean | true or false - case insensitive | +| HB.warranty_expires | Date | Date in the format | +| HB.warranty_details | String | Details about the warranty | +| HB.sold_to | String | Name of the person the item was sold to | +| HB.sold_time | Date | Date the item was sold | +| HB.sold_price | Float64 | | +| HB.sold_notes | String (1000) | | **Type Key** | Type | Format | -| ------- | --------------------------------------------------- | +|---------|-----------------------------------------------------| | String | Max 255 Characters unless otherwise specified | -| Date | MM/DD/YYYY | +| Date | YYYY-MM-DD | | Boolean | true or false, yes or no, 1 or 0 - case insensitive | +| AssetID | 000-000 | diff --git a/docs/docs/index.md b/docs/docs/index.md index cc15a02..188dac5 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -15,19 +15,19 @@ -Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs. 
While developing this project I've tried to keep the following principles in mind: +Homebox is the inventory and organization system built for the Home User! With a focus on simplicity and ease of use, Homebox is the perfect solution for your home inventory, organization, and management needs. While developing this project, I've tried to keep the following principles in mind: - _Simple_ - Homebox is designed to be simple and easy to use. No complicated setup or configuration required. Use either a single docker container, or deploy yourself by compiling the binary for your platform of choice. -- _Blazingly Fast_ - Homebox is written in Go which makes it extremely fast and requires minimal resources to deploy. In general idle memory usage is less than 50MB for the whole container. +- _Blazingly Fast_ - Homebox is written in Go, which makes it extremely fast and requires minimal resources to deploy. In general idle memory usage is less than 50MB for the whole container. - _Portable_ - Homebox is designed to be portable and run on anywhere. We use SQLite and an embedded Web UI to make it easy to deploy, use, and backup. ## Project Status -Homebox is currently in early-active development and is currently in **beta** stage. This means that the project may still be unstable and clunky. Overall we are striving to not introduce any breaking changes and have checks in place to ensure migrations and upgrades are smooth. However, we do not guarantee that there will be no breaking changes. We will try to keep the documentation up to date as we make changes. +Homebox is currently in early active development and is currently in **beta** stage. This means that the project may still be unstable and clunky. Overall, we are striving to not introduce any breaking changes and have checks in place to ensure migrations and upgrades are smooth. However, we do not guarantee that there will be no breaking changes. We will try to keep the documentation up to date as we make changes. 
## Features -- Create and Manage _Items_ by provided a name and description - That's it! Homebox requires only a few details to be provided to create an item, after that you can specify as much detail as you want, or hide away some of the things you won't ever need. +- Create and Manage _Items_ by providing a name and a description - That's it! Homebox requires only a few details to be provided to create an item, after that you can specify as much detail as you want, or hide away some of the things you won't ever need. - Optional Details for Items include - Warranty Information - Sold To Information @@ -35,22 +35,22 @@ Homebox is currently in early-active development and is currently in **beta** st - Item Identifications (Serial, Model, etc) - Categorized Attachments (Images, Manuals, General) - Arbitrary/Custom Fields -- Csv Import for quickly creating and managing items +- CSV Import/Export for quickly creating and managing items - Custom Reporting - Bill of Materials Export - QR Code Label Generator - Organize _Items_ by creating _Labels_ and _Locations_ and assigning them to items. -- Multi-Tenant Support - All users are placed inside of a group and can only see items that are apart of their group. Invite family members to your group, or share an instance among friends! +- Multi-Tenant Support - All users are placed in a group and can only see items in their group. Invite family members to your group, or share an instance among friends! ## Why Not Use Something Else? -There are a lot of great inventory management systems out there, but none of them _really_ fit my needs as a home user. Snipe-IT is a fantastic product that has so many robust features and management options that it's easy to become overwhelmed and confused. I wanted something that was simple and easy to use that didn't require a lot of cognitive overhead to manage. 
I primarily built this to organize my IOT devices and save my warranty and documentation information in a central, searchable location. +There are a lot of great inventory management systems out there, but none of them _really_ fit my needs as a home user. Snipe-IT is a fantastic product that has so many robust features and management options which makes it easy to become overwhelmed and confused. I wanted something that was simple and easy to use that didn't require a lot of cognitive overhead to manage. I primarily built this to organize my IOT devices and save my warranty and documentation information in a central, searchable location. ### Spreadsheet -That's a fair point. If your needs can be fulfilled by a Spreadsheet, I'd suggest using that instead. I've found spreadsheets get pretty unwieldy when you have a lot of data and it's hard to keep track of what's where. I also wanted to be able to search and filter my data in a more robust way than a spreadsheet can provide. I also wanted to leave to door open for more advanced features in the future like maintenance logs, moving label generators, and more. +That's a fair point. If your needs can be fulfilled by a Spreadsheet, I'd suggest using that instead. I've found spreadsheets get pretty unwieldy when you have a lot of data, and it's hard to keep track of what's where. I also wanted to be able to search and filter my data in a more robust way than a spreadsheet can provide. I also wanted to leave the door open for more advanced features in the future like maintenance logs, moving label generators, and more. ### Snipe-It? -Snipe-It is the gold standard for IT management. If your use-case is to manage consumables and IT physical infrastructure I highly suggest you look at Snipe-It over Homebox, it's just more purpose built for that use case. Homebox is, in contrast, purpose built for the home user, which means that we try to focus on keeping things simple and easy to use. 
Lowering the friction for creating items and managing them is a key goal of Homebox which means you lose out on some of the more advanced features. In most cases this is a good trade-off. \ No newline at end of file +Snipe-It is the gold standard for IT management. If your use-case is to manage consumables and IT physical infrastructure, I highly suggest you look at Snipe-It over Homebox, it's just more purpose built for that use case. Homebox is, in contrast, purpose built for the home user, which means that we try to focus on keeping things simple and easy to use. Lowering the friction for creating items and managing them is a key goal of Homebox which means you lose out on some of the more advanced features. In most cases, this is a good trade-off. \ No newline at end of file diff --git a/docs/docs/quick-start.md b/docs/docs/quick-start.md index 3a5fad3..278b442 100644 --- a/docs/docs/quick-start.md +++ b/docs/docs/quick-start.md @@ -4,21 +4,33 @@ Great for testing out the application, but not recommended for stable use. Checkout the docker-compose for the recommended deployment. +For each image there are two tags: the regular tag, and $TAG-rootless, which uses a non-root image.
+ ```sh -docker run --name=homebox \ - --restart=always \ - --publish=3100:7745 \ - ghcr.io/hay-kot/homebox:latest +# If using the rootless image, ensure data +# folder has correct permissions +$ mkdir -p /path/to/data/folder +$ chown 65532:65532 -R /path/to/data/folder +# --------------------------------------- +# Run the image +$ docker run -d \ + --name homebox \ + --restart unless-stopped \ + --publish 3100:7745 \ + --env TZ=Europe/Bucharest \ + --volume /path/to/data/folder/:/data \ + ghcr.io/hay-kot/homebox:latest +# ghcr.io/hay-kot/homebox:latest-rootless + ``` ## Docker-Compose ```yaml -version: "3.4" - services: homebox: image: ghcr.io/hay-kot/homebox:latest +# image: ghcr.io/hay-kot/homebox:latest-rootless container_name: homebox restart: always environment: @@ -35,6 +47,9 @@ volumes: driver: local ``` +!!! note + If you use the `rootless` image, and instead of using named volumes you would prefer using a hostMount directly (e.g., `volumes: [ /path/to/data/folder:/data ]`) you need to `chown` the chosen directory in advance to the `65532` user (as shown in the Docker example above). 
+ ## Env Variables & Configuration | Variable | Default | Description | @@ -44,9 +59,13 @@ volumes: | HBOX_WEB_HOST | | host to run the web server on, if you're using docker do not change this | | HBOX_OPTIONS_ALLOW_REGISTRATION | true | allow users to register themselves | | HBOX_OPTIONS_AUTO_INCREMENT_ASSET_ID | true | auto increments the asset_id field for new items | +| HBOX_OPTIONS_CURRENCY_CONFIG | | json configuration file containing additional currencies | | HBOX_WEB_MAX_UPLOAD_SIZE | 10 | maximum file upload size supported in MB | +| HBOX_WEB_READ_TIMEOUT | 10 | Read timeout of HTTP server | +| HBOX_WEB_WRITE_TIMEOUT | 10 | Write timeout of HTTP server | +| HBOX_WEB_IDLE_TIMEOUT | 30 | Idle timeout of HTTP server | | HBOX_STORAGE_DATA | /data/ | path to the data directory, do not change this if you're using docker | -| HBOX_STORAGE_SQLITE_URL | /data/homebox.db?_fk=1 | sqlite database url, in you're using docker do not change this | +| HBOX_STORAGE_SQLITE_URL | /data/homebox.db?_fk=1 | sqlite database url, if you're using docker do not change this | | HBOX_LOG_LEVEL | info | log level to use, can be one of: trace, debug, info, warn, error, critical | | HBOX_LOG_FORMAT | text | log format to use, can be one of: text, json | | HBOX_MAILER_HOST | | email host to use, if not set no email provider will be used | @@ -84,6 +103,7 @@ volumes: --debug-port/$HBOX_DEBUG_PORT (default: 4000) --options-allow-registration/$HBOX_OPTIONS_ALLOW_REGISTRATION (default: true) --options-auto-increment-asset-id/$HBOX_OPTIONS_AUTO_INCREMENT_ASSET_ID (default: true) + --options-currency-config/$HBOX_OPTIONS_CURRENCY_CONFIG --help/-h display this help message ``` diff --git a/docs/docs/tips-tricks.md b/docs/docs/tips-tricks.md index f7f47d9..a5ed05a 100644 --- a/docs/docs/tips-tricks.md +++ b/docs/docs/tips-tricks.md @@ -12,7 +12,7 @@ Custom fields are a great way to add any extra information to your item. The fol Custom fields are appended to the main details section of your item.
!!! tip - Homebox Custom Fields also have special support for URLs. Provide a URL (`https://google.com`) and it will be automatically converted to a clickable link in the UI. Optionally, you can also use markdown syntax to add a custom text to the button. `[Google](https://google.com)` + Homebox Custom Fields also have special support for URLs. Provide a URL (`https://google.com`) and it will be automatically converted to a clickable link in the UI. Optionally, you can also use Markdown syntax to add a custom text to the button. `[Google](https://google.com)` ## Managing Asset IDs @@ -20,29 +20,61 @@ Homebox provides the option to auto-set asset IDs, this is the default behavior. Example ID: `000-001` -Asset IDs are partially managed by Homebox, but have a flexible implementation to allow for unique use cases. ID's are non-unique at the database level so there is nothing stopping a user from manually setting duplicate IDs for various items. There are two recommended approaches to manage Asset IDs +Asset IDs are partially managed by Homebox, but have a flexible implementation to allow for unique use cases. IDs are non-unique at the database level, so there is nothing stopping a user from manually setting duplicate IDs for various items. There are two recommended approaches to manage Asset IDs: ### 1. Auto Incrementing IDs -This is the default behavior and likely to one to experience the most consistent behavior. Whenever creating or importing an item, that items receives the next available ID. This is the most consistent approach and is recommended for most users. +This is the default behavior likely to experience the most consistency. Whenever creating or importing an item, that item receives the next available ID. This is recommended for most users. -### 2. Auto Incrementing ID's with Reset +### 2. Auto Incrementing IDs with Reset -In some cases you may want to skip some items such as consumables, or items that are loosely tracked. 
In this case, we recommend that you leave auto-incrementing ID's enabled _however_ when you create a new item that you want to skip, you can go to that item and reset the ID to 0. This will remove it from the auto-incrementing sequence and the next item will receive the next available ID. +In some cases, you may want to skip some items such as consumables, or items that are loosely tracked. In this case, we recommend that you leave auto-incrementing IDs enabled _however_ when you create a new item that you want to skip, you can go to that item and reset the ID to 0. This will remove it from the auto-incrementing sequence, and the next item will receive the next available ID. !!! tip - If you're migrating from an older version there is a action on the users profile page to assign IDs to all items. This will assign the next available ID to all items in the order of creation. You should _only_ do this once during the migration process. You should be especially cautious of this action if you're using the reset feature described in option number 2 + If you're migrating from an older version, there is an action on the user's profile page to assign IDs to all items. This will assign the next available ID to all items in order of their creation. You should __only do this once__ during the migration process. You should be especially cautious with this if you're using the reset feature described in [option number 2](#2-auto-incrementing-ids-with-reset) ## QR Codes :octicons-tag-24: 0.7.0 -Homebox has a built-in QR code generator that can be used to generate QR codes for your items. This is useful for tracking items with a mobile device. You can generate a QR code for any item by clicking the QR code icon in the top right of the item details page. The same can be done for the Labels and Locations page. Currently support is limited to generating one off QR Codes. +Homebox has a built-in QR code generator that can be used to generate QR codes for your items. 
This is useful for tracking items with a mobile device. You can generate a QR code for any item by clicking the QR code icon in the top right of the item details page. The same can be done for the Labels and Locations page. Currently, support is limited to generating one-off QR Codes. -However, the API endpoint is available for generating QR codes on the fly for any item (or any other data) if you provide a valid API key in the query parameters. An example url would look like `/api/v1/qrcode?data=https://homebox.fly.dev/item/{uuid}&access_token={api_key}`. Currently the easiest way to get an API token is to use one from an existing URL of the QR Code in the API key, but this will be improved in the future. +However, the API endpoint is available for generating QR codes on the fly for any item (or any other data) if you provide a valid API key in the query parameters. An example url would look like `/api/v1/qrcode?data=https://homebox.fly.dev/item/{uuid}`. Currently, the easiest way to get an API token is to use one from an existing URL of the QR Code in the API key, but this will be improved in the future. -:octicons-tag-24: 0.8.0 +:octicons-tag-24: v0.8.0 In version 0.8.0 We've added a custom label generation. On the tools page, there is now a link to the label-generator page where you can generate labels based on Asset ID for your inventory. These are still in early development, so please provide feedback. There's also more information on the implementation on the label generator page. -[Demo](https://homebox.fly.dev/reports/label-generator) \ No newline at end of file +[Demo](https://homebox.fly.dev/reports/label-generator) + +## Scheduled Maintenance Notifications + +:octicons-tag-24: v0.9.0 + +Homebox uses [shoutrrr](https://containrrr.dev/shoutrrr/0.7/) to send notifications. This allows you to send notifications to a variety of services. 
On your profile page, you can add notification URLs to your profile which will be used to send notifications when a maintenance event is scheduled. + +**Notifications are sent on the day the maintenance is scheduled at or around 8am.** + +As of `v0.9.0` we have limited support for complex scheduling of maintenance events. If you have requests for extended functionality, please open an issue on GitHub or reach out on Discord. We're still gauging the demand for this feature. + + +## Custom Currencies + +:octicons-tag-24: v0.11.0 + +Homebox allows you to add additional currencies to your instance by specifying a JSON file containing the currencies you want to add. + +**Environment Variable:** `HBOX_OPTIONS_CURRENCY_CONFIG` + +### Example + +```json +[ + { + "code": "AED", + "local": "United Arab Emirates", + "symbol": "د.إ", + "name": "United Arab Emirates Dirham" + } +] +``` diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 1b7dda5..65bd2e1 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -24,6 +24,7 @@ theme: - navigation.expand - navigation.sections - navigation.tabs.sticky + - navigation.tabs favicon: assets/img/favicon.svg logo: assets/img/favicon.svg @@ -44,12 +45,13 @@ markdown_extensions: custom_checkbox: true - admonition - attr_list - - pymdownx.tabbed - pymdownx.superfences nav: - - Home: index.md - - Quick Start: quick-start.md - - Tips and Tricks: tips-tricks.md - - Importing Data: import-csv.md - - Building The Binary: build.md + - Home: + - Home: index.md + - Quick Start: quick-start.md + - Tips and Tricks: tips-tricks.md + - Import and Export: import-csv.md + - Building The Binary: build.md + - API: "https://redocly.github.io/redoc/?url=https://hay-kot.github.io/homebox/api/openapi-2.0.json" diff --git a/docs/poetry.lock b/docs/poetry.lock deleted file mode 100644 index abf75a8..0000000 --- a/docs/poetry.lock +++ /dev/null @@ -1,651 +0,0 @@ -# This file is automatically @generated by Poetry and should not be changed by hand.
- -[[package]] -name = "certifi" -version = "2022.12.7" -description = "Python package for providing Mozilla's CA Bundle." -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.0.1" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -category = "main" -optional = false -python-versions = "*" -files = [ - {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"}, - {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"}, - {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"}, - {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"}, - {file = 
"charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"}, - {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"}, - {file = 
"charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"}, - {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"}, - {file = 
"charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"}, - {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"}, - {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"}, -] - -[[package]] -name = "click" -version = "8.1.3" -description = "Composable command line interface toolkit" -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.5" -description = 
"Cross-platform colored terminal text." -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, - {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, -] - -[[package]] -name = "ghp-import" -version = "2.1.0" -description = "Copy your docs directly to the gh-pages branch." -category = "main" -optional = false -python-versions = "*" -files = [ - {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, - {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, -] - -[package.dependencies] -python-dateutil = ">=2.8.1" - -[package.extras] -dev = ["flake8", "markdown", "twine", "wheel"] - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "Jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." 
-category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "Markdown" -version = "3.3.7" -description = "Python implementation of Markdown." -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, - {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, -] - -[package.extras] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "MarkupSafe" -version = "2.1.1" -description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-win32.whl", hash = "sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6"}, - {file = "MarkupSafe-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a"}, - {file = 
"MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff"}, - {file = "MarkupSafe-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-win32.whl", hash = "sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1"}, - {file = "MarkupSafe-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-win32.whl", hash = 
"sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c"}, - {file = "MarkupSafe-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247"}, - {file = "MarkupSafe-2.1.1.tar.gz", hash = "sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b"}, -] - -[[package]] -name = "mergedeep" -version = "1.3.4" -description = "A deep merge function for 🐍." -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, - {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, -] - -[[package]] -name = "mkdocs" -version = "1.4.2" -description = "Project documentation with Markdown." -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs-1.4.2-py3-none-any.whl", hash = "sha256:c8856a832c1e56702577023cd64cc5f84948280c1c0fcc6af4cd39006ea6aa8c"}, - {file = "mkdocs-1.4.2.tar.gz", hash = "sha256:8947af423a6d0facf41ea1195b8e1e8c85ad94ac95ae307fe11232e0424b11c5"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} -ghp-import = ">=1.0" -jinja2 = ">=2.11.1" -markdown = ">=3.2.1,<3.4" -mergedeep = ">=1.3.4" -packaging = ">=20.5" -pyyaml = ">=5.1" -pyyaml-env-tag = ">=0.1" -watchdog = ">=2.0" - -[package.extras] -i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] - -[[package]] -name = "mkdocs-material" -version = "9.0.5" -description = "Documentation that simply works" -category = "main" 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs_material-9.0.5-py3-none-any.whl", hash = "sha256:53194bf8ae7dfb527fef2892a6ee291d3efc7b57d010b04dbb818b4ee88074a5"}, - {file = "mkdocs_material-9.0.5.tar.gz", hash = "sha256:bbfed71788223b4c548a6e637cb7a9ee5b6ad6593c6d5b04e57c9c4d2c39d76b"}, -] - -[package.dependencies] -colorama = ">=0.4" -jinja2 = ">=3.0" -markdown = ">=3.2" -mkdocs = ">=1.4.2" -mkdocs-material-extensions = ">=1.1" -pygments = ">=2.14" -pymdown-extensions = ">=9.9.1" -regex = ">=2022.4.24" -requests = ">=2.26" - -[[package]] -name = "mkdocs-material-extensions" -version = "1.1.1" -description = "Extension pack for Python Markdown and MkDocs Material." -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mkdocs_material_extensions-1.1.1-py3-none-any.whl", hash = "sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"}, - {file = "mkdocs_material_extensions-1.1.1.tar.gz", hash = "sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93"}, -] - -[[package]] -name = "packaging" -version = "21.3" -description = "Core utilities for Python packages" -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, - {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, -] - -[package.dependencies] -pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" - -[[package]] -name = "pygments" -version = "2.14.0" -description = "Pygments is a syntax highlighting package written in Python." 
-category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"}, - {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"}, -] - -[package.extras] -plugins = ["importlib-metadata"] - -[[package]] -name = "pymdown-extensions" -version = "9.9.1" -description = "Extension pack for Python Markdown." -category = "main" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pymdown_extensions-9.9.1-py3-none-any.whl", hash = "sha256:8a8973933ab45b6fe8f5f8da1de25766356b1f91dee107bf4a34efd158dc340b"}, - {file = "pymdown_extensions-9.9.1.tar.gz", hash = "sha256:abed29926960bbb3b40f5ed5fa6375e29724d4e3cb86ced7c2bbd37ead1afeea"}, -] - -[package.dependencies] -markdown = ">=3.2" - -[[package]] -name = "pyparsing" -version = "3.0.9" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -category = "main" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "PyYAML" 
-version = "6.0" -description = "YAML parser and emitter for Python" -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = 
"sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] - -[[package]] -name = "pyyaml_env_tag" -version = "0.1" -description = "A custom YAML tag for referencing environment variables in YAML files. " -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, - {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, -] - -[package.dependencies] -pyyaml = "*" - -[[package]] -name = "regex" -version = "2022.10.31" -description = "Alternative regular expression module, to replace re." 
-category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "regex-2022.10.31-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a8ff454ef0bb061e37df03557afda9d785c905dab15584860f982e88be73015f"}, - {file = "regex-2022.10.31-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1eba476b1b242620c266edf6325b443a2e22b633217a9835a52d8da2b5c051f9"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0e5af9a9effb88535a472e19169e09ce750c3d442fb222254a276d77808620b"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d03fe67b2325cb3f09be029fd5da8df9e6974f0cde2c2ac6a79d2634e791dd57"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9d0b68ac1743964755ae2d89772c7e6fb0118acd4d0b7464eaf3921c6b49dd4"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a45b6514861916c429e6059a55cf7db74670eaed2052a648e3e4d04f070e001"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8b0886885f7323beea6f552c28bff62cbe0983b9fbb94126531693ea6c5ebb90"}, - {file = "regex-2022.10.31-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5aefb84a301327ad115e9d346c8e2760009131d9d4b4c6b213648d02e2abe144"}, - {file = "regex-2022.10.31-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:702d8fc6f25bbf412ee706bd73019da5e44a8400861dfff7ff31eb5b4a1276dc"}, - {file = "regex-2022.10.31-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a3c1ebd4ed8e76e886507c9eddb1a891673686c813adf889b864a17fafcf6d66"}, - {file = "regex-2022.10.31-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:50921c140561d3db2ab9f5b11c5184846cde686bb5a9dc64cae442926e86f3af"}, - {file = "regex-2022.10.31-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:7db345956ecce0c99b97b042b4ca7326feeec6b75facd8390af73b18e2650ffc"}, - {file = "regex-2022.10.31-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:763b64853b0a8f4f9cfb41a76a4a85a9bcda7fdda5cb057016e7706fde928e66"}, - {file = "regex-2022.10.31-cp310-cp310-win32.whl", hash = "sha256:44136355e2f5e06bf6b23d337a75386371ba742ffa771440b85bed367c1318d1"}, - {file = "regex-2022.10.31-cp310-cp310-win_amd64.whl", hash = "sha256:bfff48c7bd23c6e2aec6454aaf6edc44444b229e94743b34bdcdda2e35126cf5"}, - {file = "regex-2022.10.31-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4b4b1fe58cd102d75ef0552cf17242705ce0759f9695334a56644ad2d83903fe"}, - {file = "regex-2022.10.31-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:542e3e306d1669b25936b64917285cdffcd4f5c6f0247636fec037187bd93542"}, - {file = "regex-2022.10.31-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c27cc1e4b197092e50ddbf0118c788d9977f3f8f35bfbbd3e76c1846a3443df7"}, - {file = "regex-2022.10.31-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8e38472739028e5f2c3a4aded0ab7eadc447f0d84f310c7a8bb697ec417229e"}, - {file = "regex-2022.10.31-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76c598ca73ec73a2f568e2a72ba46c3b6c8690ad9a07092b18e48ceb936e9f0c"}, - {file = "regex-2022.10.31-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c28d3309ebd6d6b2cf82969b5179bed5fefe6142c70f354ece94324fa11bf6a1"}, - {file = "regex-2022.10.31-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9af69f6746120998cd9c355e9c3c6aec7dff70d47247188feb4f829502be8ab4"}, - {file = "regex-2022.10.31-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a5f9505efd574d1e5b4a76ac9dd92a12acb2b309551e9aa874c13c11caefbe4f"}, - {file = "regex-2022.10.31-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5ff525698de226c0ca743bfa71fc6b378cda2ddcf0d22d7c37b1cc925c9650a5"}, - {file = 
"regex-2022.10.31-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe7fda2fe7c8890d454f2cbc91d6c01baf206fbc96d89a80241a02985118c0c"}, - {file = "regex-2022.10.31-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:2cdc55ca07b4e70dda898d2ab7150ecf17c990076d3acd7a5f3b25cb23a69f1c"}, - {file = "regex-2022.10.31-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:44a6c2f6374e0033873e9ed577a54a3602b4f609867794c1a3ebba65e4c93ee7"}, - {file = "regex-2022.10.31-cp311-cp311-win32.whl", hash = "sha256:d8716f82502997b3d0895d1c64c3b834181b1eaca28f3f6336a71777e437c2af"}, - {file = "regex-2022.10.31-cp311-cp311-win_amd64.whl", hash = "sha256:61edbca89aa3f5ef7ecac8c23d975fe7261c12665f1d90a6b1af527bba86ce61"}, - {file = "regex-2022.10.31-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a069c8483466806ab94ea9068c34b200b8bfc66b6762f45a831c4baaa9e8cdd"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d26166acf62f731f50bdd885b04b38828436d74e8e362bfcb8df221d868b5d9b"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac741bf78b9bb432e2d314439275235f41656e189856b11fb4e774d9f7246d81"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75f591b2055523fc02a4bbe598aa867df9e953255f0b7f7715d2a36a9c30065c"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bddd61d2a3261f025ad0f9ee2586988c6a00c780a2fb0a92cea2aa702c54"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef4163770525257876f10e8ece1cf25b71468316f61451ded1a6f44273eedeb5"}, - {file = "regex-2022.10.31-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7b280948d00bd3973c1998f92e22aa3ecb76682e3a4255f33e1020bd32adf443"}, - {file = 
"regex-2022.10.31-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d0213671691e341f6849bf33cd9fad21f7b1cb88b89e024f33370733fec58742"}, - {file = "regex-2022.10.31-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:22e7ebc231d28393dfdc19b185d97e14a0f178bedd78e85aad660e93b646604e"}, - {file = "regex-2022.10.31-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8ad241da7fac963d7573cc67a064c57c58766b62a9a20c452ca1f21050868dfa"}, - {file = "regex-2022.10.31-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:586b36ebda81e6c1a9c5a5d0bfdc236399ba6595e1397842fd4a45648c30f35e"}, - {file = "regex-2022.10.31-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0653d012b3bf45f194e5e6a41df9258811ac8fc395579fa82958a8b76286bea4"}, - {file = "regex-2022.10.31-cp36-cp36m-win32.whl", hash = "sha256:144486e029793a733e43b2e37df16a16df4ceb62102636ff3db6033994711066"}, - {file = "regex-2022.10.31-cp36-cp36m-win_amd64.whl", hash = "sha256:c14b63c9d7bab795d17392c7c1f9aaabbffd4cf4387725a0ac69109fb3b550c6"}, - {file = "regex-2022.10.31-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4cac3405d8dda8bc6ed499557625585544dd5cbf32072dcc72b5a176cb1271c8"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23cbb932cc53a86ebde0fb72e7e645f9a5eec1a5af7aa9ce333e46286caef783"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74bcab50a13960f2a610cdcd066e25f1fd59e23b69637c92ad470784a51b1347"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78d680ef3e4d405f36f0d6d1ea54e740366f061645930072d39bca16a10d8c93"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce6910b56b700bea7be82c54ddf2e0ed792a577dfaa4a76b9af07d550af435c6"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:659175b2144d199560d99a8d13b2228b85e6019b6e09e556209dfb8c37b78a11"}, - {file = "regex-2022.10.31-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1ddf14031a3882f684b8642cb74eea3af93a2be68893901b2b387c5fd92a03ec"}, - {file = "regex-2022.10.31-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b683e5fd7f74fb66e89a1ed16076dbab3f8e9f34c18b1979ded614fe10cdc4d9"}, - {file = "regex-2022.10.31-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2bde29cc44fa81c0a0c8686992c3080b37c488df167a371500b2a43ce9f026d1"}, - {file = "regex-2022.10.31-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4919899577ba37f505aaebdf6e7dc812d55e8f097331312db7f1aab18767cce8"}, - {file = "regex-2022.10.31-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:9c94f7cc91ab16b36ba5ce476f1904c91d6c92441f01cd61a8e2729442d6fcf5"}, - {file = "regex-2022.10.31-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ae1e96785696b543394a4e3f15f3f225d44f3c55dafe3f206493031419fedf95"}, - {file = "regex-2022.10.31-cp37-cp37m-win32.whl", hash = "sha256:c670f4773f2f6f1957ff8a3962c7dd12e4be54d05839b216cb7fd70b5a1df394"}, - {file = "regex-2022.10.31-cp37-cp37m-win_amd64.whl", hash = "sha256:8e0caeff18b96ea90fc0eb6e3bdb2b10ab5b01a95128dfeccb64a7238decf5f0"}, - {file = "regex-2022.10.31-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:131d4be09bea7ce2577f9623e415cab287a3c8e0624f778c1d955ec7c281bd4d"}, - {file = "regex-2022.10.31-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e613a98ead2005c4ce037c7b061f2409a1a4e45099edb0ef3200ee26ed2a69a8"}, - {file = "regex-2022.10.31-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052b670fafbe30966bbe5d025e90b2a491f85dfe5b2583a163b5e60a85a321ad"}, - {file = "regex-2022.10.31-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa62a07ac93b7cb6b7d0389d8ef57ffc321d78f60c037b19dfa78d6b17c928ee"}, - {file = 
"regex-2022.10.31-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5352bea8a8f84b89d45ccc503f390a6be77917932b1c98c4cdc3565137acc714"}, - {file = "regex-2022.10.31-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20f61c9944f0be2dc2b75689ba409938c14876c19d02f7585af4460b6a21403e"}, - {file = "regex-2022.10.31-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29c04741b9ae13d1e94cf93fca257730b97ce6ea64cfe1eba11cf9ac4e85afb6"}, - {file = "regex-2022.10.31-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:543883e3496c8b6d58bd036c99486c3c8387c2fc01f7a342b760c1ea3158a318"}, - {file = "regex-2022.10.31-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7a8b43ee64ca8f4befa2bea4083f7c52c92864d8518244bfa6e88c751fa8fff"}, - {file = "regex-2022.10.31-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6a9a19bea8495bb419dc5d38c4519567781cd8d571c72efc6aa959473d10221a"}, - {file = "regex-2022.10.31-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6ffd55b5aedc6f25fd8d9f905c9376ca44fcf768673ffb9d160dd6f409bfda73"}, - {file = "regex-2022.10.31-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4bdd56ee719a8f751cf5a593476a441c4e56c9b64dc1f0f30902858c4ef8771d"}, - {file = "regex-2022.10.31-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ca88da1bd78990b536c4a7765f719803eb4f8f9971cc22d6ca965c10a7f2c4c"}, - {file = "regex-2022.10.31-cp38-cp38-win32.whl", hash = "sha256:5a260758454580f11dd8743fa98319bb046037dfab4f7828008909d0aa5292bc"}, - {file = "regex-2022.10.31-cp38-cp38-win_amd64.whl", hash = "sha256:5e6a5567078b3eaed93558842346c9d678e116ab0135e22eb72db8325e90b453"}, - {file = "regex-2022.10.31-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5217c25229b6a85049416a5c1e6451e9060a1edcf988641e309dbe3ab26d3e49"}, - {file = "regex-2022.10.31-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:4bf41b8b0a80708f7e0384519795e80dcb44d7199a35d52c15cc674d10b3081b"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf0da36a212978be2c2e2e2d04bdff46f850108fccc1851332bcae51c8907cc"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d403d781b0e06d2922435ce3b8d2376579f0c217ae491e273bab8d092727d244"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a37d51fa9a00d265cf73f3de3930fa9c41548177ba4f0faf76e61d512c774690"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4f781ffedd17b0b834c8731b75cce2639d5a8afe961c1e58ee7f1f20b3af185"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d243b36fbf3d73c25e48014961e83c19c9cc92530516ce3c43050ea6276a2ab7"}, - {file = "regex-2022.10.31-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:370f6e97d02bf2dd20d7468ce4f38e173a124e769762d00beadec3bc2f4b3bc4"}, - {file = "regex-2022.10.31-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:597f899f4ed42a38df7b0e46714880fb4e19a25c2f66e5c908805466721760f5"}, - {file = "regex-2022.10.31-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7dbdce0c534bbf52274b94768b3498abdf675a691fec5f751b6057b3030f34c1"}, - {file = "regex-2022.10.31-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:22960019a842777a9fa5134c2364efaed5fbf9610ddc5c904bd3a400973b0eb8"}, - {file = "regex-2022.10.31-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:7f5a3ffc731494f1a57bd91c47dc483a1e10048131ffb52d901bfe2beb6102e8"}, - {file = "regex-2022.10.31-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7ef6b5942e6bfc5706301a18a62300c60db9af7f6368042227ccb7eeb22d0892"}, - {file = "regex-2022.10.31-cp39-cp39-win32.whl", hash = 
"sha256:395161bbdbd04a8333b9ff9763a05e9ceb4fe210e3c7690f5e68cedd3d65d8e1"}, - {file = "regex-2022.10.31-cp39-cp39-win_amd64.whl", hash = "sha256:957403a978e10fb3ca42572a23e6f7badff39aa1ce2f4ade68ee452dc6807692"}, - {file = "regex-2022.10.31.tar.gz", hash = "sha256:a3a98921da9a1bf8457aeee6a551948a83601689e5ecdd736894ea9bbec77e83"}, -] - -[[package]] -name = "requests" -version = "2.28.2" -description = "Python HTTP for Humans." -category = "main" -optional = false -python-versions = ">=3.7, <4" -files = [ - {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, - {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<1.27" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "urllib3" -version = "1.26.14" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"}, - {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] - -[[package]] -name = "watchdog" -version = "2.1.9" -description = "Filesystem events monitoring" -category = "main" -optional = false -python-versions = ">=3.6" -files = [ - {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a735a990a1095f75ca4f36ea2ef2752c99e6ee997c46b0de507ba40a09bf7330"}, - {file = "watchdog-2.1.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b17d302850c8d412784d9246cfe8d7e3af6bcd45f958abb2d08a6f8bedf695d"}, - {file = "watchdog-2.1.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658"}, - {file = "watchdog-2.1.9-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64a27aed691408a6abd83394b38503e8176f69031ca25d64131d8d640a307591"}, - {file = "watchdog-2.1.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:195fc70c6e41237362ba720e9aaf394f8178bfc7fa68207f112d108edef1af33"}, - {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bfc4d351e6348d6ec51df007432e6fe80adb53fd41183716017026af03427846"}, - {file = "watchdog-2.1.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8250546a98388cbc00c3ee3cc5cf96799b5a595270dfcfa855491a64b86ef8c3"}, - {file = "watchdog-2.1.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:117ffc6ec261639a0209a3252546b12800670d4bf5f84fbd355957a0595fe654"}, - {file = 
"watchdog-2.1.9-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:97f9752208f5154e9e7b76acc8c4f5a58801b338de2af14e7e181ee3b28a5d39"}, - {file = "watchdog-2.1.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:247dcf1df956daa24828bfea5a138d0e7a7c98b1a47cf1fa5b0c3c16241fcbb7"}, - {file = "watchdog-2.1.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:226b3c6c468ce72051a4c15a4cc2ef317c32590d82ba0b330403cafd98a62cfd"}, - {file = "watchdog-2.1.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d9820fe47c20c13e3c9dd544d3706a2a26c02b2b43c993b62fcd8011bcc0adb3"}, - {file = "watchdog-2.1.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:70af927aa1613ded6a68089a9262a009fbdf819f46d09c1a908d4b36e1ba2b2d"}, - {file = "watchdog-2.1.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9f05a5f7c12452f6a27203f76779ae3f46fa30f1dd833037ea8cbc2887c60213"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_armv7l.whl", hash = "sha256:255bb5758f7e89b1a13c05a5bceccec2219f8995a3a4c4d6968fe1de6a3b2892"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_i686.whl", hash = "sha256:d3dda00aca282b26194bdd0adec21e4c21e916956d972369359ba63ade616153"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64.whl", hash = "sha256:186f6c55abc5e03872ae14c2f294a153ec7292f807af99f57611acc8caa75306"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:083171652584e1b8829581f965b9b7723ca5f9a2cd7e20271edf264cfd7c1412"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_s390x.whl", hash = "sha256:b530ae007a5f5d50b7fbba96634c7ee21abec70dc3e7f0233339c81943848dc1"}, - {file = "watchdog-2.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6"}, - {file = "watchdog-2.1.9-py3-none-win32.whl", hash = 
"sha256:5952135968519e2447a01875a6f5fc8c03190b24d14ee52b0f4b1682259520b1"}, - {file = "watchdog-2.1.9-py3-none-win_amd64.whl", hash = "sha256:7a833211f49143c3d336729b0020ffd1274078e94b0ae42e22f596999f50279c"}, - {file = "watchdog-2.1.9-py3-none-win_ia64.whl", hash = "sha256:ad576a565260d8f99d97f2e64b0f97a48228317095908568a9d5c786c829d428"}, - {file = "watchdog-2.1.9.tar.gz", hash = "sha256:43ce20ebb36a51f21fa376f76d1d4692452b2527ccd601950d69ed36b9e21609"}, -] - -[package.extras] -watchmedo = ["PyYAML (>=3.10)"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.10" -content-hash = "a0df662c99e9a84d2274616cea45eb315004b70884a296b6db240d790943f1b5" diff --git a/docs/pyproject.toml b/docs/pyproject.toml deleted file mode 100644 index f46a1b8..0000000 --- a/docs/pyproject.toml +++ /dev/null @@ -1,15 +0,0 @@ -[tool.poetry] -name = "docs" -version = "0.1.0" -description = "" -authors = ["Hayden <64056131+hay-kot@users.noreply.github.com>"] -readme = "README.md" - -[tool.poetry.dependencies] -python = "^3.10" -mkdocs-material = "^9.0.5" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..d7301ed --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1 @@ +mkdocs-material==9.5.12 \ No newline at end of file diff --git a/frontend/.eslintrc.js b/frontend/.eslintrc.js index fa57669..c567952 100644 --- a/frontend/.eslintrc.js +++ b/frontend/.eslintrc.js @@ -25,6 +25,7 @@ module.exports = { "vue/no-setup-props-destructure": 0, "vue/no-multiple-template-root": 0, "vue/no-v-model-argument": 0, + "@typescript-eslint/consistent-type-imports": "error", "@typescript-eslint/ban-ts-comment": 0, "@typescript-eslint/no-unused-vars": [ "error", diff --git a/frontend/app.vue b/frontend/app.vue index e8e26f3..8c48eb8 100644 --- a/frontend/app.vue +++ b/frontend/app.vue @@ -2,6 +2,10 @@ + + + + diff --git a/frontend/components/App/Header.vue 
b/frontend/components/App/Header.vue index a142f8d..b42da7f 100644 --- a/frontend/components/App/Header.vue +++ b/frontend/components/App/Header.vue @@ -1,4 +1,6 @@ diff --git a/frontend/components/App/Toast.vue b/frontend/components/App/Toast.vue index 06c3944..d713714 100644 --- a/frontend/components/App/Toast.vue +++ b/frontend/components/App/Toast.vue @@ -14,14 +14,14 @@ >
{{ notify.message }}
@@ -31,6 +31,10 @@ diff --git a/frontend/components/Base/Modal.vue b/frontend/components/Base/Modal.vue index 0aee636..c1e7591 100644 --- a/frontend/components/Base/Modal.vue +++ b/frontend/components/Base/Modal.vue @@ -32,6 +32,12 @@ }, }); + function escClose(e: KeyboardEvent) { + if (e.key === "Escape") { + close(); + } + } + function close() { if (props.readonly) { emit("cancel"); @@ -42,4 +48,12 @@ const modalId = useId(); const modal = useVModel(props, "modelValue", emit); + + watchEffect(() => { + if (modal.value) { + document.addEventListener("keydown", escClose); + } else { + document.removeEventListener("keydown", escClose); + } + }); diff --git a/frontend/components/Base/SectionHeader.vue b/frontend/components/Base/SectionHeader.vue index 43fa8b2..da25def 100644 --- a/frontend/components/Base/SectionHeader.vue +++ b/frontend/components/Base/SectionHeader.vue @@ -1,7 +1,7 @@ - - diff --git a/frontend/components/Chart/Line.vue b/frontend/components/Chart/Line.vue deleted file mode 100644 index c36ef93..0000000 --- a/frontend/components/Chart/Line.vue +++ /dev/null @@ -1,113 +0,0 @@ - - - - - diff --git a/frontend/components/Form/Autocomplete2.vue b/frontend/components/Form/Autocomplete2.vue index b71c30e..d9fe64a 100644 --- a/frontend/components/Form/Autocomplete2.vue +++ b/frontend/components/Form/Autocomplete2.vue @@ -10,8 +10,16 @@ class="w-full input input-bordered" @change="search = $event.target.value" /> + - + -